repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
jesford/cluster-lensing
clusterlensing/utils.py
check_units_and_type
def check_units_and_type(input, expected_units, num=None, is_scalar=False):
    """Validate the units and type of input, returning it with units attached.

    If input carries no units and expected_units is not None, the output
    is assigned those units. If input carries units that conflict with
    expected_units, a ValueError is raised.

    Parameters
    ----------
    input : array_like or float
        Variable to check; should be 1D array-like or scalar.
    expected_units : astropy.units or None
        Unit the input is expected to carry (None for dimensionless).
    num : int, optional
        Required length for array-like input.
    is_scalar : bool, optional
        Set True to treat input as a scalar and skip array conversion.
        Default is False (array-like input).

    Returns
    ----------
    ndarray or float, with astropy.units
        The validated input carrying expected_units, unless a unit
        conflict or length mismatch raises an error.
    """
    # Strip units off the input, verifying they match what was expected.
    if hasattr(input, 'unit'):
        if expected_units is None:
            raise ValueError('Expecting dimensionless input')
        if input.unit != expected_units:
            raise ValueError('Expecting input units of ' +
                             str(expected_units))
        bare_value = input.value
    else:
        bare_value = input

    # Arrays and lists are normalized to a 1D ndarray; scalars pass through.
    if is_scalar is False:
        result = check_array_or_list(bare_value)
    else:
        result = bare_value

    # Re-attach units when a specific unit was expected.
    if expected_units is not None:
        result = result * expected_units

    # Optionally enforce the expected array length.
    if num is not None:
        check_input_size(result, num)

    return result
python
def check_units_and_type(input, expected_units, num=None, is_scalar=False): """Check whether variable has expected units and type. If input does not have units and expected units is not None, then the output will be assigned those units. If input has units that conflict with expected units a ValueError will be raised. Parameters ---------- input : array_like or float Variable that will be checked for units and type. Variable should be 1D or scalar. expected_units : astropy.units or None Unit expected for input. num : int, optional Length expected for input, if it is an array or list. is_scalar : bool, optional Sets whether the input is a scalar quantity. Default is False for array_like inputs; set is_scalar=True to check scalar units only. Returns ---------- ndarray or float, with astropy.units Returns the input array or scalar with expected units, unless a conflict of units or array length occurs, which raise errors. """ if hasattr(input, 'unit'): # check units if expected_units is None: raise ValueError('Expecting dimensionless input') elif input.unit != expected_units: raise ValueError('Expecting input units of ' + str(expected_units)) else: dimensionless = input.value else: dimensionless = input # check its a 1D array and/or convert list to array if is_scalar is False: dimensionfull = check_array_or_list(dimensionless) else: dimensionfull = dimensionless # include units if appropriate if expected_units is not None: dimensionfull = dimensionfull * expected_units # check array length if appropriate if num is not None: check_input_size(dimensionfull, num) return dimensionfull
[ "def", "check_units_and_type", "(", "input", ",", "expected_units", ",", "num", "=", "None", ",", "is_scalar", "=", "False", ")", ":", "if", "hasattr", "(", "input", ",", "'unit'", ")", ":", "# check units", "if", "expected_units", "is", "None", ":", "rais...
Check whether variable has expected units and type. If input does not have units and expected units is not None, then the output will be assigned those units. If input has units that conflict with expected units a ValueError will be raised. Parameters ---------- input : array_like or float Variable that will be checked for units and type. Variable should be 1D or scalar. expected_units : astropy.units or None Unit expected for input. num : int, optional Length expected for input, if it is an array or list. is_scalar : bool, optional Sets whether the input is a scalar quantity. Default is False for array_like inputs; set is_scalar=True to check scalar units only. Returns ---------- ndarray or float, with astropy.units Returns the input array or scalar with expected units, unless a conflict of units or array length occurs, which raise errors.
[ "Check", "whether", "variable", "has", "expected", "units", "and", "type", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/utils.py#L4-L55
jesford/cluster-lensing
clusterlensing/utils.py
check_array_or_list
def check_array_or_list(input):
    """Return input as a 1D ndarray, if it can be converted and its
    elements are non-negative.

    Parameters
    ----------
    input : ndarray or list
        Sequence of non-negative values; lists are converted to ndarray.

    Returns
    -------
    ndarray
        1D array of the input values.

    Raises
    ------
    TypeError
        If input is neither an ndarray nor a list.
    ValueError
        If input is not 1-dimensional, or contains negative values.
    """
    # isinstance (rather than exact type comparison) also accepts
    # ndarray/list subclasses, which behave identically here.
    if isinstance(input, np.ndarray):
        output = input
    elif isinstance(input, list):
        output = np.array(input)
    else:
        raise TypeError('Expecting input type as ndarray or list.')

    if output.ndim != 1:
        raise ValueError('Input array must have 1 dimension.')
    # np.any is the direct test for "contains a negative element",
    # clearer than summing a boolean mask.
    if np.any(output < 0.):
        raise ValueError("Input array values cannot be negative.")

    return output
python
def check_array_or_list(input): """Return 1D ndarray, if input can be converted and elements are non-negative.""" if type(input) != np.ndarray: if type(input) == list: output = np.array(input) else: raise TypeError('Expecting input type as ndarray or list.') else: output = input if output.ndim != 1: raise ValueError('Input array must have 1 dimension.') if np.sum(output < 0.) > 0: raise ValueError("Input array values cannot be negative.") return output
[ "def", "check_array_or_list", "(", "input", ")", ":", "if", "type", "(", "input", ")", "!=", "np", ".", "ndarray", ":", "if", "type", "(", "input", ")", "==", "list", ":", "output", "=", "np", ".", "array", "(", "input", ")", "else", ":", "raise", ...
Return 1D ndarray, if input can be converted and elements are non-negative.
[ "Return", "1D", "ndarray", "if", "input", "can", "be", "converted", "and", "elements", "are", "non", "-", "negative", "." ]
train
https://github.com/jesford/cluster-lensing/blob/2815c1bb07d904ca91a80dae3f52090016768072/clusterlensing/utils.py#L58-L75
wangsix/vmo
bin/scripts/bach_cello_midi_shuffle.py
main
def main():
    """
    This example shows how to parse a music mxl file (music21 and
    musescore/finale required) and create a simple oracle representation.
    The output is a reshuffled midi stream shown in either musescore or
    finale based on your installation of music21.

    OPTIONAL ARGS:
        seq_len: an integer for the length of the output sequence.
        p: a float of the probability using the forward links.
        k: an integer for the starting state.
        LRS: an integer for the lower limit of the LRS of sfx/rsfx
            allowed to jump to.
        weight:
            None: choose uniformly among all the possible sfx/rsfx given
                current state.
            "max": always choose the sfx/rsfx having the longest LRS.
            "weight": choose sfx/rsfx in a way that favors longer ones
                than shorter ones.
    """
    # Path is built from the current working directory, so the script
    # must be launched from its own directory for the file to be found.
    filename = os.path.abspath('') + '/../files/Suite_No_1_for_Cello_M1_Prelude.mxl'
    s = music21.converter.parse(filename)
    # Pull the cello part out of the parsed score and flatten it to notes.
    c = s.getElementById('Violoncello')
    m = c.flat.notes
    # Keep single Note objects only (chords etc. are dropped).
    note_obj_seq = [x for x in m if type(x) is music21.note.Note]
    # Build an oracle over the note sequence ('f' selects the oracle flavor).
    bo = vmo.build_oracle(note_obj_seq,'f')
    bo.name = 'bach_cello_example'
    if len(sys.argv) == 1:
        # No CLI args: generate a sequence as long as the input, with the
        # defaults described in the docstring above.
        b, kend, ktrace = gen.generate(bo, len(note_obj_seq), 0.0, 0, LRS=2, weight='weight')
    else:
        # All five optional args must be supplied together, in order.
        seq_len = int(sys.argv[1])
        if seq_len == 0:
            # 0 means "same length as the input sequence".
            seq_len = len(note_obj_seq)
        p = float(sys.argv[2])
        k = int(sys.argv[3])
        LRS = int(sys.argv[4])
        weight = sys.argv[5]
        b, kend, ktrace = gen.generate(bo, seq_len, p, k, LRS=LRS, weight=weight)
    # Rebuild a music21 stream from the generated state path b.
    stream1 = music21.stream.Stream()
    x = [bo.data[i] for i in b]
    for i in range(len(x)):
        _n = music21.note.Note(x[i][0].nameWithOctave)
        # NOTE(review): this assignment is immediately overwritten by the
        # next line, which replaces the entire duration object.
        _n.duration.type = x[i][0].duration.type
        # NOTE(review): this shares the *same* duration object with the
        # source note — confirm music21 does not mutate shared durations.
        _n.duration = x[i][0].duration
        stream1.append(_n)
    # Display both the original and the reshuffled stream in the
    # notation application configured for music21.
    s.show()
    stream1.show()
python
def main(): """ This example shows how to parse a music mxl file (music21 and musescore/finale required) and create a simple oracle representation. The output is a reshuffled midi stream shown in either musescore or finale based on your installation of music21. OPTIONAL ARGS: seq_len: an integer for the length of the output sequence. p: a float of the probability using the forward links. k: an integer for the starting state. LRS: an integer for the lower limit of the LRS of sfx/rsfx allowed to jump to. weight: None: choose uniformly among all the possible sfx/rsfx given current state. "max": always choose the sfx/rsfx having the longest LRS. "weight": choose sfx/rsfx in a way that favors longer ones than shorter ones. """ filename = os.path.abspath('') + '/../files/Suite_No_1_for_Cello_M1_Prelude.mxl' s = music21.converter.parse(filename) c = s.getElementById('Violoncello') m = c.flat.notes note_obj_seq = [x for x in m if type(x) is music21.note.Note] bo = vmo.build_oracle(note_obj_seq,'f') bo.name = 'bach_cello_example' if len(sys.argv) == 1: b, kend, ktrace = gen.generate(bo, len(note_obj_seq), 0.0, 0, LRS=2, weight='weight') else: seq_len = int(sys.argv[1]) if seq_len == 0: seq_len = len(note_obj_seq) p = float(sys.argv[2]) k = int(sys.argv[3]) LRS = int(sys.argv[4]) weight = sys.argv[5] b, kend, ktrace = gen.generate(bo, seq_len, p, k, LRS=LRS, weight=weight) stream1 = music21.stream.Stream() x = [bo.data[i] for i in b] for i in range(len(x)): _n = music21.note.Note(x[i][0].nameWithOctave) _n.duration.type = x[i][0].duration.type _n.duration = x[i][0].duration stream1.append(_n) s.show() stream1.show()
[ "def", "main", "(", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "''", ")", "+", "'/../files/Suite_No_1_for_Cello_M1_Prelude.mxl'", "s", "=", "music21", ".", "converter", ".", "parse", "(", "filename", ")", "c", "=", "s", ".", "getE...
This example shows how to parse a music mxl file (music21 and musescore/finale required) and create a simple oracle representation. The output is a reshuffled midi stream shown in either musescore or finale based on your installation of music21. OPTIONAL ARGS: seq_len: an integer for the length of the output sequence. p: a float of the probability using the forward links. k: an integer for the starting state. LRS: an integer for the lower limit of the LRS of sfx/rsfx allowed to jump to. weight: None: choose uniformly among all the possible sfx/rsfx given current state. "max": always choose the sfx/rsfx having the longest LRS. "weight": choose sfx/rsfx in a way that favors longer ones than shorter ones.
[ "This", "example", "shows", "how", "to", "parse", "a", "music", "mxl", "file", "(", "music21", "and", "musescore", "/", "finale", "required", ")", "and", "create", "a", "simple", "oracle", "representation", ".", "The", "output", "is", "a", "reshuffled", "m...
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/bin/scripts/bach_cello_midi_shuffle.py#L29-L77
gautammishra/lyft-rides-python-sdk
lyft_rides/utils/handlers.py
error_handler
def error_handler(response, **kwargs):
    """Error Handler to surface 4XX and 5XX errors.

    Attached as a callback hook on the Request object.

    Parameters
        response (requests.Response)
            The HTTP response from an API request.
        **kwargs
            Arbitrary keyword arguments.

    Raises
        ClientError (ApiError)
            Raised if response contains a 4XX status code.
        ServerError (ApiError)
            Raised if response contains a 5XX status code.

    Returns
        response (requests.Response)
            The original HTTP response from the API request.
    """
    if 400 <= response.status_code <= 499:
        # Parse the body exactly once instead of re-parsing it on every
        # access (the original called response.json() up to three times).
        body = response.json()
        if 'error_description' in body:
            message = body['error_description']
        else:
            message = body['error_detail']
        raise ClientError(response, message)
    elif 500 <= response.status_code <= 599:
        raise ServerError(response)

    return response
python
def error_handler(response, **kwargs): """Error Handler to surface 4XX and 5XX errors. Attached as a callback hook on the Request object. Parameters response (requests.Response) The HTTP response from an API request. **kwargs Arbitrary keyword arguments. Raises ClientError (ApiError) Raised if response contains a 4XX status code. ServerError (ApiError) Raised if response contains a 5XX status code. Returns response (requests.Response) The original HTTP response from the API request. """ if 400 <= response.status_code <= 499: message = response.json()['error_description'] \ if 'error_description' in response.json() \ else response.json()['error_detail'] raise ClientError(response, message) elif 500 <= response.status_code <= 599: raise ServerError(response) return response
[ "def", "error_handler", "(", "response", ",", "*", "*", "kwargs", ")", ":", "if", "400", "<=", "response", ".", "status_code", "<=", "499", ":", "message", "=", "response", ".", "json", "(", ")", "[", "'error_description'", "]", "if", "'error_description'"...
Error Handler to surface 4XX and 5XX errors. Attached as a callback hook on the Request object. Parameters response (requests.Response) The HTTP response from an API request. **kwargs Arbitrary keyword arguments. Raises ClientError (ApiError) Raised if response contains a 4XX status code. ServerError (ApiError) Raised if response contains a 5XX status code. Returns response (requests.Response) The original HTTP response from the API request.
[ "Error", "Handler", "to", "surface", "4XX", "and", "5XX", "errors", ".", "Attached", "as", "a", "callback", "hook", "on", "the", "Request", "object", ".", "Parameters", "response", "(", "requests", ".", "Response", ")", "The", "HTTP", "response", "from", "...
train
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/utils/handlers.py#L19-L45
tallforasmurf/byteplay
byteplay3.py
object_attributes
def object_attributes( thing, all=False ) :
    '''
    Return a sorted list of the attribute names of thing. By default the
    names every plain object also defines are filtered out — except
    __doc__, which is always kept. Pass all=True for the unfiltered list.
    '''
    names = set( dir( thing ) )
    if not all :
        # Drop the baseline object attributes, then restore __doc__.
        names.difference_update( dir( object() ) )
        names.add( '__doc__' )
    return sorted( names )
python
def object_attributes( thing, all=False ) : ''' Return a sorted list of names defined by thing that are not also names in a standard object, except include __doc__. ''' standard_names = set( dir( object() ) ) things_names = set( dir( thing ) ) if not all : things_names -= standard_names things_names |= set( ['__doc__'] ) return sorted( things_names )
[ "def", "object_attributes", "(", "thing", ",", "all", "=", "False", ")", ":", "standard_names", "=", "set", "(", "dir", "(", "object", "(", ")", ")", ")", "things_names", "=", "set", "(", "dir", "(", "thing", ")", ")", "if", "not", "all", ":", "thi...
Return a sorted list of names defined by thing that are not also names in a standard object, except include __doc__.
[ "Return", "a", "sorted", "list", "of", "names", "defined", "by", "thing", "that", "are", "not", "also", "names", "in", "a", "standard", "object", "except", "include", "__doc__", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L243-L253
tallforasmurf/byteplay
byteplay3.py
print_object_attributes
def print_object_attributes( thing, heading=None, file=None ):
    '''
    Print the names returned by object_attributes(thing), one per line,
    preceded by an optional "== heading ==" line.
    '''
    names = object_attributes( thing )
    if heading :
        print( '==', heading, '==', file=file )
    print( '\n'.join( names ), file=file )
python
def print_object_attributes( thing, heading=None, file=None ): ''' Print the attribute names in thing vertically ''' if heading : print( '==', heading, '==', file=file ) print( '\n'.join( object_attributes( thing ) ), file=file )
[ "def", "print_object_attributes", "(", "thing", ",", "heading", "=", "None", ",", "file", "=", "None", ")", ":", "if", "heading", ":", "print", "(", "'=='", ",", "heading", ",", "'=='", ",", "file", "=", "file", ")", "print", "(", "'\\n'", ".", "join...
Print the attribute names in thing vertically
[ "Print", "the", "attribute", "names", "in", "thing", "vertically" ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L255-L260
tallforasmurf/byteplay
byteplay3.py
print_attr_values
def print_attr_values( thing, all=False, heading=None, file=None ):
    '''
    Print the attributes of thing which have non-empty values, as a
    vertical list of "name : value". When all=True, print all attributes
    even those with empty values.

    heading may be a string to print verbatim, or any int (e.g. 1) to
    request a default heading built from thing's name.
    '''
    if heading :
        if isinstance( heading, int ) :
            # An int requests the default heading.
            heading = '== {} attributes of {} =='.format(
                'all' if all else 'non-empty',
                getattr( thing, '__name__', str(thing) )
                )
        print( heading, file=file )
    for attr in object_attributes( thing, all ):
        attr_value = getattr( thing, attr )
        if attr_value is not None :
            print( attr, ':', attr_value, file=file )
        elif all :
            # Bug fix: this branch previously omitted file=file, so
            # empty-valued attributes went to stdout even when a file
            # destination was given.
            print( attr, ':', file=file )
python
def print_attr_values( thing, all=False, heading=None, file=None ): ''' Print the attributes of thing which have non-empty values, as a vertical list of "name : value". When all=True, print all attributes even those with empty values. ''' if heading : if isinstance( heading, int ) : # request for default heading heading = '== {} attributes of {} =='.format( 'all' if all else 'non-empty', getattr( thing, '__name__', str(thing) ) ) print( heading, file=file ) for attr in object_attributes( thing, all ): attr_value = getattr( thing, attr ) if attr_value is not None : print( attr, ':', attr_value, file=file ) elif all : print( attr, ':' )
[ "def", "print_attr_values", "(", "thing", ",", "all", "=", "False", ",", "heading", "=", "None", ",", "file", "=", "None", ")", ":", "if", "heading", ":", "if", "isinstance", "(", "heading", ",", "int", ")", ":", "# request for default heading", "heading",...
Print the attributes of thing which have non-empty values, as a vertical list of "name : value". When all=True, print all attributes even those with empty values.
[ "Print", "the", "attributes", "of", "thing", "which", "have", "non", "-", "empty", "values", "as", "a", "vertical", "list", "of", "name", ":", "value", ".", "When", "all", "=", "True", "print", "all", "attributes", "even", "those", "with", "empty", "valu...
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L262-L282
tallforasmurf/byteplay
byteplay3.py
_get_a_code_object_from
def _get_a_code_object_from( thing ) : ''' Given a thing that might be a property, a class method, a function or a code object, reduce it to code object. If we cannot, return the thing itself. ''' # If we were passed a Method wrapper, get its function if isinstance( thing, types.MethodType ) : thing = thing.__func__ # If we were passed a property object, get its getter function # (no direct support for the fdel or fset functions) if hasattr( thing, 'fget' ) : thing = thing.fget # If we were passed, or now have, a function, get its code object. if isinstance( thing, types.FunctionType ) : thing = thing.__code__ # We should now have a code object, or will never have it. return thing
python
def _get_a_code_object_from( thing ) : ''' Given a thing that might be a property, a class method, a function or a code object, reduce it to code object. If we cannot, return the thing itself. ''' # If we were passed a Method wrapper, get its function if isinstance( thing, types.MethodType ) : thing = thing.__func__ # If we were passed a property object, get its getter function # (no direct support for the fdel or fset functions) if hasattr( thing, 'fget' ) : thing = thing.fget # If we were passed, or now have, a function, get its code object. if isinstance( thing, types.FunctionType ) : thing = thing.__code__ # We should now have a code object, or will never have it. return thing
[ "def", "_get_a_code_object_from", "(", "thing", ")", ":", "# If we were passed a Method wrapper, get its function", "if", "isinstance", "(", "thing", ",", "types", ".", "MethodType", ")", ":", "thing", "=", "thing", ".", "__func__", "# If we were passed a property object,...
Given a thing that might be a property, a class method, a function or a code object, reduce it to code object. If we cannot, return the thing itself.
[ "Given", "a", "thing", "that", "might", "be", "a", "property", "a", "class", "method", "a", "function", "or", "a", "code", "object", "reduce", "it", "to", "code", "object", ".", "If", "we", "cannot", "return", "the", "thing", "itself", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L585-L602
tallforasmurf/byteplay
byteplay3.py
printcodelist
def printcodelist(thing, to=sys.stdout, heading=None):
    '''
    Write the disassembly listing of thing to the given file, or to the
    default output, with an optional "===heading===" first line.

    thing may be a CodeList (or manually assembled list of code tuples),
    a Code object, or anything _get_a_code_object_from() can reduce to a
    code object (function, method, property, code object).

    A little Python 3 problem: if the to-file is in binary mode, we need
    to encode the strings, else a TypeError will be raised. Only "real"
    file objects have a .mode attribute, but every file-like object that
    supports *string* output has an .encoding attribute (StringIO
    included). So: if hasattr(to, 'encoding'), write the string as-is;
    otherwise encode it UTF-8 and write the byte string.
    '''
    # If we were passed a list, assume it is a CodeList or a manually
    # assembled list of code tuples; otherwise reduce it to a CodeList.
    if not isinstance( thing, list ) :
        if isinstance( thing, Code ):
            thing = thing.code
        else :
            # Convert various sources to a code object.
            thing = _get_a_code_object_from( thing )
            try :
                thing = Code.from_code( thing ).code
            except Exception as e:
                # Chain the original error to aid debugging.
                raise ValueError('Invalid input to printcodelist') from e
    # Get the whole disassembly as one string. The heading is prepended
    # *before* the text/bytes decision so it is encoded together with the
    # body.
    whole_thang = str( thing )
    if heading : # is not None or empty
        # Bug fix: the heading used to be encoded to bytes separately and
        # then concatenated with the str literals '===', which raised
        # TypeError for binary destinations. Compose first, encode after.
        whole_thang = '===' + heading + '===\n' + whole_thang
    # If destination is not a text file, encode to bytes.
    if not hasattr( to, 'encoding' ) :
        whole_thang = whole_thang.encode( 'UTF-8' )
    # Send it on its way.
    to.write( whole_thang )
python
def printcodelist(thing, to=sys.stdout, heading=None): ''' Write the lines of the codelist string list to the given file, or to the default output. A little Python 3 problem: if the to-file is in binary mode, we need to encode the strings, else a TypeError will be raised. Obvious answer, test for 'b' in to.mode? Nope, only "real" file objects have a mode attribute. StringIO objects, and the variant StringIO used as default sys.stdout, do not have .mode. However, all file-like objects that support string output DO have an encoding attribute. (StringIO has one that is an empty string, but it exists.) So, if hasattr(to,'encoding'), just shove the whole string into it. Otherwise, encode the string utf-8 and shove that bytestring into it. (See? Python 3 not so hard...) ''' # If we were passed a list, assume that it is a CodeList or # a manually-assembled list of code tuples. if not isinstance( thing, list ) : # Passed something else. Reduce it to a CodeList. if isinstance( thing, Code ): thing = thing.code else : # Convert various sources to a code object. thing = _get_a_code_object_from( thing ) try : thing = Code.from_code( thing ).code except Exception as e: raise ValueError('Invalid input to printcodelist') # We have a CodeList or equivalent, # get the whole disassembly as a string. whole_thang = str( thing ) # if destination not a text file, encode it to bytes if not hasattr( to, 'encoding' ) : whole_thang = whole_thang.encode( 'UTF-8' ) if heading : # is not None or empty heading = heading.encode( 'UTF-8' ) # send it on its way if heading : to.write( '===' + heading + '===\n' ) to.write( whole_thang )
[ "def", "printcodelist", "(", "thing", ",", "to", "=", "sys", ".", "stdout", ",", "heading", "=", "None", ")", ":", "# If we were passed a list, assume that it is a CodeList or", "# a manually-assembled list of code tuples.", "if", "not", "isinstance", "(", "thing", ",",...
Write the lines of the codelist string list to the given file, or to the default output. A little Python 3 problem: if the to-file is in binary mode, we need to encode the strings, else a TypeError will be raised. Obvious answer, test for 'b' in to.mode? Nope, only "real" file objects have a mode attribute. StringIO objects, and the variant StringIO used as default sys.stdout, do not have .mode. However, all file-like objects that support string output DO have an encoding attribute. (StringIO has one that is an empty string, but it exists.) So, if hasattr(to,'encoding'), just shove the whole string into it. Otherwise, encode the string utf-8 and shove that bytestring into it. (See? Python 3 not so hard...)
[ "Write", "the", "lines", "of", "the", "codelist", "string", "list", "to", "the", "given", "file", "or", "to", "the", "default", "output", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L605-L647
tallforasmurf/byteplay
byteplay3.py
Code._findlinestarts
def _findlinestarts(code_object): """ Find the offsets in a byte code which are the start of source lines. Generate pairs (offset, lineno) as described in Python/compile.c. This is a modified version of dis.findlinestarts. This version allows multiple "line starts" with the same line number. (The dis version conditions its yield on a test "if lineno != lastlineno".) FYI: code.co_lnotab is a byte array with one pair of bytes for each effective source line number in the bytecode. An effective line is one that generates code: not blank or comment lines. The first actual line number, typically the number of the "def" statement, is in code.co_firstlineno. An even byte of co_lnotab is the offset to the bytecode generated from the next effective line number. The following odd byte is an increment on the previous line's number to the next line's number. Thus co_firstlineno+co_lnotab[1] is the first effective line's number, and co_lnotab[0] is the number of bytes it generated. Note that an effective line number generates code by definition, hence the even byte cannot be zero; and as line numbers are monotonically increasing, the odd byte cannot be zero either. But what, the curious reader might ask, does Python do if a source line generates more than 255 bytes of code? In that *highly* unlikely case compile.c generates multiple pairs of (255,0) until it has accounted for all the generated code, then a final pair of (offset%256, lineincr). Oh, but what, the curious reader asks, do they do if there is a gap of more than 255 between effective line numbers? It is not unheard of to find blocks of comments larger than 255 lines (like this one?). Then compile.c generates pairs of (0, 255) until it has accounted for the line number difference and a final pair of (offset,lineincr%256). Uh, but...? Yes, what now, annoying reader? Well, does the following code handle these special cases of (255,0) and (0,255) properly? 
It handles the (0,255) case correctly, because of the "if byte_incr" test which skips the yield() but increments lineno. It does not handle the case of (255,0) correctly; it will yield false pairs (255,0). Fortunately that will only arise e.g. when disassembling some "obfuscated" code where most newlines are replaced with semicolons. Oh, and yes, the to_code() method does properly handle generation of the (255,0) and (0,255) entries correctly. """ # grab the even bytes as integer byte_increments: byte_increments = [c for c in code_object.co_lnotab[0::2]] # grab the odd bytes as integer line_increments: line_increments = [c for c in code_object.co_lnotab[1::2]] lineno = code_object.co_firstlineno addr = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: yield (addr, lineno) addr += byte_incr lineno += line_incr yield (addr, lineno)
python
def _findlinestarts(code_object): """ Find the offsets in a byte code which are the start of source lines. Generate pairs (offset, lineno) as described in Python/compile.c. This is a modified version of dis.findlinestarts. This version allows multiple "line starts" with the same line number. (The dis version conditions its yield on a test "if lineno != lastlineno".) FYI: code.co_lnotab is a byte array with one pair of bytes for each effective source line number in the bytecode. An effective line is one that generates code: not blank or comment lines. The first actual line number, typically the number of the "def" statement, is in code.co_firstlineno. An even byte of co_lnotab is the offset to the bytecode generated from the next effective line number. The following odd byte is an increment on the previous line's number to the next line's number. Thus co_firstlineno+co_lnotab[1] is the first effective line's number, and co_lnotab[0] is the number of bytes it generated. Note that an effective line number generates code by definition, hence the even byte cannot be zero; and as line numbers are monotonically increasing, the odd byte cannot be zero either. But what, the curious reader might ask, does Python do if a source line generates more than 255 bytes of code? In that *highly* unlikely case compile.c generates multiple pairs of (255,0) until it has accounted for all the generated code, then a final pair of (offset%256, lineincr). Oh, but what, the curious reader asks, do they do if there is a gap of more than 255 between effective line numbers? It is not unheard of to find blocks of comments larger than 255 lines (like this one?). Then compile.c generates pairs of (0, 255) until it has accounted for the line number difference and a final pair of (offset,lineincr%256). Uh, but...? Yes, what now, annoying reader? Well, does the following code handle these special cases of (255,0) and (0,255) properly? 
It handles the (0,255) case correctly, because of the "if byte_incr" test which skips the yield() but increments lineno. It does not handle the case of (255,0) correctly; it will yield false pairs (255,0). Fortunately that will only arise e.g. when disassembling some "obfuscated" code where most newlines are replaced with semicolons. Oh, and yes, the to_code() method does properly handle generation of the (255,0) and (0,255) entries correctly. """ # grab the even bytes as integer byte_increments: byte_increments = [c for c in code_object.co_lnotab[0::2]] # grab the odd bytes as integer line_increments: line_increments = [c for c in code_object.co_lnotab[1::2]] lineno = code_object.co_firstlineno addr = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: yield (addr, lineno) addr += byte_incr lineno += line_incr yield (addr, lineno)
[ "def", "_findlinestarts", "(", "code_object", ")", ":", "# grab the even bytes as integer byte_increments:", "byte_increments", "=", "[", "c", "for", "c", "in", "code_object", ".", "co_lnotab", "[", "0", ":", ":", "2", "]", "]", "# grab the odd bytes as integer line_i...
Find the offsets in a byte code which are the start of source lines. Generate pairs (offset, lineno) as described in Python/compile.c. This is a modified version of dis.findlinestarts. This version allows multiple "line starts" with the same line number. (The dis version conditions its yield on a test "if lineno != lastlineno".) FYI: code.co_lnotab is a byte array with one pair of bytes for each effective source line number in the bytecode. An effective line is one that generates code: not blank or comment lines. The first actual line number, typically the number of the "def" statement, is in code.co_firstlineno. An even byte of co_lnotab is the offset to the bytecode generated from the next effective line number. The following odd byte is an increment on the previous line's number to the next line's number. Thus co_firstlineno+co_lnotab[1] is the first effective line's number, and co_lnotab[0] is the number of bytes it generated. Note that an effective line number generates code by definition, hence the even byte cannot be zero; and as line numbers are monotonically increasing, the odd byte cannot be zero either. But what, the curious reader might ask, does Python do if a source line generates more than 255 bytes of code? In that *highly* unlikely case compile.c generates multiple pairs of (255,0) until it has accounted for all the generated code, then a final pair of (offset%256, lineincr). Oh, but what, the curious reader asks, do they do if there is a gap of more than 255 between effective line numbers? It is not unheard of to find blocks of comments larger than 255 lines (like this one?). Then compile.c generates pairs of (0, 255) until it has accounted for the line number difference and a final pair of (offset,lineincr%256). Uh, but...? Yes, what now, annoying reader? Well, does the following code handle these special cases of (255,0) and (0,255) properly? 
It handles the (0,255) case correctly, because of the "if byte_incr" test which skips the yield() but increments lineno. It does not handle the case of (255,0) correctly; it will yield false pairs (255,0). Fortunately that will only arise e.g. when disassembling some "obfuscated" code where most newlines are replaced with semicolons. Oh, and yes, the to_code() method does properly handle generation of the (255,0) and (0,255) entries correctly.
[ "Find", "the", "offsets", "in", "a", "byte", "code", "which", "are", "the", "start", "of", "source", "lines", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L808-L870
tallforasmurf/byteplay
byteplay3.py
Code.from_code
def from_code(cls, code_object): """ Disassemble a Python code object and make a Code object from the bits. This is the expected way to make a Code instance. But you are welcome to call Code() directly if you wish. """ # It's an annoyance to keep having to add ".__code__" to a function # name, so let's automate that when needed. if isinstance( code_object, types.FunctionType ) : code_object = code_object.__code__ # get the actual bytecode string out of the code object co_code = code_object.co_code # Use dis.findlabels to locate the labeled bytecodes, that is, the # ones that are jump targets. (They are "labeled" in a disassembly # printout.) Store the list as a dict{ addr: Label object} for easy # lookup. labels = dict((addr, Label()) for addr in findlabels(co_code)) # Make a dict{ source_line : offset } for the source lines in the code. linestarts = dict(cls._findlinestarts(code_object)) cellfree = code_object.co_cellvars + code_object.co_freevars # Create a CodeList object to represent the bytecode string. code = CodeList() # receives (op,arg) tuples n = len(co_code) # number bytes in the bytecode string i = 0 # index over the bytecode string extended_arg = 0 # upper 16 bits of an extended arg # Iterate over the bytecode string expanding it into (Opcode,arg) tuples. while i < n: # First byte is the opcode op = Opcode( co_code[i] ) # If this op is a jump-target, insert (Label,) ahead of it. if i in labels: code.append((labels[i], None)) # If this op is the first from a source line, insert # (SetLineno, line#) ahead of it. if i in linestarts: code.append((SetLineno, linestarts[i])) i += 1 # step index to the argument if any if op not in hasargx : # No argument, push the minimal tuple, done. code.append((op, None)) else: # op takes an argument. Look for MAKE_FUNCTION or MAKE_CLOSURE. if op in hascode : # special case: with these opcodes, at runtime, TOS1 should # be a code object. 
We require the normal opcode sequence: # LOAD_CONST the code object # LOAD_CONST the name of the function # MAKE_FUNCTION/CLOSURE # When this exists, go back and convert the argument of the # first LOAD_CONST from a code object to a Code object. if len(code) >= 2 \ and code[-2][0] == LOAD_CONST \ and code[-1][0] == LOAD_CONST \ and isinstance( code[-2][1], types.CodeType ) : code[-2] = ( Opcode(LOAD_CONST), Code.from_code( code[-2][1] ) ) else : raise ValueError( 'Invalid opcode sequence for MAKE_FUNCTION/MAKE_CLOSURE' ) # now continue and handle the argument of MAKE_F/C normally. # Assemble the argument value from two bytes plus an extended # arg when present. arg = co_code[i] + co_code[i+1]*256 + extended_arg extended_arg = 0 # clear extended arg bits if any i += 2 # Step over the argument if op == opcode.EXTENDED_ARG: # The EXTENDED_ARG op is just a way of storing the upper # 16 bits of a 32-bit arg in the bytestream. Collect # those bits, but generate no code tuple. extended_arg = arg << 16 elif op in hasconst: # When the argument is a constant, put the constant # itself in the opcode tuple. If that constant is a code # object, the test above (if op in hascode) will later # convert it into a Code object. code.append((op, code_object.co_consts[arg])) elif op in hasname: # When the argument is a name, put the name string itself # in the opcode tuple. code.append((op, code_object.co_names[arg])) elif op in hasjabs: # When the argument is an absolute jump, put the label # in the tuple (in place of the label list index) code.append((op, labels[arg])) elif op in hasjrel: # When the argument is a relative jump, put the label # in the tuple in place of the forward offset. code.append((op, labels[i + arg])) elif op in haslocal: # When the argument is a local var, put the name string # in the tuple. code.append((op, code_object.co_varnames[arg])) elif op in hascompare: # When the argument is a relation (like ">=") put that # string in the tuple instead. 
code.append((op, cmp_op[arg])) elif op in hasfree: code.append((op, cellfree[arg])) else: # whatever, just put the arg in the tuple code.append((op, arg)) # Store certain flags from the code object as booleans for convenient # reference as Code members. varargs = bool(code_object.co_flags & CO_VARARGS) varkwargs = bool(code_object.co_flags & CO_VARKEYWORDS) newlocals = bool(code_object.co_flags & CO_NEWLOCALS) # Get the names of arguments as strings, from the varnames tuple. The # order of name strings in co_varnames is: # co_argcount names of regular (positional-or-keyword) arguments # names of co_kwonlyargcount keyword-only arguments if any # name of a *vararg argument # name of a **kwarg argument if any (not present if kwonlyargs > 0) # names of other local variables # Hence the count of argument names is # co_argcount + co_kwonlyargcount + varargs + varkwargs nargs = code_object.co_argcount + code_object.co_kwonlyargcount + varargs + varkwargs args = code_object.co_varnames[ : nargs ] # Preserve a docstring if any. If there are constants and the first # constant is a string, Python assumes that's a docstring. docstring = None if code_object.co_consts and isinstance(code_object.co_consts[0], str): docstring = code_object.co_consts[0] # Funnel all the collected bits through the Code.__init__() method. return cls( code = code, freevars = code_object.co_freevars, args = args, varargs = varargs, varkwargs = varkwargs, kwonlyargcount = code_object.co_kwonlyargcount, newlocals = newlocals, coflags = code_object.co_flags, name = code_object.co_name, filename = code_object.co_filename, firstlineno = code_object.co_firstlineno, docstring = docstring )
python
def from_code(cls, code_object): """ Disassemble a Python code object and make a Code object from the bits. This is the expected way to make a Code instance. But you are welcome to call Code() directly if you wish. """ # It's an annoyance to keep having to add ".__code__" to a function # name, so let's automate that when needed. if isinstance( code_object, types.FunctionType ) : code_object = code_object.__code__ # get the actual bytecode string out of the code object co_code = code_object.co_code # Use dis.findlabels to locate the labeled bytecodes, that is, the # ones that are jump targets. (They are "labeled" in a disassembly # printout.) Store the list as a dict{ addr: Label object} for easy # lookup. labels = dict((addr, Label()) for addr in findlabels(co_code)) # Make a dict{ source_line : offset } for the source lines in the code. linestarts = dict(cls._findlinestarts(code_object)) cellfree = code_object.co_cellvars + code_object.co_freevars # Create a CodeList object to represent the bytecode string. code = CodeList() # receives (op,arg) tuples n = len(co_code) # number bytes in the bytecode string i = 0 # index over the bytecode string extended_arg = 0 # upper 16 bits of an extended arg # Iterate over the bytecode string expanding it into (Opcode,arg) tuples. while i < n: # First byte is the opcode op = Opcode( co_code[i] ) # If this op is a jump-target, insert (Label,) ahead of it. if i in labels: code.append((labels[i], None)) # If this op is the first from a source line, insert # (SetLineno, line#) ahead of it. if i in linestarts: code.append((SetLineno, linestarts[i])) i += 1 # step index to the argument if any if op not in hasargx : # No argument, push the minimal tuple, done. code.append((op, None)) else: # op takes an argument. Look for MAKE_FUNCTION or MAKE_CLOSURE. if op in hascode : # special case: with these opcodes, at runtime, TOS1 should # be a code object. 
We require the normal opcode sequence: # LOAD_CONST the code object # LOAD_CONST the name of the function # MAKE_FUNCTION/CLOSURE # When this exists, go back and convert the argument of the # first LOAD_CONST from a code object to a Code object. if len(code) >= 2 \ and code[-2][0] == LOAD_CONST \ and code[-1][0] == LOAD_CONST \ and isinstance( code[-2][1], types.CodeType ) : code[-2] = ( Opcode(LOAD_CONST), Code.from_code( code[-2][1] ) ) else : raise ValueError( 'Invalid opcode sequence for MAKE_FUNCTION/MAKE_CLOSURE' ) # now continue and handle the argument of MAKE_F/C normally. # Assemble the argument value from two bytes plus an extended # arg when present. arg = co_code[i] + co_code[i+1]*256 + extended_arg extended_arg = 0 # clear extended arg bits if any i += 2 # Step over the argument if op == opcode.EXTENDED_ARG: # The EXTENDED_ARG op is just a way of storing the upper # 16 bits of a 32-bit arg in the bytestream. Collect # those bits, but generate no code tuple. extended_arg = arg << 16 elif op in hasconst: # When the argument is a constant, put the constant # itself in the opcode tuple. If that constant is a code # object, the test above (if op in hascode) will later # convert it into a Code object. code.append((op, code_object.co_consts[arg])) elif op in hasname: # When the argument is a name, put the name string itself # in the opcode tuple. code.append((op, code_object.co_names[arg])) elif op in hasjabs: # When the argument is an absolute jump, put the label # in the tuple (in place of the label list index) code.append((op, labels[arg])) elif op in hasjrel: # When the argument is a relative jump, put the label # in the tuple in place of the forward offset. code.append((op, labels[i + arg])) elif op in haslocal: # When the argument is a local var, put the name string # in the tuple. code.append((op, code_object.co_varnames[arg])) elif op in hascompare: # When the argument is a relation (like ">=") put that # string in the tuple instead. 
code.append((op, cmp_op[arg])) elif op in hasfree: code.append((op, cellfree[arg])) else: # whatever, just put the arg in the tuple code.append((op, arg)) # Store certain flags from the code object as booleans for convenient # reference as Code members. varargs = bool(code_object.co_flags & CO_VARARGS) varkwargs = bool(code_object.co_flags & CO_VARKEYWORDS) newlocals = bool(code_object.co_flags & CO_NEWLOCALS) # Get the names of arguments as strings, from the varnames tuple. The # order of name strings in co_varnames is: # co_argcount names of regular (positional-or-keyword) arguments # names of co_kwonlyargcount keyword-only arguments if any # name of a *vararg argument # name of a **kwarg argument if any (not present if kwonlyargs > 0) # names of other local variables # Hence the count of argument names is # co_argcount + co_kwonlyargcount + varargs + varkwargs nargs = code_object.co_argcount + code_object.co_kwonlyargcount + varargs + varkwargs args = code_object.co_varnames[ : nargs ] # Preserve a docstring if any. If there are constants and the first # constant is a string, Python assumes that's a docstring. docstring = None if code_object.co_consts and isinstance(code_object.co_consts[0], str): docstring = code_object.co_consts[0] # Funnel all the collected bits through the Code.__init__() method. return cls( code = code, freevars = code_object.co_freevars, args = args, varargs = varargs, varkwargs = varkwargs, kwonlyargcount = code_object.co_kwonlyargcount, newlocals = newlocals, coflags = code_object.co_flags, name = code_object.co_name, filename = code_object.co_filename, firstlineno = code_object.co_firstlineno, docstring = docstring )
[ "def", "from_code", "(", "cls", ",", "code_object", ")", ":", "# It's an annoyance to keep having to add \".__code__\" to a function", "# name, so let's automate that when needed.", "if", "isinstance", "(", "code_object", ",", "types", ".", "FunctionType", ")", ":", "code_obj...
Disassemble a Python code object and make a Code object from the bits. This is the expected way to make a Code instance. But you are welcome to call Code() directly if you wish.
[ "Disassemble", "a", "Python", "code", "object", "and", "make", "a", "Code", "object", "from", "the", "bits", ".", "This", "is", "the", "expected", "way", "to", "make", "a", "Code", "instance", ".", "But", "you", "are", "welcome", "to", "call", "Code", ...
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L873-L1037
tallforasmurf/byteplay
byteplay3.py
Code._compute_stacksize
def _compute_stacksize(self): ''' Given this object's code list, compute its maximal stack usage. This is done by scanning the code, and computing for each opcode the stack state at the opcode. ''' # get local access to code, save some attribute lookups later code = self.code # A mapping from labels to their positions in the code list label_pos = { op : pos for pos, (op, arg) in enumerate(code) if isinstance(op, Label) } # sf_targets are the targets of SETUP_FINALLY opcodes. They are # recorded because they have special stack behaviour. If an exception # was raised in the block pushed by a SETUP_FINALLY opcode, the block # is popped and 3 objects are pushed. On return or continue, the # block is popped and 2 objects are pushed. If nothing happened, the # block is popped by a POP_BLOCK opcode and 1 object is pushed by a # (LOAD_CONST, None) operation. # # In Python 3, the targets of SETUP_WITH have similar behavior, # complicated by the fact that they also have an __exit__ method # stacked and what it returns determines what they pop. So their # stack depth is one greater, a fact we are going to ignore for the # time being :-/ # # Our solution is to record the stack state of SETUP_FINALLY targets # as having 3 objects pushed, which is the maximum. However, to make # stack recording consistent, the get_next_stacks function will always # yield the stack state of the target as if 1 object was pushed, but # this will be corrected in the actual stack recording. sf_targets = set( label_pos[arg] for op, arg in code if op == SETUP_FINALLY or op == SETUP_WITH ) # What we compute - for each opcode, its stack state, as an n-tuple. # n is the number of blocks pushed. For each block, we record the number # of objects pushed. 
stacks = [None] * len(code) def get_next_stacks(pos, curstack): """ Get a code position and the stack state before the operation was done, and yield pairs (pos, curstack) for the next positions to be explored - those are the positions to which you can get from the given (pos, curstack). If the given position was already explored, nothing will be yielded. """ op, arg = code[pos] if isinstance(op, Label): # We should check if we already reached a node only if it is # a label. if pos in sf_targets: # Adjust a SETUP_FINALLY from 1 to 3 stack entries. curstack = curstack[:-1] + (curstack[-1] + 2,) if stacks[pos] is None: stacks[pos] = curstack else: if stacks[pos] != curstack: raise ValueError("Inconsistent code") return def newstack(n): # Return a new stack, modified by adding n elements to the last # block if curstack[-1] + n < 0: raise ValueError("Popped a non-existing element") return curstack[:-1] + (curstack[-1]+n,) if not isopcode(op): # label or SetLineno - just continue to next line yield pos+1, curstack elif op in ( RETURN_VALUE, RAISE_VARARGS ): # No place in particular to continue to pass elif op in (JUMP_FORWARD, JUMP_ABSOLUTE): # One possibility for a jump yield label_pos[arg], curstack elif op in (POP_JUMP_IF_FALSE, POP_JUMP_IF_TRUE): # Two possibilities for a jump yield label_pos[arg], newstack(-1) yield pos+1, newstack(-1) elif op in (JUMP_IF_TRUE_OR_POP, JUMP_IF_FALSE_OR_POP): # Two possibilities for a jump yield label_pos[arg], curstack yield pos+1, newstack(-1) elif op == FOR_ITER: # FOR_ITER pushes next(TOS) on success, and pops TOS and jumps # on failure yield label_pos[arg], newstack(-1) yield pos+1, newstack(1) elif op == BREAK_LOOP: # BREAK_LOOP goes to the end of a loop and pops a block # but like RETURN_VALUE we have no instruction position # to give. For now treat like RETURN_VALUE pass elif op == CONTINUE_LOOP: # CONTINUE_LOOP jumps to the beginning of a loop which should # already have been discovered. 
It does not change the stack # state nor does it create or pop a block. #yield label_pos[arg], curstack #yield label_pos[arg], curstack[:-1] pass elif op == SETUP_LOOP: # We continue with a new block. # On break, we jump to the label and return to current stack # state. yield label_pos[arg], curstack yield pos+1, curstack + (0,) elif op == SETUP_EXCEPT: # We continue with a new block. # On exception, we jump to the label with 3 extra objects on # stack yield label_pos[arg], newstack(3) yield pos+1, curstack + (0,) elif op == SETUP_FINALLY or op == SETUP_WITH : # We continue with a new block. # On exception, we jump to the label with 3 extra objects on # stack, but to keep stack recording consistent, we behave as # if we add only 1 object. Extra 2 will be added to the actual # recording. yield label_pos[arg], newstack(1) yield pos+1, curstack + ( int(op == SETUP_WITH) ,) elif op == POP_BLOCK: # Just pop the block yield pos+1, curstack[:-1] elif op == END_FINALLY : # Since stack recording of SETUP_FINALLY targets is of 3 pushed # objects (as when an exception is raised), we pop 3 objects. yield pos+1, newstack(-3) elif op == _WITH_CLEANUP_OPCODE: # Since WITH_CLEANUP[_START] is always found after SETUP_FINALLY # targets, and the stack recording is that of a raised # exception, we can simply pop 1 object and let END_FINALLY # pop the remaining 3. yield pos+1, newstack(-1) else: # nothing special, use the CPython value yield pos+1, newstack( stack_effect( op, arg ) ) # Now comes the calculation: open_positions holds positions which are # yet to be explored. In each step we take one open position, and # explore it by appending the positions to which it can go, to # open_positions. On the way, we update maxsize. # # open_positions is a list of tuples: (pos, stack state) # # Sneaky Python coding trick here. get_next_stacks() is a generator, # it contains yield statements. So when we call get_next_stacks() # what is returned is an iterator. 
However, the yield statements in # get_next_stacks() are not in a loop as usual; rather it is # straight-line code that will execute 0, 1 or 2 yields depending on # the Opcode at pos. # # the list.extend() method takes an iterator and exhausts it, adding # all yielded values to the list. Hence the statement # # open_positions.extend(get_next_stacks(pos,curstack)) # # appends 0, 1 or 2 tuples (pos, stack_state) to open_positions. maxsize = 0 open_positions = [(0, (0,))] while open_positions: pos, curstack = open_positions.pop() maxsize = max(maxsize, sum(curstack)) open_positions.extend(get_next_stacks(pos, curstack)) return maxsize
python
def _compute_stacksize(self): ''' Given this object's code list, compute its maximal stack usage. This is done by scanning the code, and computing for each opcode the stack state at the opcode. ''' # get local access to code, save some attribute lookups later code = self.code # A mapping from labels to their positions in the code list label_pos = { op : pos for pos, (op, arg) in enumerate(code) if isinstance(op, Label) } # sf_targets are the targets of SETUP_FINALLY opcodes. They are # recorded because they have special stack behaviour. If an exception # was raised in the block pushed by a SETUP_FINALLY opcode, the block # is popped and 3 objects are pushed. On return or continue, the # block is popped and 2 objects are pushed. If nothing happened, the # block is popped by a POP_BLOCK opcode and 1 object is pushed by a # (LOAD_CONST, None) operation. # # In Python 3, the targets of SETUP_WITH have similar behavior, # complicated by the fact that they also have an __exit__ method # stacked and what it returns determines what they pop. So their # stack depth is one greater, a fact we are going to ignore for the # time being :-/ # # Our solution is to record the stack state of SETUP_FINALLY targets # as having 3 objects pushed, which is the maximum. However, to make # stack recording consistent, the get_next_stacks function will always # yield the stack state of the target as if 1 object was pushed, but # this will be corrected in the actual stack recording. sf_targets = set( label_pos[arg] for op, arg in code if op == SETUP_FINALLY or op == SETUP_WITH ) # What we compute - for each opcode, its stack state, as an n-tuple. # n is the number of blocks pushed. For each block, we record the number # of objects pushed. 
stacks = [None] * len(code) def get_next_stacks(pos, curstack): """ Get a code position and the stack state before the operation was done, and yield pairs (pos, curstack) for the next positions to be explored - those are the positions to which you can get from the given (pos, curstack). If the given position was already explored, nothing will be yielded. """ op, arg = code[pos] if isinstance(op, Label): # We should check if we already reached a node only if it is # a label. if pos in sf_targets: # Adjust a SETUP_FINALLY from 1 to 3 stack entries. curstack = curstack[:-1] + (curstack[-1] + 2,) if stacks[pos] is None: stacks[pos] = curstack else: if stacks[pos] != curstack: raise ValueError("Inconsistent code") return def newstack(n): # Return a new stack, modified by adding n elements to the last # block if curstack[-1] + n < 0: raise ValueError("Popped a non-existing element") return curstack[:-1] + (curstack[-1]+n,) if not isopcode(op): # label or SetLineno - just continue to next line yield pos+1, curstack elif op in ( RETURN_VALUE, RAISE_VARARGS ): # No place in particular to continue to pass elif op in (JUMP_FORWARD, JUMP_ABSOLUTE): # One possibility for a jump yield label_pos[arg], curstack elif op in (POP_JUMP_IF_FALSE, POP_JUMP_IF_TRUE): # Two possibilities for a jump yield label_pos[arg], newstack(-1) yield pos+1, newstack(-1) elif op in (JUMP_IF_TRUE_OR_POP, JUMP_IF_FALSE_OR_POP): # Two possibilities for a jump yield label_pos[arg], curstack yield pos+1, newstack(-1) elif op == FOR_ITER: # FOR_ITER pushes next(TOS) on success, and pops TOS and jumps # on failure yield label_pos[arg], newstack(-1) yield pos+1, newstack(1) elif op == BREAK_LOOP: # BREAK_LOOP goes to the end of a loop and pops a block # but like RETURN_VALUE we have no instruction position # to give. For now treat like RETURN_VALUE pass elif op == CONTINUE_LOOP: # CONTINUE_LOOP jumps to the beginning of a loop which should # already have been discovered. 
It does not change the stack # state nor does it create or pop a block. #yield label_pos[arg], curstack #yield label_pos[arg], curstack[:-1] pass elif op == SETUP_LOOP: # We continue with a new block. # On break, we jump to the label and return to current stack # state. yield label_pos[arg], curstack yield pos+1, curstack + (0,) elif op == SETUP_EXCEPT: # We continue with a new block. # On exception, we jump to the label with 3 extra objects on # stack yield label_pos[arg], newstack(3) yield pos+1, curstack + (0,) elif op == SETUP_FINALLY or op == SETUP_WITH : # We continue with a new block. # On exception, we jump to the label with 3 extra objects on # stack, but to keep stack recording consistent, we behave as # if we add only 1 object. Extra 2 will be added to the actual # recording. yield label_pos[arg], newstack(1) yield pos+1, curstack + ( int(op == SETUP_WITH) ,) elif op == POP_BLOCK: # Just pop the block yield pos+1, curstack[:-1] elif op == END_FINALLY : # Since stack recording of SETUP_FINALLY targets is of 3 pushed # objects (as when an exception is raised), we pop 3 objects. yield pos+1, newstack(-3) elif op == _WITH_CLEANUP_OPCODE: # Since WITH_CLEANUP[_START] is always found after SETUP_FINALLY # targets, and the stack recording is that of a raised # exception, we can simply pop 1 object and let END_FINALLY # pop the remaining 3. yield pos+1, newstack(-1) else: # nothing special, use the CPython value yield pos+1, newstack( stack_effect( op, arg ) ) # Now comes the calculation: open_positions holds positions which are # yet to be explored. In each step we take one open position, and # explore it by appending the positions to which it can go, to # open_positions. On the way, we update maxsize. # # open_positions is a list of tuples: (pos, stack state) # # Sneaky Python coding trick here. get_next_stacks() is a generator, # it contains yield statements. So when we call get_next_stacks() # what is returned is an iterator. 
However, the yield statements in # get_next_stacks() are not in a loop as usual; rather it is # straight-line code that will execute 0, 1 or 2 yields depending on # the Opcode at pos. # # the list.extend() method takes an iterator and exhausts it, adding # all yielded values to the list. Hence the statement # # open_positions.extend(get_next_stacks(pos,curstack)) # # appends 0, 1 or 2 tuples (pos, stack_state) to open_positions. maxsize = 0 open_positions = [(0, (0,))] while open_positions: pos, curstack = open_positions.pop() maxsize = max(maxsize, sum(curstack)) open_positions.extend(get_next_stacks(pos, curstack)) return maxsize
[ "def", "_compute_stacksize", "(", "self", ")", ":", "# get local access to code, save some attribute lookups later", "code", "=", "self", ".", "code", "# A mapping from labels to their positions in the code list", "label_pos", "=", "{", "op", ":", "pos", "for", "pos", ",", ...
Given this object's code list, compute its maximal stack usage. This is done by scanning the code, and computing for each opcode the stack state at the opcode.
[ "Given", "this", "object", "s", "code", "list", "compute", "its", "maximal", "stack", "usage", ".", "This", "is", "done", "by", "scanning", "the", "code", "and", "computing", "for", "each", "opcode", "the", "stack", "state", "at", "the", "opcode", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L1115-L1309
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.isomap
def isomap(self, num_dims=None, directed=None): '''Isomap embedding. num_dims : dimension of embedded coordinates, defaults to input dimension directed : used for .shortest_path() calculation ''' W = -0.5 * self.shortest_path(directed=directed) ** 2 kpca = KernelPCA(n_components=num_dims, kernel='precomputed') return kpca.fit_transform(W)
python
def isomap(self, num_dims=None, directed=None): '''Isomap embedding. num_dims : dimension of embedded coordinates, defaults to input dimension directed : used for .shortest_path() calculation ''' W = -0.5 * self.shortest_path(directed=directed) ** 2 kpca = KernelPCA(n_components=num_dims, kernel='precomputed') return kpca.fit_transform(W)
[ "def", "isomap", "(", "self", ",", "num_dims", "=", "None", ",", "directed", "=", "None", ")", ":", "W", "=", "-", "0.5", "*", "self", ".", "shortest_path", "(", "directed", "=", "directed", ")", "**", "2", "kpca", "=", "KernelPCA", "(", "n_component...
Isomap embedding. num_dims : dimension of embedded coordinates, defaults to input dimension directed : used for .shortest_path() calculation
[ "Isomap", "embedding", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L11-L19
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.laplacian_eigenmaps
def laplacian_eigenmaps(self, num_dims=None, normed=True, val_thresh=1e-8): '''Laplacian Eigenmaps embedding. num_dims : dimension of embedded coordinates, defaults to input dimension normed : used for .laplacian() calculation val_thresh : threshold for omitting vectors with near-zero eigenvalues ''' L = self.laplacian(normed=normed) return _null_space(L, num_dims, val_thresh, overwrite=True)
python
def laplacian_eigenmaps(self, num_dims=None, normed=True, val_thresh=1e-8): '''Laplacian Eigenmaps embedding. num_dims : dimension of embedded coordinates, defaults to input dimension normed : used for .laplacian() calculation val_thresh : threshold for omitting vectors with near-zero eigenvalues ''' L = self.laplacian(normed=normed) return _null_space(L, num_dims, val_thresh, overwrite=True)
[ "def", "laplacian_eigenmaps", "(", "self", ",", "num_dims", "=", "None", ",", "normed", "=", "True", ",", "val_thresh", "=", "1e-8", ")", ":", "L", "=", "self", ".", "laplacian", "(", "normed", "=", "normed", ")", "return", "_null_space", "(", "L", ","...
Laplacian Eigenmaps embedding. num_dims : dimension of embedded coordinates, defaults to input dimension normed : used for .laplacian() calculation val_thresh : threshold for omitting vectors with near-zero eigenvalues
[ "Laplacian", "Eigenmaps", "embedding", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L21-L29
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.locality_preserving_projections
def locality_preserving_projections(self, coordinates, num_dims=None): '''Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).''' X = np.atleast_2d(coordinates) # n x d L = self.laplacian(normed=True) # n x n u,s,_ = np.linalg.svd(X.T.dot(X)) Fplus = np.linalg.pinv(u * np.sqrt(s)) # d x d n, d = X.shape if n >= d: # optimized order: F(X'LX)F' T = Fplus.dot(X.T.dot(L.dot(X))).dot(Fplus.T) else: # optimized order: (FX')L(XF') T = Fplus.dot(X.T).dot(L.dot(X.dot(Fplus.T))) L = 0.5*(T+T.T) return _null_space(L, num_vecs=num_dims, overwrite=True)
python
def locality_preserving_projections(self, coordinates, num_dims=None): '''Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).''' X = np.atleast_2d(coordinates) # n x d L = self.laplacian(normed=True) # n x n u,s,_ = np.linalg.svd(X.T.dot(X)) Fplus = np.linalg.pinv(u * np.sqrt(s)) # d x d n, d = X.shape if n >= d: # optimized order: F(X'LX)F' T = Fplus.dot(X.T.dot(L.dot(X))).dot(Fplus.T) else: # optimized order: (FX')L(XF') T = Fplus.dot(X.T).dot(L.dot(X.dot(Fplus.T))) L = 0.5*(T+T.T) return _null_space(L, num_vecs=num_dims, overwrite=True)
[ "def", "locality_preserving_projections", "(", "self", ",", "coordinates", ",", "num_dims", "=", "None", ")", ":", "X", "=", "np", ".", "atleast_2d", "(", "coordinates", ")", "# n x d", "L", "=", "self", ".", "laplacian", "(", "normed", "=", "True", ")", ...
Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).
[ "Locality", "Preserving", "Projections", "(", "LPP", "linearized", "Laplacian", "Eigenmaps", ")", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L31-L43
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.locally_linear_embedding
def locally_linear_embedding(self, num_dims=None): '''Locally Linear Embedding (LLE). Note: may need to call barycenter_edge_weights() before this! ''' W = self.matrix() # compute M = (I-W)'(I-W) M = W.T.dot(W) - W.T - W if issparse(M): M = M.toarray() M.flat[::M.shape[0] + 1] += 1 return _null_space(M, num_vecs=num_dims, overwrite=True)
python
def locally_linear_embedding(self, num_dims=None): '''Locally Linear Embedding (LLE). Note: may need to call barycenter_edge_weights() before this! ''' W = self.matrix() # compute M = (I-W)'(I-W) M = W.T.dot(W) - W.T - W if issparse(M): M = M.toarray() M.flat[::M.shape[0] + 1] += 1 return _null_space(M, num_vecs=num_dims, overwrite=True)
[ "def", "locally_linear_embedding", "(", "self", ",", "num_dims", "=", "None", ")", ":", "W", "=", "self", ".", "matrix", "(", ")", "# compute M = (I-W)'(I-W)", "M", "=", "W", ".", "T", ".", "dot", "(", "W", ")", "-", "W", ".", "T", "-", "W", "if", ...
Locally Linear Embedding (LLE). Note: may need to call barycenter_edge_weights() before this!
[ "Locally", "Linear", "Embedding", "(", "LLE", ")", ".", "Note", ":", "may", "need", "to", "call", "barycenter_edge_weights", "()", "before", "this!" ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L45-L55
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.neighborhood_preserving_embedding
def neighborhood_preserving_embedding(self, X, num_dims=None, reweight=True): '''Neighborhood Preserving Embedding (NPE, linearized LLE).''' if reweight: W = self.barycenter_edge_weights(X).matrix() else: W = self.matrix() # compute M = (I-W)'(I-W) as in LLE M = W.T.dot(W) - W.T - W if issparse(M): M = M.toarray() M.flat[::M.shape[0] + 1] += 1 # solve generalized eig problem: X'MXa = \lambda X'Xa vals, vecs = eig(X.T.dot(M).dot(X), X.T.dot(X), overwrite_a=True, overwrite_b=True) if num_dims is None: return vecs return vecs[:,:num_dims]
python
def neighborhood_preserving_embedding(self, X, num_dims=None, reweight=True): '''Neighborhood Preserving Embedding (NPE, linearized LLE).''' if reweight: W = self.barycenter_edge_weights(X).matrix() else: W = self.matrix() # compute M = (I-W)'(I-W) as in LLE M = W.T.dot(W) - W.T - W if issparse(M): M = M.toarray() M.flat[::M.shape[0] + 1] += 1 # solve generalized eig problem: X'MXa = \lambda X'Xa vals, vecs = eig(X.T.dot(M).dot(X), X.T.dot(X), overwrite_a=True, overwrite_b=True) if num_dims is None: return vecs return vecs[:,:num_dims]
[ "def", "neighborhood_preserving_embedding", "(", "self", ",", "X", ",", "num_dims", "=", "None", ",", "reweight", "=", "True", ")", ":", "if", "reweight", ":", "W", "=", "self", ".", "barycenter_edge_weights", "(", "X", ")", ".", "matrix", "(", ")", "els...
Neighborhood Preserving Embedding (NPE, linearized LLE).
[ "Neighborhood", "Preserving", "Embedding", "(", "NPE", "linearized", "LLE", ")", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L57-L73
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.laplacian_pca
def laplacian_pca(self, coordinates, num_dims=None, beta=0.5): '''Graph-Laplacian PCA (CVPR 2013). coordinates : (n,d) array-like, assumed to be mean-centered. beta : float in [0,1], scales how much PCA/LapEig contributes. Returns an approximation of input coordinates, ala PCA.''' X = np.atleast_2d(coordinates) L = self.laplacian(normed=True) kernel = X.dot(X.T) kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False) L /= eigsh(L, k=1, which='LM', return_eigenvectors=False) W = (1-beta)*(np.identity(kernel.shape[0]) - kernel) + beta*L if num_dims is None: vals, vecs = np.linalg.eigh(W) else: vals, vecs = eigh(W, eigvals=(0, num_dims-1), overwrite_a=True) return X.T.dot(vecs).dot(vecs.T).T
python
def laplacian_pca(self, coordinates, num_dims=None, beta=0.5): '''Graph-Laplacian PCA (CVPR 2013). coordinates : (n,d) array-like, assumed to be mean-centered. beta : float in [0,1], scales how much PCA/LapEig contributes. Returns an approximation of input coordinates, ala PCA.''' X = np.atleast_2d(coordinates) L = self.laplacian(normed=True) kernel = X.dot(X.T) kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False) L /= eigsh(L, k=1, which='LM', return_eigenvectors=False) W = (1-beta)*(np.identity(kernel.shape[0]) - kernel) + beta*L if num_dims is None: vals, vecs = np.linalg.eigh(W) else: vals, vecs = eigh(W, eigvals=(0, num_dims-1), overwrite_a=True) return X.T.dot(vecs).dot(vecs.T).T
[ "def", "laplacian_pca", "(", "self", ",", "coordinates", ",", "num_dims", "=", "None", ",", "beta", "=", "0.5", ")", ":", "X", "=", "np", ".", "atleast_2d", "(", "coordinates", ")", "L", "=", "self", ".", "laplacian", "(", "normed", "=", "True", ")",...
Graph-Laplacian PCA (CVPR 2013). coordinates : (n,d) array-like, assumed to be mean-centered. beta : float in [0,1], scales how much PCA/LapEig contributes. Returns an approximation of input coordinates, ala PCA.
[ "Graph", "-", "Laplacian", "PCA", "(", "CVPR", "2013", ")", ".", "coordinates", ":", "(", "n", "d", ")", "array", "-", "like", "assumed", "to", "be", "mean", "-", "centered", ".", "beta", ":", "float", "in", "[", "0", "1", "]", "scales", "how", "...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L75-L90
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.layout_circle
def layout_circle(self): '''Position vertices evenly around a circle.''' n = self.num_vertices() t = np.linspace(0, 2*np.pi, n+1)[:n] return np.column_stack((np.cos(t), np.sin(t)))
python
def layout_circle(self): '''Position vertices evenly around a circle.''' n = self.num_vertices() t = np.linspace(0, 2*np.pi, n+1)[:n] return np.column_stack((np.cos(t), np.sin(t)))
[ "def", "layout_circle", "(", "self", ")", ":", "n", "=", "self", ".", "num_vertices", "(", ")", "t", "=", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "n", "+", "1", ")", "[", ":", "n", "]", "return", "np", ".", "co...
Position vertices evenly around a circle.
[ "Position", "vertices", "evenly", "around", "a", "circle", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L92-L96
all-umass/graphs
graphs/mixins/embed.py
EmbedMixin.layout_spring
def layout_spring(self, num_dims=2, spring_constant=None, iterations=50, initial_temp=0.1, initial_layout=None): '''Position vertices using the Fruchterman-Reingold (spring) algorithm. num_dims : int (default=2) Number of dimensions to embed vertices in. spring_constant : float (default=None) Optimal distance between nodes. If None the distance is set to 1/sqrt(n) where n is the number of nodes. Increase this value to move nodes farther apart. iterations : int (default=50) Number of iterations of spring-force relaxation initial_temp : float (default=0.1) Largest step-size allowed in the dynamics, decays linearly. Must be positive, should probably be less than 1. initial_layout : array-like of shape (n, num_dims) If provided, serves as the initial placement of vertex coordinates. ''' if initial_layout is None: X = np.random.random((self.num_vertices(), num_dims)) else: X = np.array(initial_layout, dtype=float, copy=True) assert X.shape == (self.num_vertices(), num_dims) if spring_constant is None: # default to sqrt(area_of_viewport / num_vertices) spring_constant = X.shape[0] ** -0.5 S = self.matrix('csr', 'csc', 'coo', copy=True) S.data[:] = 1. / S.data # Convert to similarity ii,jj = S.nonzero() # cache nonzero indices # simple cooling scheme, linearly steps down cooling_scheme = np.linspace(initial_temp, 0, iterations+2)[:-2] # this is still O(V^2) # could use multilevel methods to speed this up significantly for t in cooling_scheme: delta = X[:,None] - X[None] distance = _bounded_norm(delta, 1e-8) # repulsion from all vertices force = spring_constant**2 / distance # attraction from connected vertices force[ii,jj] -= S.data * distance[ii,jj]**2 / spring_constant displacement = np.einsum('ijk,ij->ik', delta, force) # update positions length = _bounded_norm(displacement, 1e-2) X += displacement * t / length[:,None] return X
python
def layout_spring(self, num_dims=2, spring_constant=None, iterations=50, initial_temp=0.1, initial_layout=None): '''Position vertices using the Fruchterman-Reingold (spring) algorithm. num_dims : int (default=2) Number of dimensions to embed vertices in. spring_constant : float (default=None) Optimal distance between nodes. If None the distance is set to 1/sqrt(n) where n is the number of nodes. Increase this value to move nodes farther apart. iterations : int (default=50) Number of iterations of spring-force relaxation initial_temp : float (default=0.1) Largest step-size allowed in the dynamics, decays linearly. Must be positive, should probably be less than 1. initial_layout : array-like of shape (n, num_dims) If provided, serves as the initial placement of vertex coordinates. ''' if initial_layout is None: X = np.random.random((self.num_vertices(), num_dims)) else: X = np.array(initial_layout, dtype=float, copy=True) assert X.shape == (self.num_vertices(), num_dims) if spring_constant is None: # default to sqrt(area_of_viewport / num_vertices) spring_constant = X.shape[0] ** -0.5 S = self.matrix('csr', 'csc', 'coo', copy=True) S.data[:] = 1. / S.data # Convert to similarity ii,jj = S.nonzero() # cache nonzero indices # simple cooling scheme, linearly steps down cooling_scheme = np.linspace(initial_temp, 0, iterations+2)[:-2] # this is still O(V^2) # could use multilevel methods to speed this up significantly for t in cooling_scheme: delta = X[:,None] - X[None] distance = _bounded_norm(delta, 1e-8) # repulsion from all vertices force = spring_constant**2 / distance # attraction from connected vertices force[ii,jj] -= S.data * distance[ii,jj]**2 / spring_constant displacement = np.einsum('ijk,ij->ik', delta, force) # update positions length = _bounded_norm(displacement, 1e-2) X += displacement * t / length[:,None] return X
[ "def", "layout_spring", "(", "self", ",", "num_dims", "=", "2", ",", "spring_constant", "=", "None", ",", "iterations", "=", "50", ",", "initial_temp", "=", "0.1", ",", "initial_layout", "=", "None", ")", ":", "if", "initial_layout", "is", "None", ":", "...
Position vertices using the Fruchterman-Reingold (spring) algorithm. num_dims : int (default=2) Number of dimensions to embed vertices in. spring_constant : float (default=None) Optimal distance between nodes. If None the distance is set to 1/sqrt(n) where n is the number of nodes. Increase this value to move nodes farther apart. iterations : int (default=50) Number of iterations of spring-force relaxation initial_temp : float (default=0.1) Largest step-size allowed in the dynamics, decays linearly. Must be positive, should probably be less than 1. initial_layout : array-like of shape (n, num_dims) If provided, serves as the initial placement of vertex coordinates.
[ "Position", "vertices", "using", "the", "Fruchterman", "-", "Reingold", "(", "spring", ")", "algorithm", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/embed.py#L98-L146
calmjs/calmjs.parse
src/calmjs/parse/unparsers/walker.py
optimize_structure_handler
def optimize_structure_handler(rule, handler): """ Produce an "optimized" version of handler for the dispatcher to limit reference lookups. """ def runner(walk, dispatcher, node): handler(dispatcher, node) return yield # pragma: no cover return runner
python
def optimize_structure_handler(rule, handler): """ Produce an "optimized" version of handler for the dispatcher to limit reference lookups. """ def runner(walk, dispatcher, node): handler(dispatcher, node) return yield # pragma: no cover return runner
[ "def", "optimize_structure_handler", "(", "rule", ",", "handler", ")", ":", "def", "runner", "(", "walk", ",", "dispatcher", ",", "node", ")", ":", "handler", "(", "dispatcher", ",", "node", ")", "return", "yield", "# pragma: no cover", "return", "runner" ]
Produce an "optimized" version of handler for the dispatcher to limit reference lookups.
[ "Produce", "an", "optimized", "version", "of", "handler", "for", "the", "dispatcher", "to", "limit", "reference", "lookups", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/unparsers/walker.py#L16-L27
calmjs/calmjs.parse
src/calmjs/parse/unparsers/walker.py
optimize_layout_handler
def optimize_layout_handler(rule, handler): """ Produce an "optimized" version of handler for the dispatcher to limit reference lookups. """ def runner(walk, dispatcher, node): yield LayoutChunk(rule, handler, node) return runner
python
def optimize_layout_handler(rule, handler): """ Produce an "optimized" version of handler for the dispatcher to limit reference lookups. """ def runner(walk, dispatcher, node): yield LayoutChunk(rule, handler, node) return runner
[ "def", "optimize_layout_handler", "(", "rule", ",", "handler", ")", ":", "def", "runner", "(", "walk", ",", "dispatcher", ",", "node", ")", ":", "yield", "LayoutChunk", "(", "rule", ",", "handler", ",", "node", ")", "return", "runner" ]
Produce an "optimized" version of handler for the dispatcher to limit reference lookups.
[ "Produce", "an", "optimized", "version", "of", "handler", "for", "the", "dispatcher", "to", "limit", "reference", "lookups", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/unparsers/walker.py#L30-L39
calmjs/calmjs.parse
src/calmjs/parse/unparsers/walker.py
walk
def walk(dispatcher, node, definition=None): """ The default, standalone walk function following the standard argument ordering for the unparsing walkers. Arguments: dispatcher a Dispatcher instance, defined earlier in this module. This instance will dispatch out the correct callable for the various object types encountered throughout this recursive function. node the starting Node from asttypes. definition a standalone definition tuple to start working on the node with; if none is provided, an initial definition will be looked up using the dispatcher with the node for the generation of output. While the dispatcher object is able to provide the lookup directly, this extra definition argument allow more flexibility in having Token subtypes being able to provide specific definitions also that may be required, such as the generation of optional rendering output. """ # The inner walk function - this is actually exposed to the token # rule objects so they can also make use of it to process the node # with the dispatcher. nodes = [] sourcepath_stack = [NotImplemented] def _walk(dispatcher, node, definition=None, token=None): if not isinstance(node, Node): for fragment in dispatcher.token( token, nodes[-1], node, sourcepath_stack): yield fragment return push = bool(node.sourcepath) if push: sourcepath_stack.append(node.sourcepath) nodes.append(node) if definition is None: definition = dispatcher.get_optimized_definition(node) for rule in definition: for chunk in rule(_walk, dispatcher, node): yield chunk nodes.pop(-1) if push: sourcepath_stack.pop(-1) # Format layout markers are not handled immediately in the walk - # they will simply be buffered so that a collection of them can be # handled at once. 
def process_layouts(layout_rule_chunks, last_chunk, chunk): before_text = last_chunk.text if last_chunk else None after_text = chunk.text if chunk else None # the text that was yielded by the previous layout handler prev_text = None # While Layout rules in a typical definition are typically # interspersed with Tokens, certain assumptions with how the # Layouts are specified within there will fail when Tokens fail # to generate anything for any reason. However, the dispatcher # instance will be able to accept and resolve a tuple of Layouts # to some handler function, so that a form of normalization can # be done. For instance, an (Indent, Newline, Dedent) can # simply be resolved to no operations. To achieve this, iterate # through the layout_rule_chunks and generate a normalized form # for the final handling to happen. # the preliminary stack that will be cleared whenever a # normalized layout rule chunk is generated. lrcs_stack = [] # first pass: generate both the normalized/finalized lrcs. for lrc in layout_rule_chunks: lrcs_stack.append(lrc) # check every single chunk from left to right... for idx in range(len(lrcs_stack)): rule = tuple(lrc.rule for lrc in lrcs_stack[idx:]) handler = dispatcher.layout(rule) if handler is not NotImplemented: # not manipulating lrsc_stack from within the same # for loop that it is being iterated upon break else: # which continues back to the top of the outer for loop continue # So a handler is found from inside the rules; extend the # chunks from the stack that didn't get normalized, and # generate a new layout rule chunk. lrcs_stack[:] = lrcs_stack[:idx] lrcs_stack.append(LayoutChunk( rule, handler, layout_rule_chunks[idx].node, )) # second pass: now the processing can be done. 
for lr_chunk in lrcs_stack: gen = lr_chunk.handler( dispatcher, lr_chunk.node, before_text, after_text, prev_text) if not gen: continue for chunk_from_layout in gen: yield chunk_from_layout prev_text = chunk_from_layout.text # The top level walker implementation def walk(): last_chunk = None layout_rule_chunks = [] for chunk in _walk(dispatcher, node, definition): if isinstance(chunk, LayoutChunk): layout_rule_chunks.append(chunk) else: # process layout rule chunks that had been cached. for chunk_from_layout in process_layouts( layout_rule_chunks, last_chunk, chunk): yield chunk_from_layout layout_rule_chunks[:] = [] yield chunk last_chunk = chunk # process the remaining layout rule chunks. for chunk_from_layout in process_layouts( layout_rule_chunks, last_chunk, None): yield chunk_from_layout for chunk in walk(): yield chunk
python
def walk(dispatcher, node, definition=None): """ The default, standalone walk function following the standard argument ordering for the unparsing walkers. Arguments: dispatcher a Dispatcher instance, defined earlier in this module. This instance will dispatch out the correct callable for the various object types encountered throughout this recursive function. node the starting Node from asttypes. definition a standalone definition tuple to start working on the node with; if none is provided, an initial definition will be looked up using the dispatcher with the node for the generation of output. While the dispatcher object is able to provide the lookup directly, this extra definition argument allow more flexibility in having Token subtypes being able to provide specific definitions also that may be required, such as the generation of optional rendering output. """ # The inner walk function - this is actually exposed to the token # rule objects so they can also make use of it to process the node # with the dispatcher. nodes = [] sourcepath_stack = [NotImplemented] def _walk(dispatcher, node, definition=None, token=None): if not isinstance(node, Node): for fragment in dispatcher.token( token, nodes[-1], node, sourcepath_stack): yield fragment return push = bool(node.sourcepath) if push: sourcepath_stack.append(node.sourcepath) nodes.append(node) if definition is None: definition = dispatcher.get_optimized_definition(node) for rule in definition: for chunk in rule(_walk, dispatcher, node): yield chunk nodes.pop(-1) if push: sourcepath_stack.pop(-1) # Format layout markers are not handled immediately in the walk - # they will simply be buffered so that a collection of them can be # handled at once. 
def process_layouts(layout_rule_chunks, last_chunk, chunk): before_text = last_chunk.text if last_chunk else None after_text = chunk.text if chunk else None # the text that was yielded by the previous layout handler prev_text = None # While Layout rules in a typical definition are typically # interspersed with Tokens, certain assumptions with how the # Layouts are specified within there will fail when Tokens fail # to generate anything for any reason. However, the dispatcher # instance will be able to accept and resolve a tuple of Layouts # to some handler function, so that a form of normalization can # be done. For instance, an (Indent, Newline, Dedent) can # simply be resolved to no operations. To achieve this, iterate # through the layout_rule_chunks and generate a normalized form # for the final handling to happen. # the preliminary stack that will be cleared whenever a # normalized layout rule chunk is generated. lrcs_stack = [] # first pass: generate both the normalized/finalized lrcs. for lrc in layout_rule_chunks: lrcs_stack.append(lrc) # check every single chunk from left to right... for idx in range(len(lrcs_stack)): rule = tuple(lrc.rule for lrc in lrcs_stack[idx:]) handler = dispatcher.layout(rule) if handler is not NotImplemented: # not manipulating lrsc_stack from within the same # for loop that it is being iterated upon break else: # which continues back to the top of the outer for loop continue # So a handler is found from inside the rules; extend the # chunks from the stack that didn't get normalized, and # generate a new layout rule chunk. lrcs_stack[:] = lrcs_stack[:idx] lrcs_stack.append(LayoutChunk( rule, handler, layout_rule_chunks[idx].node, )) # second pass: now the processing can be done. 
for lr_chunk in lrcs_stack: gen = lr_chunk.handler( dispatcher, lr_chunk.node, before_text, after_text, prev_text) if not gen: continue for chunk_from_layout in gen: yield chunk_from_layout prev_text = chunk_from_layout.text # The top level walker implementation def walk(): last_chunk = None layout_rule_chunks = [] for chunk in _walk(dispatcher, node, definition): if isinstance(chunk, LayoutChunk): layout_rule_chunks.append(chunk) else: # process layout rule chunks that had been cached. for chunk_from_layout in process_layouts( layout_rule_chunks, last_chunk, chunk): yield chunk_from_layout layout_rule_chunks[:] = [] yield chunk last_chunk = chunk # process the remaining layout rule chunks. for chunk_from_layout in process_layouts( layout_rule_chunks, last_chunk, None): yield chunk_from_layout for chunk in walk(): yield chunk
[ "def", "walk", "(", "dispatcher", ",", "node", ",", "definition", "=", "None", ")", ":", "# The inner walk function - this is actually exposed to the token", "# rule objects so they can also make use of it to process the node", "# with the dispatcher.", "nodes", "=", "[", "]", ...
The default, standalone walk function following the standard argument ordering for the unparsing walkers. Arguments: dispatcher a Dispatcher instance, defined earlier in this module. This instance will dispatch out the correct callable for the various object types encountered throughout this recursive function. node the starting Node from asttypes. definition a standalone definition tuple to start working on the node with; if none is provided, an initial definition will be looked up using the dispatcher with the node for the generation of output. While the dispatcher object is able to provide the lookup directly, this extra definition argument allow more flexibility in having Token subtypes being able to provide specific definitions also that may be required, such as the generation of optional rendering output.
[ "The", "default", "standalone", "walk", "function", "following", "the", "standard", "argument", "ordering", "for", "the", "unparsing", "walkers", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/unparsers/walker.py#L226-L365
garyp/sifter
sifter/grammar/lexer.py
t_QUOTED_STRING
def t_QUOTED_STRING(t): r'"([^"\\]|\\["\\])*"' # TODO: Add support for: # - An undefined escape sequence (such as "\a" in a context where "a" # has no special meaning) is interpreted as if there were no backslash # (in this case, "\a" is just "a"), though that may be changed by # extensions. # - Non-printing characters such as tabs, CRLF, and control characters # are permitted in quoted strings. Quoted strings MAY span multiple # lines. An unencoded NUL (US-ASCII 0) is not allowed in strings. t.value = t.value.strip('"').replace(r'\"', '"').replace(r'\\', '\\') return t
python
def t_QUOTED_STRING(t): r'"([^"\\]|\\["\\])*"' # TODO: Add support for: # - An undefined escape sequence (such as "\a" in a context where "a" # has no special meaning) is interpreted as if there were no backslash # (in this case, "\a" is just "a"), though that may be changed by # extensions. # - Non-printing characters such as tabs, CRLF, and control characters # are permitted in quoted strings. Quoted strings MAY span multiple # lines. An unencoded NUL (US-ASCII 0) is not allowed in strings. t.value = t.value.strip('"').replace(r'\"', '"').replace(r'\\', '\\') return t
[ "def", "t_QUOTED_STRING", "(", "t", ")", ":", "# TODO: Add support for:", "# - An undefined escape sequence (such as \"\\a\" in a context where \"a\"", "# has no special meaning) is interpreted as if there were no backslash", "# (in this case, \"\\a\" is just \"a\"), though that may be changed by"...
r'"([^"\\]|\\["\\])*"
[ "r", "(", "[", "^", "\\\\", "]", "|", "\\\\", "[", "\\\\", "]", ")", "*" ]
train
https://github.com/garyp/sifter/blob/9c472af76853c1196387141e017114d282637474/sifter/grammar/lexer.py#L55-L66
garyp/sifter
sifter/grammar/lexer.py
t_NUMBER
def t_NUMBER(t): r'[0-9]+[KkMmGg]?' exponents = { 'G' : 30, 'g' : 30, 'M' : 20, 'm' : 20, 'K' : 10, 'k' : 10, } if t.value[-1] in exponents: t.value = math.ldexp(int(t.value[:-1]), exponents[t.value[-1]]) else: t.value = int(t.value) return t
python
def t_NUMBER(t): r'[0-9]+[KkMmGg]?' exponents = { 'G' : 30, 'g' : 30, 'M' : 20, 'm' : 20, 'K' : 10, 'k' : 10, } if t.value[-1] in exponents: t.value = math.ldexp(int(t.value[:-1]), exponents[t.value[-1]]) else: t.value = int(t.value) return t
[ "def", "t_NUMBER", "(", "t", ")", ":", "exponents", "=", "{", "'G'", ":", "30", ",", "'g'", ":", "30", ",", "'M'", ":", "20", ",", "'m'", ":", "20", ",", "'K'", ":", "10", ",", "'k'", ":", "10", ",", "}", "if", "t", ".", "value", "[", "-"...
r'[0-9]+[KkMmGg]?
[ "r", "[", "0", "-", "9", "]", "+", "[", "KkMmGg", "]", "?" ]
train
https://github.com/garyp/sifter/blob/9c472af76853c1196387141e017114d282637474/sifter/grammar/lexer.py#L79-L90
ScatterHQ/machinist
machinist/_fsm.py
_missingExtraCheck
def _missingExtraCheck(given, required, extraException, missingException): """ If the L{sets<set>} C{required} and C{given} do not contain the same elements raise an exception describing how they are different. @param given: The L{set} of elements that was actually given. @param required: The L{set} of elements that must be given. @param extraException: An exception to raise if there are elements in C{given} that are not in C{required}. @param missingException: An exception to raise if there are elements in C{required} that are not in C{given}. @return: C{None} """ extra = given - required if extra: raise extraException(extra) missing = required - given if missing: raise missingException(missing)
python
def _missingExtraCheck(given, required, extraException, missingException): """ If the L{sets<set>} C{required} and C{given} do not contain the same elements raise an exception describing how they are different. @param given: The L{set} of elements that was actually given. @param required: The L{set} of elements that must be given. @param extraException: An exception to raise if there are elements in C{given} that are not in C{required}. @param missingException: An exception to raise if there are elements in C{required} that are not in C{given}. @return: C{None} """ extra = given - required if extra: raise extraException(extra) missing = required - given if missing: raise missingException(missing)
[ "def", "_missingExtraCheck", "(", "given", ",", "required", ",", "extraException", ",", "missingException", ")", ":", "extra", "=", "given", "-", "required", "if", "extra", ":", "raise", "extraException", "(", "extra", ")", "missing", "=", "required", "-", "...
If the L{sets<set>} C{required} and C{given} do not contain the same elements raise an exception describing how they are different. @param given: The L{set} of elements that was actually given. @param required: The L{set} of elements that must be given. @param extraException: An exception to raise if there are elements in C{given} that are not in C{required}. @param missingException: An exception to raise if there are elements in C{required} that are not in C{given}. @return: C{None}
[ "If", "the", "L", "{", "sets<set", ">", "}", "C", "{", "required", "}", "and", "C", "{", "given", "}", "do", "not", "contain", "the", "same", "elements", "raise", "an", "exception", "describing", "how", "they", "are", "different", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L227-L248
ScatterHQ/machinist
machinist/_fsm.py
constructFiniteStateMachine
def constructFiniteStateMachine(inputs, outputs, states, table, initial, richInputs, inputContext, world, logger=LOGGER): """ Construct a new finite state machine from a definition of its states. @param inputs: Definitions of all input symbols the resulting machine will need to handle, as a L{twisted.python.constants.Names} subclass. @param outputs: Definitions of all output symbols the resulting machine is allowed to emit, as a L{twisted.python.constants.Names} subclass. @param states: Definitions of all possible states the resulting machine will be capable of inhabiting, as a L{twisted.python.constants.Names} subclass. @param table: The state transition table, defining which output and next state results from the receipt of any and all inputs in any and all states. @type table: L{TransitionTable} @param initial: The state the machine will start in (one of the symbols from C{states}). @param richInputs: A L{list} of types which correspond to each of the input symbols from C{inputs}. @type richInputs: L{list} of L{IRichInput} I{providers} @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses describing the requirements of the inputs which lead to them. @param world: An object responsible for turning FSM outputs into observable side-effects. @type world: L{IOutputExecutor} provider @param logger: The logger to which to write messages. @type logger: L{eliot.ILogger} or L{NoneType} if there is no logger. 
@return: An L{IFiniteStateMachine} provider """ table = table.table _missingExtraCheck( set(table.keys()), set(states.iterconstants()), ExtraTransitionState, MissingTransitionState) _missingExtraCheck( set(i for s in table.values() for i in s), set(inputs.iterconstants()), ExtraTransitionInput, MissingTransitionInput) _missingExtraCheck( set(output for s in table.values() for transition in s.values() for output in transition.output), set(outputs.iterconstants()), ExtraTransitionOutput, MissingTransitionOutput) try: _missingExtraCheck( set(transition.nextState for s in table.values() for transition in s.values()), set(states.iterconstants()), ExtraTransitionNextState, MissingTransitionNextState) except MissingTransitionNextState as e: if e.args != ({initial},): raise if initial not in states.iterconstants(): raise InvalidInitialState(initial) extraInputContext = set(inputContext) - set(outputs.iterconstants()) if extraInputContext: raise ExtraInputContext(extraInputContext) _checkConsistency(richInputs, table, inputContext) fsm = _FiniteStateMachine(inputs, outputs, states, table, initial) executor = IOutputExecutor(world) interpreter = _FiniteStateInterpreter( tuple(richInputs), inputContext, fsm, executor) if logger is not None: interpreter = FiniteStateLogger( interpreter, logger, executor.identifier()) return interpreter
python
def constructFiniteStateMachine(inputs, outputs, states, table, initial, richInputs, inputContext, world, logger=LOGGER): """ Construct a new finite state machine from a definition of its states. @param inputs: Definitions of all input symbols the resulting machine will need to handle, as a L{twisted.python.constants.Names} subclass. @param outputs: Definitions of all output symbols the resulting machine is allowed to emit, as a L{twisted.python.constants.Names} subclass. @param states: Definitions of all possible states the resulting machine will be capable of inhabiting, as a L{twisted.python.constants.Names} subclass. @param table: The state transition table, defining which output and next state results from the receipt of any and all inputs in any and all states. @type table: L{TransitionTable} @param initial: The state the machine will start in (one of the symbols from C{states}). @param richInputs: A L{list} of types which correspond to each of the input symbols from C{inputs}. @type richInputs: L{list} of L{IRichInput} I{providers} @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses describing the requirements of the inputs which lead to them. @param world: An object responsible for turning FSM outputs into observable side-effects. @type world: L{IOutputExecutor} provider @param logger: The logger to which to write messages. @type logger: L{eliot.ILogger} or L{NoneType} if there is no logger. 
@return: An L{IFiniteStateMachine} provider """ table = table.table _missingExtraCheck( set(table.keys()), set(states.iterconstants()), ExtraTransitionState, MissingTransitionState) _missingExtraCheck( set(i for s in table.values() for i in s), set(inputs.iterconstants()), ExtraTransitionInput, MissingTransitionInput) _missingExtraCheck( set(output for s in table.values() for transition in s.values() for output in transition.output), set(outputs.iterconstants()), ExtraTransitionOutput, MissingTransitionOutput) try: _missingExtraCheck( set(transition.nextState for s in table.values() for transition in s.values()), set(states.iterconstants()), ExtraTransitionNextState, MissingTransitionNextState) except MissingTransitionNextState as e: if e.args != ({initial},): raise if initial not in states.iterconstants(): raise InvalidInitialState(initial) extraInputContext = set(inputContext) - set(outputs.iterconstants()) if extraInputContext: raise ExtraInputContext(extraInputContext) _checkConsistency(richInputs, table, inputContext) fsm = _FiniteStateMachine(inputs, outputs, states, table, initial) executor = IOutputExecutor(world) interpreter = _FiniteStateInterpreter( tuple(richInputs), inputContext, fsm, executor) if logger is not None: interpreter = FiniteStateLogger( interpreter, logger, executor.identifier()) return interpreter
[ "def", "constructFiniteStateMachine", "(", "inputs", ",", "outputs", ",", "states", ",", "table", ",", "initial", ",", "richInputs", ",", "inputContext", ",", "world", ",", "logger", "=", "LOGGER", ")", ":", "table", "=", "table", ".", "table", "_missingExtr...
Construct a new finite state machine from a definition of its states. @param inputs: Definitions of all input symbols the resulting machine will need to handle, as a L{twisted.python.constants.Names} subclass. @param outputs: Definitions of all output symbols the resulting machine is allowed to emit, as a L{twisted.python.constants.Names} subclass. @param states: Definitions of all possible states the resulting machine will be capable of inhabiting, as a L{twisted.python.constants.Names} subclass. @param table: The state transition table, defining which output and next state results from the receipt of any and all inputs in any and all states. @type table: L{TransitionTable} @param initial: The state the machine will start in (one of the symbols from C{states}). @param richInputs: A L{list} of types which correspond to each of the input symbols from C{inputs}. @type richInputs: L{list} of L{IRichInput} I{providers} @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses describing the requirements of the inputs which lead to them. @param world: An object responsible for turning FSM outputs into observable side-effects. @type world: L{IOutputExecutor} provider @param logger: The logger to which to write messages. @type logger: L{eliot.ILogger} or L{NoneType} if there is no logger. @return: An L{IFiniteStateMachine} provider
[ "Construct", "a", "new", "finite", "state", "machine", "from", "a", "definition", "of", "its", "states", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L252-L333
ScatterHQ/machinist
machinist/_fsm.py
_checkConsistency
def _checkConsistency(richInputs, fsm, inputContext): """ Verify that the outputs that can be generated by fsm have their requirements satisfied by the given rich inputs. @param richInputs: A L{list} of all of the types which will serve as rich inputs to an L{IFiniteStateMachine}. @type richInputs: L{list} of L{IRichInput} providers @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be delivered. @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses. Rich inputs which result in these outputs being produced by C{fsm} must provide the corresponding interface. @raise DoesNotImplement: If any of the rich input types fails to implement the interfaces required by the outputs C{fsm} can produce when they are received. """ for richInput in richInputs: for state in fsm: for input in fsm[state]: if richInput.symbol() == input: # This rich input will be supplied to represent this input # symbol in this state. Check to see if it satisfies the # output requirements. outputs = fsm[state][input].output for output in outputs: try: required = inputContext[output] except KeyError: continue # Consider supporting non-interface based checking in # the future: extend this to also allow # issubclass(richInput, required) if required.implementedBy(richInput): continue raise DoesNotImplement( "%r not implemented by %r, " "required by %r in state %r" % ( required, richInput, input, state))
python
def _checkConsistency(richInputs, fsm, inputContext): """ Verify that the outputs that can be generated by fsm have their requirements satisfied by the given rich inputs. @param richInputs: A L{list} of all of the types which will serve as rich inputs to an L{IFiniteStateMachine}. @type richInputs: L{list} of L{IRichInput} providers @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be delivered. @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses. Rich inputs which result in these outputs being produced by C{fsm} must provide the corresponding interface. @raise DoesNotImplement: If any of the rich input types fails to implement the interfaces required by the outputs C{fsm} can produce when they are received. """ for richInput in richInputs: for state in fsm: for input in fsm[state]: if richInput.symbol() == input: # This rich input will be supplied to represent this input # symbol in this state. Check to see if it satisfies the # output requirements. outputs = fsm[state][input].output for output in outputs: try: required = inputContext[output] except KeyError: continue # Consider supporting non-interface based checking in # the future: extend this to also allow # issubclass(richInput, required) if required.implementedBy(richInput): continue raise DoesNotImplement( "%r not implemented by %r, " "required by %r in state %r" % ( required, richInput, input, state))
[ "def", "_checkConsistency", "(", "richInputs", ",", "fsm", ",", "inputContext", ")", ":", "for", "richInput", "in", "richInputs", ":", "for", "state", "in", "fsm", ":", "for", "input", "in", "fsm", "[", "state", "]", ":", "if", "richInput", ".", "symbol"...
Verify that the outputs that can be generated by fsm have their requirements satisfied by the given rich inputs. @param richInputs: A L{list} of all of the types which will serve as rich inputs to an L{IFiniteStateMachine}. @type richInputs: L{list} of L{IRichInput} providers @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be delivered. @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses. Rich inputs which result in these outputs being produced by C{fsm} must provide the corresponding interface. @raise DoesNotImplement: If any of the rich input types fails to implement the interfaces required by the outputs C{fsm} can produce when they are received.
[ "Verify", "that", "the", "outputs", "that", "can", "be", "generated", "by", "fsm", "have", "their", "requirements", "satisfied", "by", "the", "given", "rich", "inputs", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L337-L379
ScatterHQ/machinist
machinist/_fsm.py
trivialInput
def trivialInput(symbol): """ Create a new L{IRichInput} implementation for the given input symbol. This creates a new type object and is intended to be used at module scope to define rich input types. Generally, only one use per symbol should be required. For example:: Apple = trivialInput(Fruit.apple) @param symbol: A symbol from some state machine's input alphabet. @return: A new type object usable as a rich input for the given symbol. @rtype: L{type} """ return implementer(IRichInput)(type( symbol.name.title(), (FancyStrMixin, object), { "symbol": _symbol(symbol), }))
python
def trivialInput(symbol): """ Create a new L{IRichInput} implementation for the given input symbol. This creates a new type object and is intended to be used at module scope to define rich input types. Generally, only one use per symbol should be required. For example:: Apple = trivialInput(Fruit.apple) @param symbol: A symbol from some state machine's input alphabet. @return: A new type object usable as a rich input for the given symbol. @rtype: L{type} """ return implementer(IRichInput)(type( symbol.name.title(), (FancyStrMixin, object), { "symbol": _symbol(symbol), }))
[ "def", "trivialInput", "(", "symbol", ")", ":", "return", "implementer", "(", "IRichInput", ")", "(", "type", "(", "symbol", ".", "name", ".", "title", "(", ")", ",", "(", "FancyStrMixin", ",", "object", ")", ",", "{", "\"symbol\"", ":", "_symbol", "("...
Create a new L{IRichInput} implementation for the given input symbol. This creates a new type object and is intended to be used at module scope to define rich input types. Generally, only one use per symbol should be required. For example:: Apple = trivialInput(Fruit.apple) @param symbol: A symbol from some state machine's input alphabet. @return: A new type object usable as a rich input for the given symbol. @rtype: L{type}
[ "Create", "a", "new", "L", "{", "IRichInput", "}", "implementation", "for", "the", "given", "input", "symbol", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L389-L407
ScatterHQ/machinist
machinist/_fsm.py
TransitionTable._copy
def _copy(self): """ Create a new L{TransitionTable} just like this one using a copy of the underlying transition table. @rtype: L{TransitionTable} """ table = {} for existingState, existingOutputs in self.table.items(): table[existingState] = {} for (existingInput, existingTransition) in existingOutputs.items(): table[existingState][existingInput] = existingTransition return TransitionTable(table)
python
def _copy(self): """ Create a new L{TransitionTable} just like this one using a copy of the underlying transition table. @rtype: L{TransitionTable} """ table = {} for existingState, existingOutputs in self.table.items(): table[existingState] = {} for (existingInput, existingTransition) in existingOutputs.items(): table[existingState][existingInput] = existingTransition return TransitionTable(table)
[ "def", "_copy", "(", "self", ")", ":", "table", "=", "{", "}", "for", "existingState", ",", "existingOutputs", "in", "self", ".", "table", ".", "items", "(", ")", ":", "table", "[", "existingState", "]", "=", "{", "}", "for", "(", "existingInput", ",...
Create a new L{TransitionTable} just like this one using a copy of the underlying transition table. @rtype: L{TransitionTable}
[ "Create", "a", "new", "L", "{", "TransitionTable", "}", "just", "like", "this", "one", "using", "a", "copy", "of", "the", "underlying", "transition", "table", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L163-L175
ScatterHQ/machinist
machinist/_fsm.py
TransitionTable.addTransition
def addTransition(self, state, input, output, nextState): """ Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a new transition. @param state: The state for which the new transition is defined. @param input: The input that triggers the new transition. @param output: The output produced by the new transition. @param nextState: The state that will follow the new transition. @return: The newly created L{TransitionTable}. """ return self.addTransitions(state, {input: (output, nextState)})
python
def addTransition(self, state, input, output, nextState): """ Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a new transition. @param state: The state for which the new transition is defined. @param input: The input that triggers the new transition. @param output: The output produced by the new transition. @param nextState: The state that will follow the new transition. @return: The newly created L{TransitionTable}. """ return self.addTransitions(state, {input: (output, nextState)})
[ "def", "addTransition", "(", "self", ",", "state", ",", "input", ",", "output", ",", "nextState", ")", ":", "return", "self", ".", "addTransitions", "(", "state", ",", "{", "input", ":", "(", "output", ",", "nextState", ")", "}", ")" ]
Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a new transition. @param state: The state for which the new transition is defined. @param input: The input that triggers the new transition. @param output: The output produced by the new transition. @param nextState: The state that will follow the new transition. @return: The newly created L{TransitionTable}.
[ "Create", "a", "new", "L", "{", "TransitionTable", "}", "with", "all", "the", "same", "transitions", "as", "this", "L", "{", "TransitionTable", "}", "plus", "a", "new", "transition", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L178-L190
ScatterHQ/machinist
machinist/_fsm.py
TransitionTable.addTransitions
def addTransitions(self, state, transitions): """ Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a number of new transitions. @param state: The state for which the new transitions are defined. @param transitions: A L{dict} mapping inputs to output, nextState pairs. Each item from this L{dict} will define a new transition in C{state}. @return: The newly created L{TransitionTable}. """ table = self._copy() state = table.table.setdefault(state, {}) for (input, (output, nextState)) in transitions.items(): state[input] = Transition(output, nextState) return table
python
def addTransitions(self, state, transitions): """ Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a number of new transitions. @param state: The state for which the new transitions are defined. @param transitions: A L{dict} mapping inputs to output, nextState pairs. Each item from this L{dict} will define a new transition in C{state}. @return: The newly created L{TransitionTable}. """ table = self._copy() state = table.table.setdefault(state, {}) for (input, (output, nextState)) in transitions.items(): state[input] = Transition(output, nextState) return table
[ "def", "addTransitions", "(", "self", ",", "state", ",", "transitions", ")", ":", "table", "=", "self", ".", "_copy", "(", ")", "state", "=", "table", ".", "table", ".", "setdefault", "(", "state", ",", "{", "}", ")", "for", "(", "input", ",", "(",...
Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a number of new transitions. @param state: The state for which the new transitions are defined. @param transitions: A L{dict} mapping inputs to output, nextState pairs. Each item from this L{dict} will define a new transition in C{state}. @return: The newly created L{TransitionTable}.
[ "Create", "a", "new", "L", "{", "TransitionTable", "}", "with", "all", "the", "same", "transitions", "as", "this", "L", "{", "TransitionTable", "}", "plus", "a", "number", "of", "new", "transitions", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L193-L209
ScatterHQ/machinist
machinist/_fsm.py
TransitionTable.addTerminalState
def addTerminalState(self, state): """ Create a new L{TransitionTable} with all of the same transitions as this L{TransitionTable} plus a new state with no transitions. @param state: The new state to include in the new table. @return: The newly created L{TransitionTable}. """ table = self._copy() table.table[state] = {} return table
python
def addTerminalState(self, state): """ Create a new L{TransitionTable} with all of the same transitions as this L{TransitionTable} plus a new state with no transitions. @param state: The new state to include in the new table. @return: The newly created L{TransitionTable}. """ table = self._copy() table.table[state] = {} return table
[ "def", "addTerminalState", "(", "self", ",", "state", ")", ":", "table", "=", "self", ".", "_copy", "(", ")", "table", ".", "table", "[", "state", "]", "=", "{", "}", "return", "table" ]
Create a new L{TransitionTable} with all of the same transitions as this L{TransitionTable} plus a new state with no transitions. @param state: The new state to include in the new table. @return: The newly created L{TransitionTable}.
[ "Create", "a", "new", "L", "{", "TransitionTable", "}", "with", "all", "of", "the", "same", "transitions", "as", "this", "L", "{", "TransitionTable", "}", "plus", "a", "new", "state", "with", "no", "transitions", "." ]
train
https://github.com/ScatterHQ/machinist/blob/1d1c017ac03be8e737d50af0dfabf31722ddc621/machinist/_fsm.py#L212-L223
calmjs/calmjs.parse
src/calmjs/parse/rules.py
minify
def minify(drop_semi=True): """ Rules for minifying output. Arguments: drop_semi Drop semicolons whenever possible. Note that if Dedent and OptionalNewline has a handler defined, it will stop final break statements from being resolved due to reliance on normalized resolution. """ layout_handlers = { OpenBlock: layout_handler_openbrace, CloseBlock: layout_handler_closebrace, EndStatement: layout_handler_semicolon, Space: layout_handler_space_minimum, OptionalSpace: layout_handler_space_minimum, RequiredSpace: layout_handler_space_imply, (Space, OpenBlock): layout_handler_openbrace, (Space, EndStatement): layout_handler_semicolon, (OptionalSpace, EndStatement): layout_handler_semicolon, } if drop_semi: # if these are defined, they should be dropped; should really # provide these as a flag. # layout_handlers.update({ # OptionalNewline: None, # Dedent: None, # }) layout_handlers.update({ EndStatement: layout_handler_semicolon_optional, # these two rules rely on the normalized resolution (OptionalSpace, EndStatement): layout_handler_semicolon_optional, (EndStatement, CloseBlock): layout_handler_closebrace, # this is a fallback rule for when Dedent is defined by # some other rule, which won't neuter all optional # semicolons. (EndStatement, Dedent): rule_handler_noop, ((OptionalSpace, EndStatement), CloseBlock): layout_handler_closebrace, }) def minify_rule(): return { 'layout_handlers': layout_handlers, 'deferrable_handlers': { Literal: deferrable_handler_literal_continuation, }, } return minify_rule
python
def minify(drop_semi=True): """ Rules for minifying output. Arguments: drop_semi Drop semicolons whenever possible. Note that if Dedent and OptionalNewline has a handler defined, it will stop final break statements from being resolved due to reliance on normalized resolution. """ layout_handlers = { OpenBlock: layout_handler_openbrace, CloseBlock: layout_handler_closebrace, EndStatement: layout_handler_semicolon, Space: layout_handler_space_minimum, OptionalSpace: layout_handler_space_minimum, RequiredSpace: layout_handler_space_imply, (Space, OpenBlock): layout_handler_openbrace, (Space, EndStatement): layout_handler_semicolon, (OptionalSpace, EndStatement): layout_handler_semicolon, } if drop_semi: # if these are defined, they should be dropped; should really # provide these as a flag. # layout_handlers.update({ # OptionalNewline: None, # Dedent: None, # }) layout_handlers.update({ EndStatement: layout_handler_semicolon_optional, # these two rules rely on the normalized resolution (OptionalSpace, EndStatement): layout_handler_semicolon_optional, (EndStatement, CloseBlock): layout_handler_closebrace, # this is a fallback rule for when Dedent is defined by # some other rule, which won't neuter all optional # semicolons. (EndStatement, Dedent): rule_handler_noop, ((OptionalSpace, EndStatement), CloseBlock): layout_handler_closebrace, }) def minify_rule(): return { 'layout_handlers': layout_handlers, 'deferrable_handlers': { Literal: deferrable_handler_literal_continuation, }, } return minify_rule
[ "def", "minify", "(", "drop_semi", "=", "True", ")", ":", "layout_handlers", "=", "{", "OpenBlock", ":", "layout_handler_openbrace", ",", "CloseBlock", ":", "layout_handler_closebrace", ",", "EndStatement", ":", "layout_handler_semicolon", ",", "Space", ":", "layout...
Rules for minifying output. Arguments: drop_semi Drop semicolons whenever possible. Note that if Dedent and OptionalNewline has a handler defined, it will stop final break statements from being resolved due to reliance on normalized resolution.
[ "Rules", "for", "minifying", "output", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/rules.py#L67-L124
calmjs/calmjs.parse
src/calmjs/parse/rules.py
indent
def indent(indent_str=None): """ A complete, standalone indent ruleset. Arguments: indent_str The string used for indentation. Defaults to None, which will defer the value used to the one provided by the Dispatcher. """ def indentation_rule(): inst = Indentator(indent_str) return {'layout_handlers': { OpenBlock: layout_handler_openbrace, CloseBlock: layout_handler_closebrace, EndStatement: layout_handler_semicolon, Space: layout_handler_space_imply, OptionalSpace: layout_handler_space_optional_pretty, RequiredSpace: layout_handler_space_imply, Indent: inst.layout_handler_indent, Dedent: inst.layout_handler_dedent, Newline: inst.layout_handler_newline, OptionalNewline: inst.layout_handler_newline_optional, (Space, OpenBlock): NotImplemented, (Space, EndStatement): layout_handler_semicolon, (OptionalSpace, EndStatement): layout_handler_semicolon, (Indent, Newline, Dedent): rule_handler_noop, }} return indentation_rule
python
def indent(indent_str=None): """ A complete, standalone indent ruleset. Arguments: indent_str The string used for indentation. Defaults to None, which will defer the value used to the one provided by the Dispatcher. """ def indentation_rule(): inst = Indentator(indent_str) return {'layout_handlers': { OpenBlock: layout_handler_openbrace, CloseBlock: layout_handler_closebrace, EndStatement: layout_handler_semicolon, Space: layout_handler_space_imply, OptionalSpace: layout_handler_space_optional_pretty, RequiredSpace: layout_handler_space_imply, Indent: inst.layout_handler_indent, Dedent: inst.layout_handler_dedent, Newline: inst.layout_handler_newline, OptionalNewline: inst.layout_handler_newline_optional, (Space, OpenBlock): NotImplemented, (Space, EndStatement): layout_handler_semicolon, (OptionalSpace, EndStatement): layout_handler_semicolon, (Indent, Newline, Dedent): rule_handler_noop, }} return indentation_rule
[ "def", "indent", "(", "indent_str", "=", "None", ")", ":", "def", "indentation_rule", "(", ")", ":", "inst", "=", "Indentator", "(", "indent_str", ")", "return", "{", "'layout_handlers'", ":", "{", "OpenBlock", ":", "layout_handler_openbrace", ",", "CloseBlock...
A complete, standalone indent ruleset. Arguments: indent_str The string used for indentation. Defaults to None, which will defer the value used to the one provided by the Dispatcher.
[ "A", "complete", "standalone", "indent", "ruleset", "." ]
train
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/rules.py#L127-L156
pyroscope/pyrobase
pavement.py
dist_docs
def dist_docs(): "create a documentation bundle" dist_dir = path("dist") docs_package = path("%s/%s-%s-docs.zip" % (dist_dir.abspath(), options.setup.name, options.setup.version)) dist_dir.exists() or dist_dir.makedirs() docs_package.exists() and docs_package.remove() sh(r'cd build/apidocs && zip -qr9 %s .' % (docs_package,)) print('') print("Upload @ http://pypi.python.org/pypi?:action=pkg_edit&name=%s" % ( options.setup.name,)) print(docs_package)
python
def dist_docs(): "create a documentation bundle" dist_dir = path("dist") docs_package = path("%s/%s-%s-docs.zip" % (dist_dir.abspath(), options.setup.name, options.setup.version)) dist_dir.exists() or dist_dir.makedirs() docs_package.exists() and docs_package.remove() sh(r'cd build/apidocs && zip -qr9 %s .' % (docs_package,)) print('') print("Upload @ http://pypi.python.org/pypi?:action=pkg_edit&name=%s" % ( options.setup.name,)) print(docs_package)
[ "def", "dist_docs", "(", ")", ":", "dist_dir", "=", "path", "(", "\"dist\"", ")", "docs_package", "=", "path", "(", "\"%s/%s-%s-docs.zip\"", "%", "(", "dist_dir", ".", "abspath", "(", ")", ",", "options", ".", "setup", ".", "name", ",", "options", ".", ...
create a documentation bundle
[ "create", "a", "documentation", "bundle" ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/pavement.py#L111-L123
pyroscope/pyrobase
pavement.py
release
def release(): "check release before upload to PyPI" sh("paver bdist_wheel") wheels = path("dist").files("*.whl") if not wheels: error("\n*** ERROR: No release wheel was built!") sys.exit(1) if any(".dev" in i for i in wheels): error("\n*** ERROR: You're still using a 'dev' version!") sys.exit(1) # Check that source distribution can be built and is complete print('') print("~" * 78) print("TESTING SOURCE BUILD") sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/" " && /usr/bin/python setup.py sdist >/dev/null" " && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }" " | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then" " echo '^^^ Difference in file lists! ^^^'; false;" " else true; fi; } 2>&1" % tuple([project["name"], version] * 4) ) path("dist/%s-%s" % (project["name"], version)).rmtree() print("~" * 78) print('') print("Created", " ".join([str(i) for i in path("dist").listdir()])) print("Use 'paver sdist bdist_wheel' to build the release and") print(" 'twine upload dist/*.{zip,whl}' to upload to PyPI") print("Use 'paver dist_docs' to prepare an API documentation upload")
python
def release(): "check release before upload to PyPI" sh("paver bdist_wheel") wheels = path("dist").files("*.whl") if not wheels: error("\n*** ERROR: No release wheel was built!") sys.exit(1) if any(".dev" in i for i in wheels): error("\n*** ERROR: You're still using a 'dev' version!") sys.exit(1) # Check that source distribution can be built and is complete print('') print("~" * 78) print("TESTING SOURCE BUILD") sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/" " && /usr/bin/python setup.py sdist >/dev/null" " && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }" " | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then" " echo '^^^ Difference in file lists! ^^^'; false;" " else true; fi; } 2>&1" % tuple([project["name"], version] * 4) ) path("dist/%s-%s" % (project["name"], version)).rmtree() print("~" * 78) print('') print("Created", " ".join([str(i) for i in path("dist").listdir()])) print("Use 'paver sdist bdist_wheel' to build the release and") print(" 'twine upload dist/*.{zip,whl}' to upload to PyPI") print("Use 'paver dist_docs' to prepare an API documentation upload")
[ "def", "release", "(", ")", ":", "sh", "(", "\"paver bdist_wheel\"", ")", "wheels", "=", "path", "(", "\"dist\"", ")", ".", "files", "(", "\"*.whl\"", ")", "if", "not", "wheels", ":", "error", "(", "\"\\n*** ERROR: No release wheel was built!\"", ")", "sys", ...
check release before upload to PyPI
[ "check", "release", "before", "upload", "to", "PyPI" ]
train
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/pavement.py#L167-L197
ubccr/pinky
pinky/perception/aromaticity.py
getPyroleLikeAtoms
def getPyroleLikeAtoms(cycle): """cycle->return a dictionary of pyrole nitrogen-like atoms in a cycle or a molecule The dictionary is keyed on the atom.handle""" result = {} # the outgoing bonds might need to be single or aromatic for atom in cycle.atoms: lookup = (atom.symbol, atom.charge, atom.hcount, len(atom.bonds)) if PyroleTable.get(lookup, 0): result[atom.handle] = atom return result
python
def getPyroleLikeAtoms(cycle): """cycle->return a dictionary of pyrole nitrogen-like atoms in a cycle or a molecule The dictionary is keyed on the atom.handle""" result = {} # the outgoing bonds might need to be single or aromatic for atom in cycle.atoms: lookup = (atom.symbol, atom.charge, atom.hcount, len(atom.bonds)) if PyroleTable.get(lookup, 0): result[atom.handle] = atom return result
[ "def", "getPyroleLikeAtoms", "(", "cycle", ")", ":", "result", "=", "{", "}", "# the outgoing bonds might need to be single or aromatic", "for", "atom", "in", "cycle", ".", "atoms", ":", "lookup", "=", "(", "atom", ".", "symbol", ",", "atom", ".", "charge", ",...
cycle->return a dictionary of pyrole nitrogen-like atoms in a cycle or a molecule The dictionary is keyed on the atom.handle
[ "cycle", "-", ">", "return", "a", "dictionary", "of", "pyrole", "nitrogen", "-", "like", "atoms", "in", "a", "cycle", "or", "a", "molecule", "The", "dictionary", "is", "keyed", "on", "the", "atom", ".", "handle" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L78-L88
ubccr/pinky
pinky/perception/aromaticity.py
canBeAromatic
def canBeAromatic(cycle, pyroleLike): """(cycle)-> returns AROMATIC if a ring is conjugatable and passes the simple tests for aromaticity returns MAYBE if the ring in its present form can be aromatic but is not currently NEVER if the ring can never be aromatic""" cycleLength = len(cycle) # ******************************************************* # check for kekular five membered rings if cycleLength == 5: # check atom types for atom in cycle.atoms: if not AROMATIC_PYROLE_ATOMS.has_key(atom.symbol): return NEVER # do we have exactly one pyrole nitrogen like atom? pyroleCount = 0 for atom in cycle.atoms: if pyroleLike.has_key(atom.handle): pyrole = atom pyroleCount += 1 if pyroleCount < 1 or pyroleCount > 2: return NEVER # rotate the ring so that we start on the pyrole like atom cycle.rotate(pyrole) bonds = cycle.bonds[:] # check the bonds for a kekular structure for index, bond in zip(range(len(bonds)), bonds): if bond.bondtype not in AROMATIC_5_RING[index]: return MAYBE return AROMATIC # ***************************************************** # check for kekular six membered rings # kekular rings must have atoms in the AROMATIC_ATOMS # groups and must belong in 6 membered rings. # bonds must be conjugated elif cycleLength == 6: # XXX FIX ME -> there is a lot of problems with this # code I think, what about bonds that are already fixed? for atom in cycle.atoms: if not AROMATIC_ATOMS.has_key(atom.symbol): return NEVER bonds = cycle.bonds[:] last = None switch = {1:2, 2:1} while bonds: bond = bonds.pop() bondtype = bond.bondtype if bond.bondorder == 3: return NEVER if last is None: if bond.bondtype in [1,2]: last = bond.bondtype else: if last == 1 and bond.bondtype not in [2,4]: return MAYBE elif last == 2 and bond.bondtype not in [1, 4]: return MAYBE last = switch[last] if bondtype != last: bond.bondorder = last return AROMATIC else: # we can never be aromatic return NEVER
python
def canBeAromatic(cycle, pyroleLike): """(cycle)-> returns AROMATIC if a ring is conjugatable and passes the simple tests for aromaticity returns MAYBE if the ring in its present form can be aromatic but is not currently NEVER if the ring can never be aromatic""" cycleLength = len(cycle) # ******************************************************* # check for kekular five membered rings if cycleLength == 5: # check atom types for atom in cycle.atoms: if not AROMATIC_PYROLE_ATOMS.has_key(atom.symbol): return NEVER # do we have exactly one pyrole nitrogen like atom? pyroleCount = 0 for atom in cycle.atoms: if pyroleLike.has_key(atom.handle): pyrole = atom pyroleCount += 1 if pyroleCount < 1 or pyroleCount > 2: return NEVER # rotate the ring so that we start on the pyrole like atom cycle.rotate(pyrole) bonds = cycle.bonds[:] # check the bonds for a kekular structure for index, bond in zip(range(len(bonds)), bonds): if bond.bondtype not in AROMATIC_5_RING[index]: return MAYBE return AROMATIC # ***************************************************** # check for kekular six membered rings # kekular rings must have atoms in the AROMATIC_ATOMS # groups and must belong in 6 membered rings. # bonds must be conjugated elif cycleLength == 6: # XXX FIX ME -> there is a lot of problems with this # code I think, what about bonds that are already fixed? for atom in cycle.atoms: if not AROMATIC_ATOMS.has_key(atom.symbol): return NEVER bonds = cycle.bonds[:] last = None switch = {1:2, 2:1} while bonds: bond = bonds.pop() bondtype = bond.bondtype if bond.bondorder == 3: return NEVER if last is None: if bond.bondtype in [1,2]: last = bond.bondtype else: if last == 1 and bond.bondtype not in [2,4]: return MAYBE elif last == 2 and bond.bondtype not in [1, 4]: return MAYBE last = switch[last] if bondtype != last: bond.bondorder = last return AROMATIC else: # we can never be aromatic return NEVER
[ "def", "canBeAromatic", "(", "cycle", ",", "pyroleLike", ")", ":", "cycleLength", "=", "len", "(", "cycle", ")", "# *******************************************************", "# check for kekular five membered rings", "if", "cycleLength", "==", "5", ":", "# check atom types...
(cycle)-> returns AROMATIC if a ring is conjugatable and passes the simple tests for aromaticity returns MAYBE if the ring in its present form can be aromatic but is not currently NEVER if the ring can never be aromatic
[ "(", "cycle", ")", "-", ">", "returns", "AROMATIC", "if", "a", "ring", "is", "conjugatable", "and", "passes", "the", "simple", "tests", "for", "aromaticity", "returns", "MAYBE", "if", "the", "ring", "in", "its", "present", "form", "can", "be", "aromatic", ...
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L90-L165
ubccr/pinky
pinky/perception/aromaticity.py
convert
def convert(cycle, pyroleLike, usedPyroles): """cycle, pyroleLike, aromatic=0-> aromatize the cycle pyroleLike is a lookup of the pyrole like atoms in the cycle. return 1 if the cycle was aromatized 2 if the cycle could not be aromatized""" bonds = cycle.bonds atoms = cycle.atoms initialBondStates = [] initialAtomStates = [] _usedPyroles = {} for bond in bonds: # store the initial states but assume the # bond is aromatic initialBondStates.append((bond, bond.symbol, bond.bondorder, bond.bondtype, bond.aromatic, bond.stereo)) # XXX FIX ME # until we get proper conjugation, aromatic bond orders # are 1.5 bond.reset(':', bond.bondorder, 4, bond.fixed, bond.stereo) aromatized = 1 for atom in atoms: initialAtomStates.append((atom, atom.aromatic)) atom.aromatic = 1 nonhydrogens = atom.sumBondOrders() + atom.charge # look for the lowest valence where we don't # have to change the charge of the atom to # fill the valences for valence in atom.valences: neededHydrogens = int(valence - nonhydrogens) if neededHydrogens >= 0: break else: # we can't change the aromaticity and have correct # valence. # # there is one special case of a five membered # ring and a pyrole nitrogen like atom we need # to look for. if len(cycle) == 5 and pyroleLike.has_key(atom.handle): _usedPyroles[atom.handle] = 1 else: # nope, the valences don't work out so # we can't aromatize aromatized = 0 break # sanity check, this should be true because of the # canBeAromatic routine above assert len(_usedPyroles) <=1, "Too many used pyroles!" cycle.aromatic = aromatized if not aromatized: for bond, symbol, order, bondtype, aromatic, stereo in initialBondStates: bond.reset(symbol, order, bondtype, bond.fixed, stereo) for atom, aromatic in initialAtomStates: atom.aromatic = aromatic else: # we used some pyroles, we'll have to send these to # the valence checker later usedPyroles.update(_usedPyroles) return aromatized
python
def convert(cycle, pyroleLike, usedPyroles): """cycle, pyroleLike, aromatic=0-> aromatize the cycle pyroleLike is a lookup of the pyrole like atoms in the cycle. return 1 if the cycle was aromatized 2 if the cycle could not be aromatized""" bonds = cycle.bonds atoms = cycle.atoms initialBondStates = [] initialAtomStates = [] _usedPyroles = {} for bond in bonds: # store the initial states but assume the # bond is aromatic initialBondStates.append((bond, bond.symbol, bond.bondorder, bond.bondtype, bond.aromatic, bond.stereo)) # XXX FIX ME # until we get proper conjugation, aromatic bond orders # are 1.5 bond.reset(':', bond.bondorder, 4, bond.fixed, bond.stereo) aromatized = 1 for atom in atoms: initialAtomStates.append((atom, atom.aromatic)) atom.aromatic = 1 nonhydrogens = atom.sumBondOrders() + atom.charge # look for the lowest valence where we don't # have to change the charge of the atom to # fill the valences for valence in atom.valences: neededHydrogens = int(valence - nonhydrogens) if neededHydrogens >= 0: break else: # we can't change the aromaticity and have correct # valence. # # there is one special case of a five membered # ring and a pyrole nitrogen like atom we need # to look for. if len(cycle) == 5 and pyroleLike.has_key(atom.handle): _usedPyroles[atom.handle] = 1 else: # nope, the valences don't work out so # we can't aromatize aromatized = 0 break # sanity check, this should be true because of the # canBeAromatic routine above assert len(_usedPyroles) <=1, "Too many used pyroles!" cycle.aromatic = aromatized if not aromatized: for bond, symbol, order, bondtype, aromatic, stereo in initialBondStates: bond.reset(symbol, order, bondtype, bond.fixed, stereo) for atom, aromatic in initialAtomStates: atom.aromatic = aromatic else: # we used some pyroles, we'll have to send these to # the valence checker later usedPyroles.update(_usedPyroles) return aromatized
[ "def", "convert", "(", "cycle", ",", "pyroleLike", ",", "usedPyroles", ")", ":", "bonds", "=", "cycle", ".", "bonds", "atoms", "=", "cycle", ".", "atoms", "initialBondStates", "=", "[", "]", "initialAtomStates", "=", "[", "]", "_usedPyroles", "=", "{", "...
cycle, pyroleLike, aromatic=0-> aromatize the cycle pyroleLike is a lookup of the pyrole like atoms in the cycle. return 1 if the cycle was aromatized 2 if the cycle could not be aromatized
[ "cycle", "pyroleLike", "aromatic", "=", "0", "-", ">", "aromatize", "the", "cycle", "pyroleLike", "is", "a", "lookup", "of", "the", "pyrole", "like", "atoms", "in", "the", "cycle", ".", "return", "1", "if", "the", "cycle", "was", "aromatized", "2", "if",...
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L167-L235
ubccr/pinky
pinky/perception/aromaticity.py
addHydrogens
def addHydrogens(molecule, usedPyroles=None): """(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised""" for atom in molecule.atoms: # if the atom has an explicit hcount, we can't set the # hcount if atom.has_explicit_hcount: atom.hcount = atom.explicit_hcount continue if atom.valences: for valence in atom.valences: hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge)) if hcount >= 0: break else: if usedPyroles and not usedPyroles.has_key(atom.handle): #print atom.symbol, atom.valences, atom.hcount, atom.charge,\ # atom.sumBondOrders() #print [x.bondtype for x in atom.bonds] #print molecule.cansmiles() raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom)) pass #hcount = int(hcount) atom.hcount = hcount return molecule
python
def addHydrogens(molecule, usedPyroles=None): """(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised""" for atom in molecule.atoms: # if the atom has an explicit hcount, we can't set the # hcount if atom.has_explicit_hcount: atom.hcount = atom.explicit_hcount continue if atom.valences: for valence in atom.valences: hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge)) if hcount >= 0: break else: if usedPyroles and not usedPyroles.has_key(atom.handle): #print atom.symbol, atom.valences, atom.hcount, atom.charge,\ # atom.sumBondOrders() #print [x.bondtype for x in atom.bonds] #print molecule.cansmiles() raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom)) pass #hcount = int(hcount) atom.hcount = hcount return molecule
[ "def", "addHydrogens", "(", "molecule", ",", "usedPyroles", "=", "None", ")", ":", "for", "atom", "in", "molecule", ".", "atoms", ":", "# if the atom has an explicit hcount, we can't set the", "# hcount", "if", "atom", ".", "has_explicit_hcount", ":", "atom", ".", ...
(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised
[ "(", "molecule", ")", "-", ">", "add", "implicit", "hydrogens", "to", "a", "molecule", ".", "If", "the", "atom", "has", "specified", "valences", "and", "the", "atom", "must", "be", "charged", "then", "a", "Valence", "Error", "is", "raised" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L237-L264
ubccr/pinky
pinky/perception/aromaticity.py
aromatize
def aromatize(molecule, usedPyroles=None): """(molecule, usedPyroles=None)->aromatize a molecular graph usedPyroles is a dictionary that holds the pyrole like atoms that are used in the conversion process. The following valence checker may need this information""" pyroleLike = getPyroleLikeAtoms(molecule) if usedPyroles is None: usedPyroles = {} cyclesToCheck = [] # determine which cycles came in marked as aromatic # and which need to be checked form the kekular form # # if a cycle came in as aromatic, convert it # before going on. for cycle in molecule.cycles: for atom in cycle.atoms: if not atom.aromatic: cyclesToCheck.append(cycle) break else: if not convert(cycle, pyroleLike, usedPyroles): # XXX FIX ME # oops, an aromatic ring came in but # we can't convert it. This is an error # daylight would conjugate the ring raise PinkyError("Bad initial aromaticity") # keep checking rings until something happens while 1: # assume nothing happened needToCheckAgain = 0 _cyclesToCheck = [] for cycle in cyclesToCheck: canAromatic = canBeAromatic(cycle, pyroleLike) if canAromatic == NEVER: # the ring can NEVER EVER be aromatic, so remove it for good pass elif canAromatic and convert(cycle, pyroleLike, usedPyroles): needToCheckAgain = 1 else: _cyclesToCheck.append(cycle) cyclesToCheck = _cyclesToCheck if not needToCheckAgain: break # fix bonds that have no bondorder if necessary molecule = fixBonds(molecule, pyroleLike) # add implicit hydrogens return addHydrogens(molecule, usedPyroles)
python
def aromatize(molecule, usedPyroles=None): """(molecule, usedPyroles=None)->aromatize a molecular graph usedPyroles is a dictionary that holds the pyrole like atoms that are used in the conversion process. The following valence checker may need this information""" pyroleLike = getPyroleLikeAtoms(molecule) if usedPyroles is None: usedPyroles = {} cyclesToCheck = [] # determine which cycles came in marked as aromatic # and which need to be checked form the kekular form # # if a cycle came in as aromatic, convert it # before going on. for cycle in molecule.cycles: for atom in cycle.atoms: if not atom.aromatic: cyclesToCheck.append(cycle) break else: if not convert(cycle, pyroleLike, usedPyroles): # XXX FIX ME # oops, an aromatic ring came in but # we can't convert it. This is an error # daylight would conjugate the ring raise PinkyError("Bad initial aromaticity") # keep checking rings until something happens while 1: # assume nothing happened needToCheckAgain = 0 _cyclesToCheck = [] for cycle in cyclesToCheck: canAromatic = canBeAromatic(cycle, pyroleLike) if canAromatic == NEVER: # the ring can NEVER EVER be aromatic, so remove it for good pass elif canAromatic and convert(cycle, pyroleLike, usedPyroles): needToCheckAgain = 1 else: _cyclesToCheck.append(cycle) cyclesToCheck = _cyclesToCheck if not needToCheckAgain: break # fix bonds that have no bondorder if necessary molecule = fixBonds(molecule, pyroleLike) # add implicit hydrogens return addHydrogens(molecule, usedPyroles)
[ "def", "aromatize", "(", "molecule", ",", "usedPyroles", "=", "None", ")", ":", "pyroleLike", "=", "getPyroleLikeAtoms", "(", "molecule", ")", "if", "usedPyroles", "is", "None", ":", "usedPyroles", "=", "{", "}", "cyclesToCheck", "=", "[", "]", "# determine ...
(molecule, usedPyroles=None)->aromatize a molecular graph usedPyroles is a dictionary that holds the pyrole like atoms that are used in the conversion process. The following valence checker may need this information
[ "(", "molecule", "usedPyroles", "=", "None", ")", "-", ">", "aromatize", "a", "molecular", "graph", "usedPyroles", "is", "a", "dictionary", "that", "holds", "the", "pyrole", "like", "atoms", "that", "are", "used", "in", "the", "conversion", "process", ".", ...
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L369-L422
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/sensitivity.py
SensitivityContainer._prep_noise_interpolants
def _prep_noise_interpolants(self): """Construct interpolated sensitivity curves This will construct the interpolated sensitivity curves using scipy.interpolate.interp1d. It will add wd noise if that is requested. Raises: ValueError: ``len(noise_type_in) != len(sensitivity_curves)`` ValueError: Issue with sensitivity curve type provided. """ noise_lists = {} self.noise_interpolants = {} if isinstance(self.sensitivity_curves, str): self.sensitivity_curves = [self.sensitivity_curves] if isinstance(self.noise_type_in, list): if len(self.noise_type_in) != len(self.sensitivity_curves): raise ValueError('noise_type_in must have same shape as sensitivity_curves if it is' + 'provided as a list.' + 'If all curves are of the same type, provide a string.') else: assert isinstance(self.noise_type_in, str) self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves] if isinstance(self.signal_type, str): self.signal_type = [self.signal_type] # read in all the noise curves for num, sc in enumerate(self.sensitivity_curves): if isinstance(sc, str): f, h_n = read_noise_curve(sc, noise_type_in=self.noise_type_in[num], noise_type_out='char_strain') if sc[-4:] == '.txt': key = sc.split('.')[0].split('/')[-1] else: key = sc elif isinstance(sc, list): # TODO: add to docs if inputing special noise curve, make sure its char_strain f, h_n = sc key = str(num) else: raise ValueError('Sensitivity curves must either be string' + 'or list containing f_n and asd_n.') noise_lists[key] = [f, h_n] # add wd noise if str(self.add_wd_noise).lower() in ['true', 'both', 'yes']: if isinstance(self.wd_noise, str): f_n_wd, h_n_wd = read_noise_curve(self.wd_noise, noise_type_in=self.wd_noise_type_in, noise_type_out='char_strain') elif isinstance(self, wd_noise, list): f_n_wd, h_n_wd = self.wd_noise trans_dict = {} for sc in noise_lists.keys(): f_n, h_n = noise_lists[sc] if self.add_wd_noise.lower() == 'both': trans_dict[sc] = [f_n, h_n] f_n, h_n = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd) 
trans_dict[sc + '_wd'] = [f_n, h_n] noise_lists = trans_dict # interpolate for sc in noise_lists: f_n, h_n = noise_lists[sc] self.noise_interpolants[sc] = (interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e30)) return
python
def _prep_noise_interpolants(self): """Construct interpolated sensitivity curves This will construct the interpolated sensitivity curves using scipy.interpolate.interp1d. It will add wd noise if that is requested. Raises: ValueError: ``len(noise_type_in) != len(sensitivity_curves)`` ValueError: Issue with sensitivity curve type provided. """ noise_lists = {} self.noise_interpolants = {} if isinstance(self.sensitivity_curves, str): self.sensitivity_curves = [self.sensitivity_curves] if isinstance(self.noise_type_in, list): if len(self.noise_type_in) != len(self.sensitivity_curves): raise ValueError('noise_type_in must have same shape as sensitivity_curves if it is' + 'provided as a list.' + 'If all curves are of the same type, provide a string.') else: assert isinstance(self.noise_type_in, str) self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves] if isinstance(self.signal_type, str): self.signal_type = [self.signal_type] # read in all the noise curves for num, sc in enumerate(self.sensitivity_curves): if isinstance(sc, str): f, h_n = read_noise_curve(sc, noise_type_in=self.noise_type_in[num], noise_type_out='char_strain') if sc[-4:] == '.txt': key = sc.split('.')[0].split('/')[-1] else: key = sc elif isinstance(sc, list): # TODO: add to docs if inputing special noise curve, make sure its char_strain f, h_n = sc key = str(num) else: raise ValueError('Sensitivity curves must either be string' + 'or list containing f_n and asd_n.') noise_lists[key] = [f, h_n] # add wd noise if str(self.add_wd_noise).lower() in ['true', 'both', 'yes']: if isinstance(self.wd_noise, str): f_n_wd, h_n_wd = read_noise_curve(self.wd_noise, noise_type_in=self.wd_noise_type_in, noise_type_out='char_strain') elif isinstance(self, wd_noise, list): f_n_wd, h_n_wd = self.wd_noise trans_dict = {} for sc in noise_lists.keys(): f_n, h_n = noise_lists[sc] if self.add_wd_noise.lower() == 'both': trans_dict[sc] = [f_n, h_n] f_n, h_n = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd) 
trans_dict[sc + '_wd'] = [f_n, h_n] noise_lists = trans_dict # interpolate for sc in noise_lists: f_n, h_n = noise_lists[sc] self.noise_interpolants[sc] = (interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e30)) return
[ "def", "_prep_noise_interpolants", "(", "self", ")", ":", "noise_lists", "=", "{", "}", "self", ".", "noise_interpolants", "=", "{", "}", "if", "isinstance", "(", "self", ".", "sensitivity_curves", ",", "str", ")", ":", "self", ".", "sensitivity_curves", "="...
Construct interpolated sensitivity curves This will construct the interpolated sensitivity curves using scipy.interpolate.interp1d. It will add wd noise if that is requested. Raises: ValueError: ``len(noise_type_in) != len(sensitivity_curves)`` ValueError: Issue with sensitivity curve type provided.
[ "Construct", "interpolated", "sensitivity", "curves" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/sensitivity.py#L61-L136
wangsix/vmo
vmo/analysis/analysis.py
create_selfsim
def create_selfsim(oracle, method='rsfx'): """ Create self similarity matrix from attributes of a vmo object :param oracle: a encoded vmo object :param method: "comp":use the compression codes "sfx" - use suffix links "rsfx" - use reverse suffix links "lrs" - use LRS values "seg" - use patterns found :return: the created self-similarity matrix """ len_oracle = oracle.n_states - 1 mat = np.zeros((len_oracle, len_oracle)) if method == 'com': if not oracle.code: print("Codes not generated. Generating codes with encode().") oracle.encode() ind = 0 # index for l, p in oracle.code: # l for length, p for position if l == 0: inc = 1 else: inc = l mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1 mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1 ind = ind + l elif method == 'sfx': for i, s in enumerate(oracle.sfx[1:]): if s != 0: mat[i][s - 1] = 1 mat[s - 1][i] = 1 elif method == 'rsfx': for cluster in oracle.latent: p = itertools.product(cluster, repeat=2) for _p in p: mat[_p[0] - 1][_p[1] - 1] = 1 elif method == 'lrs': for i, l in enumerate(oracle.lrs[1:]): if l != 0: s = oracle.sfx[i + 1] mat[range((s - l) + 1, s + 1), range(i - l + 1, i + 1)] = 1 mat[range(i - l + 1, i + 1), range((s - l) + 1, s + 1)] = 1 elif method == 'seg': seg = oracle.segment ind = 0 for l, p in seg: # l for length, p for position if l == 0: inc = 1 else: inc = l mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1 mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1 ind = ind + l return mat
python
def create_selfsim(oracle, method='rsfx'): """ Create self similarity matrix from attributes of a vmo object :param oracle: a encoded vmo object :param method: "comp":use the compression codes "sfx" - use suffix links "rsfx" - use reverse suffix links "lrs" - use LRS values "seg" - use patterns found :return: the created self-similarity matrix """ len_oracle = oracle.n_states - 1 mat = np.zeros((len_oracle, len_oracle)) if method == 'com': if not oracle.code: print("Codes not generated. Generating codes with encode().") oracle.encode() ind = 0 # index for l, p in oracle.code: # l for length, p for position if l == 0: inc = 1 else: inc = l mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1 mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1 ind = ind + l elif method == 'sfx': for i, s in enumerate(oracle.sfx[1:]): if s != 0: mat[i][s - 1] = 1 mat[s - 1][i] = 1 elif method == 'rsfx': for cluster in oracle.latent: p = itertools.product(cluster, repeat=2) for _p in p: mat[_p[0] - 1][_p[1] - 1] = 1 elif method == 'lrs': for i, l in enumerate(oracle.lrs[1:]): if l != 0: s = oracle.sfx[i + 1] mat[range((s - l) + 1, s + 1), range(i - l + 1, i + 1)] = 1 mat[range(i - l + 1, i + 1), range((s - l) + 1, s + 1)] = 1 elif method == 'seg': seg = oracle.segment ind = 0 for l, p in seg: # l for length, p for position if l == 0: inc = 1 else: inc = l mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1 mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1 ind = ind + l return mat
[ "def", "create_selfsim", "(", "oracle", ",", "method", "=", "'rsfx'", ")", ":", "len_oracle", "=", "oracle", ".", "n_states", "-", "1", "mat", "=", "np", ".", "zeros", "(", "(", "len_oracle", ",", "len_oracle", ")", ")", "if", "method", "==", "'com'", ...
Create self similarity matrix from attributes of a vmo object :param oracle: a encoded vmo object :param method: "comp":use the compression codes "sfx" - use suffix links "rsfx" - use reverse suffix links "lrs" - use LRS values "seg" - use patterns found :return: the created self-similarity matrix
[ "Create", "self", "similarity", "matrix", "from", "attributes", "of", "a", "vmo", "object" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L33-L91
wangsix/vmo
vmo/analysis/analysis.py
create_transition
def create_transition(oracle, method='trn'): """Create a transition matrix based on oracle links""" mat, hist, n = _create_trn_mat_symbolic(oracle, method) return mat, hist, n
python
def create_transition(oracle, method='trn'): """Create a transition matrix based on oracle links""" mat, hist, n = _create_trn_mat_symbolic(oracle, method) return mat, hist, n
[ "def", "create_transition", "(", "oracle", ",", "method", "=", "'trn'", ")", ":", "mat", ",", "hist", ",", "n", "=", "_create_trn_mat_symbolic", "(", "oracle", ",", "method", ")", "return", "mat", ",", "hist", ",", "n" ]
Create a transition matrix based on oracle links
[ "Create", "a", "transition", "matrix", "based", "on", "oracle", "links" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L94-L97
wangsix/vmo
vmo/analysis/analysis.py
predict
def predict(oracle, context, ab=None, verbose=False): """Single symbolic prediction given a context, an oracle and an alphabet. :param oracle: a learned vmo object from a symbolic sequence. :param context: the context precedes the predicted symbol :param ab: alphabet :param verbose: to show if the context if pruned or not :return: a probability distribution over the alphabet for the prediction. """ if verbose: print("original context: ", context) if ab is None: ab = oracle.get_alphabet() _b, _s, context = _test_context(oracle, context) _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]] context_state = [] while not context_state: for _i, _l in enumerate(_lrs): if _l >= len(context): context_state.append(oracle.rsfx[_s][_i]) if context_state: break else: context = context[1:] _b, _s = oracle.accept(context) _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]] if verbose: print("final context: ", context) print("context_state: ", context_state) d_count = len(ab) hist = [1.0] * len(ab) # initialize all histograms with 1s. trn_data = [oracle.data[n] for n in oracle.trn[_s]] for k in trn_data: hist[ab[k]] += 1.0 d_count += 1.0 for i in context_state: d_count, hist = _rsfx_count(oracle, i, d_count, hist, ab) return [hist[idx] / d_count for idx in range(len(hist))], context
python
def predict(oracle, context, ab=None, verbose=False): """Single symbolic prediction given a context, an oracle and an alphabet. :param oracle: a learned vmo object from a symbolic sequence. :param context: the context precedes the predicted symbol :param ab: alphabet :param verbose: to show if the context if pruned or not :return: a probability distribution over the alphabet for the prediction. """ if verbose: print("original context: ", context) if ab is None: ab = oracle.get_alphabet() _b, _s, context = _test_context(oracle, context) _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]] context_state = [] while not context_state: for _i, _l in enumerate(_lrs): if _l >= len(context): context_state.append(oracle.rsfx[_s][_i]) if context_state: break else: context = context[1:] _b, _s = oracle.accept(context) _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]] if verbose: print("final context: ", context) print("context_state: ", context_state) d_count = len(ab) hist = [1.0] * len(ab) # initialize all histograms with 1s. trn_data = [oracle.data[n] for n in oracle.trn[_s]] for k in trn_data: hist[ab[k]] += 1.0 d_count += 1.0 for i in context_state: d_count, hist = _rsfx_count(oracle, i, d_count, hist, ab) return [hist[idx] / d_count for idx in range(len(hist))], context
[ "def", "predict", "(", "oracle", ",", "context", ",", "ab", "=", "None", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "print", "(", "\"original context: \"", ",", "context", ")", "if", "ab", "is", "None", ":", "ab", "=", "oracle", "....
Single symbolic prediction given a context, an oracle and an alphabet. :param oracle: a learned vmo object from a symbolic sequence. :param context: the context precedes the predicted symbol :param ab: alphabet :param verbose: to show if the context if pruned or not :return: a probability distribution over the alphabet for the prediction.
[ "Single", "symbolic", "prediction", "given", "a", "context", "an", "oracle", "and", "an", "alphabet", "." ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L131-L172
wangsix/vmo
vmo/analysis/analysis.py
log_loss
def log_loss(oracle, test_seq, ab=[], m_order=None, verbose=False): """ Evaluate the average log-loss of a sequence given an oracle """ if not ab: ab = oracle.get_alphabet() if verbose: print(' ') logP = 0.0 context = [] increment = np.floor((len(test_seq) - 1) / 100) bar_count = -1 maxContextLength = 0 avgContext = 0 for i, t in enumerate(test_seq): p, c = predict(oracle, context, ab, verbose=False) if len(c) < len(context): context = context[-len(c):] logP -= np.log2(p[ab[t]]) context.append(t) if m_order is not None: if len(context) > m_order: context = context[-m_order:] avgContext += float(len(context)) / len(test_seq) if verbose: percentage = np.mod(i, increment) if percentage == 0: bar_count += 1 if len(context) > maxContextLength: maxContextLength = len(context) sys.stdout.write('\r') sys.stdout.write("\r[" + "=" * bar_count + " " * (100 - bar_count) + "] " + str(bar_count) + "% " + str(i) + "/" + str(len(test_seq) - 1) + " Current max length: " + str( maxContextLength)) sys.stdout.flush() return logP / len(test_seq), avgContext
python
def log_loss(oracle, test_seq, ab=[], m_order=None, verbose=False): """ Evaluate the average log-loss of a sequence given an oracle """ if not ab: ab = oracle.get_alphabet() if verbose: print(' ') logP = 0.0 context = [] increment = np.floor((len(test_seq) - 1) / 100) bar_count = -1 maxContextLength = 0 avgContext = 0 for i, t in enumerate(test_seq): p, c = predict(oracle, context, ab, verbose=False) if len(c) < len(context): context = context[-len(c):] logP -= np.log2(p[ab[t]]) context.append(t) if m_order is not None: if len(context) > m_order: context = context[-m_order:] avgContext += float(len(context)) / len(test_seq) if verbose: percentage = np.mod(i, increment) if percentage == 0: bar_count += 1 if len(context) > maxContextLength: maxContextLength = len(context) sys.stdout.write('\r') sys.stdout.write("\r[" + "=" * bar_count + " " * (100 - bar_count) + "] " + str(bar_count) + "% " + str(i) + "/" + str(len(test_seq) - 1) + " Current max length: " + str( maxContextLength)) sys.stdout.flush() return logP / len(test_seq), avgContext
[ "def", "log_loss", "(", "oracle", ",", "test_seq", ",", "ab", "=", "[", "]", ",", "m_order", "=", "None", ",", "verbose", "=", "False", ")", ":", "if", "not", "ab", ":", "ab", "=", "oracle", ".", "get_alphabet", "(", ")", "if", "verbose", ":", "p...
Evaluate the average log-loss of a sequence given an oracle
[ "Evaluate", "the", "average", "log", "-", "loss", "of", "a", "sequence", "given", "an", "oracle" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L175-L215
wangsix/vmo
vmo/analysis/analysis.py
_rsfx_count
def _rsfx_count(oracle, s, count, hist, ab): """ Accumulate counts for context """ trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate = oracle.rsfx[s][:] while rsfx_candidate: s = rsfx_candidate.pop(0) trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate.extend(oracle.rsfx[s]) return count, hist
python
def _rsfx_count(oracle, s, count, hist, ab): """ Accumulate counts for context """ trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate = oracle.rsfx[s][:] while rsfx_candidate: s = rsfx_candidate.pop(0) trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate.extend(oracle.rsfx[s]) return count, hist
[ "def", "_rsfx_count", "(", "oracle", ",", "s", ",", "count", ",", "hist", ",", "ab", ")", ":", "trn_data", "=", "[", "oracle", ".", "data", "[", "n", "]", "for", "n", "in", "oracle", ".", "trn", "[", "s", "]", "]", "for", "k", "in", "trn_data",...
Accumulate counts for context
[ "Accumulate", "counts", "for", "context" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L226-L243
wangsix/vmo
vmo/analysis/analysis.py
tracking
def tracking(oracle, obs, trn_type=1, reverse_init=False, method='else', decay=1.0): """ Off-line tracking function using sub-optimal query-matching algorithm""" N = len(obs) if reverse_init: r_oracle = create_reverse_oracle(oracle) _ind = [r_oracle.n_states - rsfx for rsfx in r_oracle.rsfx[0][:]] init_ind = [] for i in _ind: s = i while oracle.sfx[s] != 0: s = oracle.sfx[s] init_ind.append(s) K = r_oracle.num_clusters() else: init_ind = oracle.rsfx[0][:] K = oracle.num_clusters() P = np.zeros((N, K), dtype='int') T = np.zeros((N,), dtype='int') map_k_outer = partial(_query_k, oracle=oracle, query=obs) map_query = partial(_query_init, oracle=oracle, query=obs[0], method=method) # map_query = partial(_query_init, oracle=oracle, query=obs[0], method) argmin = np.argmin P[0], C = zip(*map(map_query, init_ind)) C = np.array(C) T[0] = P[0][argmin(C)] if trn_type == 1: trn = _create_trn_self elif trn_type == 2: trn = _create_trn_sfx_rsfx else: trn = _create_trn distance_cache = np.zeros(oracle.n_states) for i in range(1, N): # iterate over the rest of query state_cache = [] dist_cache = distance_cache map_k_inner = partial(map_k_outer, i=i, P=P, trn=trn, state_cache=state_cache, dist_cache=dist_cache) P[i], _c = zip(*map(map_k_inner, range(K))) C = decay * C + np.array(_c) T[i] = P[i][argmin(C)] return T
python
def tracking(oracle, obs, trn_type=1, reverse_init=False, method='else', decay=1.0): """ Off-line tracking function using sub-optimal query-matching algorithm""" N = len(obs) if reverse_init: r_oracle = create_reverse_oracle(oracle) _ind = [r_oracle.n_states - rsfx for rsfx in r_oracle.rsfx[0][:]] init_ind = [] for i in _ind: s = i while oracle.sfx[s] != 0: s = oracle.sfx[s] init_ind.append(s) K = r_oracle.num_clusters() else: init_ind = oracle.rsfx[0][:] K = oracle.num_clusters() P = np.zeros((N, K), dtype='int') T = np.zeros((N,), dtype='int') map_k_outer = partial(_query_k, oracle=oracle, query=obs) map_query = partial(_query_init, oracle=oracle, query=obs[0], method=method) # map_query = partial(_query_init, oracle=oracle, query=obs[0], method) argmin = np.argmin P[0], C = zip(*map(map_query, init_ind)) C = np.array(C) T[0] = P[0][argmin(C)] if trn_type == 1: trn = _create_trn_self elif trn_type == 2: trn = _create_trn_sfx_rsfx else: trn = _create_trn distance_cache = np.zeros(oracle.n_states) for i in range(1, N): # iterate over the rest of query state_cache = [] dist_cache = distance_cache map_k_inner = partial(map_k_outer, i=i, P=P, trn=trn, state_cache=state_cache, dist_cache=dist_cache) P[i], _c = zip(*map(map_k_inner, range(K))) C = decay * C + np.array(_c) T[i] = P[i][argmin(C)] return T
[ "def", "tracking", "(", "oracle", ",", "obs", ",", "trn_type", "=", "1", ",", "reverse_init", "=", "False", ",", "method", "=", "'else'", ",", "decay", "=", "1.0", ")", ":", "N", "=", "len", "(", "obs", ")", "if", "reverse_init", ":", "r_oracle", "...
Off-line tracking function using sub-optimal query-matching algorithm
[ "Off", "-", "line", "tracking", "function", "using", "sub", "-", "optimal", "query", "-", "matching", "algorithm" ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L308-L355
wangsix/vmo
vmo/analysis/analysis.py
_query_init
def _query_init(k, oracle, query, method='all'): """A helper function for query-matching function initialization.""" if method == 'all': a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]]) dvec = (a * a).sum(axis=1) # Could skip the sqrt _d = dvec.argmin() return oracle.latent[oracle.data[k]][_d], dvec[_d] else: a = np.subtract(query, oracle.f_array[k]) dvec = (a * a).sum() # Could skip the sqrt return k, dvec
python
def _query_init(k, oracle, query, method='all'): """A helper function for query-matching function initialization.""" if method == 'all': a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]]) dvec = (a * a).sum(axis=1) # Could skip the sqrt _d = dvec.argmin() return oracle.latent[oracle.data[k]][_d], dvec[_d] else: a = np.subtract(query, oracle.f_array[k]) dvec = (a * a).sum() # Could skip the sqrt return k, dvec
[ "def", "_query_init", "(", "k", ",", "oracle", ",", "query", ",", "method", "=", "'all'", ")", ":", "if", "method", "==", "'all'", ":", "a", "=", "np", ".", "subtract", "(", "query", ",", "[", "oracle", ".", "f_array", "[", "t", "]", "for", "t", ...
A helper function for query-matching function initialization.
[ "A", "helper", "function", "for", "query", "-", "matching", "function", "initialization", "." ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L449-L460
wangsix/vmo
vmo/analysis/analysis.py
_dist_obs_oracle
def _dist_obs_oracle(oracle, query, trn_list): """A helper function calculating distances between a feature and frames in oracle.""" a = np.subtract(query, [oracle.f_array[t] for t in trn_list]) return (a * a).sum(axis=1)
python
def _dist_obs_oracle(oracle, query, trn_list): """A helper function calculating distances between a feature and frames in oracle.""" a = np.subtract(query, [oracle.f_array[t] for t in trn_list]) return (a * a).sum(axis=1)
[ "def", "_dist_obs_oracle", "(", "oracle", ",", "query", ",", "trn_list", ")", ":", "a", "=", "np", ".", "subtract", "(", "query", ",", "[", "oracle", ".", "f_array", "[", "t", "]", "for", "t", "in", "trn_list", "]", ")", "return", "(", "a", "*", ...
A helper function calculating distances between a feature and frames in oracle.
[ "A", "helper", "function", "calculating", "distances", "between", "a", "feature", "and", "frames", "in", "oracle", "." ]
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L463-L466
wangsix/vmo
vmo/analysis/analysis.py
_query_k
def _query_k(k, i, P, oracle, query, trn, state_cache, dist_cache, smooth=False, D=None, weight=0.5): """A helper function for query-matching function`s iteration over observations. Args: k - index of the candidate path i - index of the frames of the observations P - the path matrix of size K x N, K the number for paths initiated, N the frame number of observations oracle - an encoded oracle query - observations matrix (numpy array) of dimension N x D. D the dimension of the observation. trn - function handle of forward links vector gathering state_cache - a list storing the states visited during the for loop for k dist_cache - a list of the same lenth as oracle storing the distance calculated between the current observation and states in the oracle smooth - whether to enforce a preference on continuation or not D - Self-similarity matrix, required if smooth is set to True weight - the weight between continuation or jumps (1.0 for certain continuation) """ _trn = trn(oracle, P[i - 1][k]) t = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn])) _trn_unseen = [_t for _t in _trn if _t not in state_cache] state_cache.extend(_trn_unseen) if _trn_unseen: t_unseen = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn_unseen])) dist_cache[t_unseen] = _dist_obs_oracle(oracle, query[i], t_unseen) dvec = dist_cache[t] if smooth and P[i - 1][k] < oracle.n_states - 1: dvec = dvec * (1.0 - weight) + weight * np.array([D[P[i - 1][k]][_t - 1] for _t in t]) _m = np.argmin(dvec) return t[_m], dvec[_m]
python
def _query_k(k, i, P, oracle, query, trn, state_cache, dist_cache, smooth=False, D=None, weight=0.5): """A helper function for query-matching function`s iteration over observations. Args: k - index of the candidate path i - index of the frames of the observations P - the path matrix of size K x N, K the number for paths initiated, N the frame number of observations oracle - an encoded oracle query - observations matrix (numpy array) of dimension N x D. D the dimension of the observation. trn - function handle of forward links vector gathering state_cache - a list storing the states visited during the for loop for k dist_cache - a list of the same lenth as oracle storing the distance calculated between the current observation and states in the oracle smooth - whether to enforce a preference on continuation or not D - Self-similarity matrix, required if smooth is set to True weight - the weight between continuation or jumps (1.0 for certain continuation) """ _trn = trn(oracle, P[i - 1][k]) t = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn])) _trn_unseen = [_t for _t in _trn if _t not in state_cache] state_cache.extend(_trn_unseen) if _trn_unseen: t_unseen = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn_unseen])) dist_cache[t_unseen] = _dist_obs_oracle(oracle, query[i], t_unseen) dvec = dist_cache[t] if smooth and P[i - 1][k] < oracle.n_states - 1: dvec = dvec * (1.0 - weight) + weight * np.array([D[P[i - 1][k]][_t - 1] for _t in t]) _m = np.argmin(dvec) return t[_m], dvec[_m]
[ "def", "_query_k", "(", "k", ",", "i", ",", "P", ",", "oracle", ",", "query", ",", "trn", ",", "state_cache", ",", "dist_cache", ",", "smooth", "=", "False", ",", "D", "=", "None", ",", "weight", "=", "0.5", ")", ":", "_trn", "=", "trn", "(", "...
A helper function for query-matching function`s iteration over observations. Args: k - index of the candidate path i - index of the frames of the observations P - the path matrix of size K x N, K the number for paths initiated, N the frame number of observations oracle - an encoded oracle query - observations matrix (numpy array) of dimension N x D. D the dimension of the observation. trn - function handle of forward links vector gathering state_cache - a list storing the states visited during the for loop for k dist_cache - a list of the same lenth as oracle storing the distance calculated between the current observation and states in the oracle smooth - whether to enforce a preference on continuation or not D - Self-similarity matrix, required if smooth is set to True weight - the weight between continuation or jumps (1.0 for certain continuation)
[ "A", "helper", "function", "for", "query", "-", "matching", "function", "s", "iteration", "over", "observations", ".", "Args", ":", "k", "-", "index", "of", "the", "candidate", "path", "i", "-", "index", "of", "the", "frames", "of", "the", "observations", ...
train
https://github.com/wangsix/vmo/blob/bb1cc4cf1f33f0bb49e38c91126c1be1a0cdd09d/vmo/analysis/analysis.py#L469-L503
hayd/ctox
ctox/shell.py
safe_shell_out
def safe_shell_out(cmd, verbose=False, **kwargs): """run cmd and return True if it went ok, False if something went wrong. Suppress all output. """ # TODO rename this suppressed_shell_out ? # TODO this should probably return 1 if there's an error (i.e. vice-versa). # print("cmd %s" % cmd) try: with open(os.devnull, "w") as fnull: with captured_output(): check_output(cmd, stderr=fnull, **kwargs) return True except (CalledProcessError, OSError) as e: if verbose: cprint(" Error running command %s" % ' '.join(cmd), 'err') print(e.output) return False except Exception as e: # TODO no idea # Can this be if you try and unistall pip? (don't do that) return False
python
def safe_shell_out(cmd, verbose=False, **kwargs): """run cmd and return True if it went ok, False if something went wrong. Suppress all output. """ # TODO rename this suppressed_shell_out ? # TODO this should probably return 1 if there's an error (i.e. vice-versa). # print("cmd %s" % cmd) try: with open(os.devnull, "w") as fnull: with captured_output(): check_output(cmd, stderr=fnull, **kwargs) return True except (CalledProcessError, OSError) as e: if verbose: cprint(" Error running command %s" % ' '.join(cmd), 'err') print(e.output) return False except Exception as e: # TODO no idea # Can this be if you try and unistall pip? (don't do that) return False
[ "def", "safe_shell_out", "(", "cmd", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# TODO rename this suppressed_shell_out ?", "# TODO this should probably return 1 if there's an error (i.e. vice-versa).", "# print(\"cmd %s\" % cmd)", "try", ":", "with", "...
run cmd and return True if it went ok, False if something went wrong. Suppress all output.
[ "run", "cmd", "and", "return", "True", "if", "it", "went", "ok", "False", "if", "something", "went", "wrong", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/shell.py#L69-L91
hayd/ctox
ctox/shell.py
cprint
def cprint(message, status=None): """color printing based on status: None -> BRIGHT 'ok' -> GREEN 'err' -> RED 'warn' -> YELLOW """ # TODO use less obscure dict, probably "error", "warn", "success" as keys status = {'warn': Fore.YELLOW, 'err': Fore.RED, 'ok': Fore.GREEN, None: Style.BRIGHT}[status] print(status + message + Style.RESET_ALL)
python
def cprint(message, status=None): """color printing based on status: None -> BRIGHT 'ok' -> GREEN 'err' -> RED 'warn' -> YELLOW """ # TODO use less obscure dict, probably "error", "warn", "success" as keys status = {'warn': Fore.YELLOW, 'err': Fore.RED, 'ok': Fore.GREEN, None: Style.BRIGHT}[status] print(status + message + Style.RESET_ALL)
[ "def", "cprint", "(", "message", ",", "status", "=", "None", ")", ":", "# TODO use less obscure dict, probably \"error\", \"warn\", \"success\" as keys", "status", "=", "{", "'warn'", ":", "Fore", ".", "YELLOW", ",", "'err'", ":", "Fore", ".", "RED", ",", "'ok'", ...
color printing based on status: None -> BRIGHT 'ok' -> GREEN 'err' -> RED 'warn' -> YELLOW
[ "color", "printing", "based", "on", "status", ":" ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/shell.py#L111-L123
hayd/ctox
ctox/pkg.py
prev_deps
def prev_deps(env): """Naively gets the dependancies from the last time ctox was run.""" # TODO something more clever. if not os.path.isfile(env.envctoxfile): return [] with open(env.envctoxfile) as f: return f.read().split()
python
def prev_deps(env): """Naively gets the dependancies from the last time ctox was run.""" # TODO something more clever. if not os.path.isfile(env.envctoxfile): return [] with open(env.envctoxfile) as f: return f.read().split()
[ "def", "prev_deps", "(", "env", ")", ":", "# TODO something more clever.", "if", "not", "os", ".", "path", ".", "isfile", "(", "env", ".", "envctoxfile", ")", ":", "return", "[", "]", "with", "open", "(", "env", ".", "envctoxfile", ")", "as", "f", ":",...
Naively gets the dependancies from the last time ctox was run.
[ "Naively", "gets", "the", "dependancies", "from", "the", "last", "time", "ctox", "was", "run", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L79-L86
hayd/ctox
ctox/pkg.py
make_dist
def make_dist(toxinidir, toxdir, package): """zip up the package into the toxdir.""" dist = os.path.join(toxdir, "dist") # Suppress warnings. success = safe_shell_out(["python", "setup.py", "sdist", "--quiet", "--formats=zip", "--dist-dir", dist], cwd=toxinidir) if success: return os.path.join(dist, package + ".zip")
python
def make_dist(toxinidir, toxdir, package): """zip up the package into the toxdir.""" dist = os.path.join(toxdir, "dist") # Suppress warnings. success = safe_shell_out(["python", "setup.py", "sdist", "--quiet", "--formats=zip", "--dist-dir", dist], cwd=toxinidir) if success: return os.path.join(dist, package + ".zip")
[ "def", "make_dist", "(", "toxinidir", ",", "toxdir", ",", "package", ")", ":", "dist", "=", "os", ".", "path", ".", "join", "(", "toxdir", ",", "\"dist\"", ")", "# Suppress warnings.", "success", "=", "safe_shell_out", "(", "[", "\"python\"", ",", "\"setup...
zip up the package into the toxdir.
[ "zip", "up", "the", "package", "into", "the", "toxdir", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L89-L97
hayd/ctox
ctox/pkg.py
print_pretty_command
def print_pretty_command(env, command): """This is a hack for prettier printing. Rather than "{envpython} foo.py" we print "python foo.py". """ cmd = abbr_cmd = command[0] if cmd.startswith(env.envbindir): abbr_cmd = os.path.relpath(cmd, env.envbindir) if abbr_cmd == ".": # TODO are there more edge cases? abbr_cmd = cmd command[0] = abbr_cmd print('(%s)$ %s' % (env.name, ' '.join(['"%s"' % c if " " in c else c for c in command]))) command[0] = cmd return abbr_cmd, cmd, command
python
def print_pretty_command(env, command): """This is a hack for prettier printing. Rather than "{envpython} foo.py" we print "python foo.py". """ cmd = abbr_cmd = command[0] if cmd.startswith(env.envbindir): abbr_cmd = os.path.relpath(cmd, env.envbindir) if abbr_cmd == ".": # TODO are there more edge cases? abbr_cmd = cmd command[0] = abbr_cmd print('(%s)$ %s' % (env.name, ' '.join(['"%s"' % c if " " in c else c for c in command]))) command[0] = cmd return abbr_cmd, cmd, command
[ "def", "print_pretty_command", "(", "env", ",", "command", ")", ":", "cmd", "=", "abbr_cmd", "=", "command", "[", "0", "]", "if", "cmd", ".", "startswith", "(", "env", ".", "envbindir", ")", ":", "abbr_cmd", "=", "os", ".", "path", ".", "relpath", "(...
This is a hack for prettier printing. Rather than "{envpython} foo.py" we print "python foo.py".
[ "This", "is", "a", "hack", "for", "prettier", "printing", "." ]
train
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L158-L175
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.maximizeLikelihood
def maximizeLikelihood(self, optimize_brlen=False, approx_grad=False, logliktol=1.0e-2, nparamsretry=1, printfunc=None): """Maximize the log likelihood. Maximizes log likelihood with respect to model parameters and potentially branch lengths depending on `optimize_brlen`. If optimizing the branch lengths, iterates between optimizing the model parameters and branch lengths. Uses the L-BFGS-B method implemented in `scipy.optimize`. There is no return variable, but after call object attributes will correspond to maximimum likelihood values. Args: `optimize_brlen` (bool) Do we optimize branch lengths? `approx_grad` (bool) If `True`, then we numerically approximate the gradient rather than using the analytical values. `logliktol` (float) When using `optimize_brlen`, keep iterating between optimization of parameters and branch lengths until change in log likelihood is less than `logliktol`. `nparamsretry` (int >= 0) Number of times to retry parameter optimization from different initial values if it fails the first time. `printfunc` (`None` or a function) If not `None`, then we print using `printfunc` the detailed results of the optimization at each step. For instance, `printfunc` might be `sys.stderr.write` or `logger.info`. Returns: A string giving a summary of the maximization. 
""" # Some useful notes on optimization: # http://www.scipy-lectures.org/advanced/mathematical_optimization/ assert len(self.paramsarray) > 0, "No parameters to optimize" assert nparamsretry >= 0 assert logliktol > 0 def paramsfunc(x): """Negative log likelihood when `x` is params.""" self.paramsarray = x return -self.loglik def paramsdfunc(x): """Negative gradient log likelihood with respect to params.""" self.paramsarray = x return -self.dloglikarray def tfunc(x): """Negative log likelihood when `x` is branch lengths.""" self.t = x return -self.loglik def tdfunc(x): """Negative gradient loglik with respect to branch lengths.""" self.t = x return -self.dloglik_dt if approx_grad: paramsdfunc = False tdfunc = False self.dtcurrent = False self.dparamscurrent = False def _printResult(opttype, result, i, old, new): """Print summary of optimization result.""" if printfunc is not None: printfunc('Step {0}, optimized {1}.\n' 'Likelihood went from {2} to {3}.\n' 'Max magnitude in Jacobian is {4}.\n' 'Full optimization result:\n{5}\n'.format( i, opttype, old, new, scipy.absolute(result.jac).max(), result)) oldloglik = self.loglik converged = False firstbrlenpass = True options = {'ftol':1.0e-7} # optimization options summary = [] i = 1 while not converged: if (not self.dparamscurrent) and (not approx_grad): self.dtcurrent = False self.dparamscurrent = True nparamstry = 0 origparamsarray = self.paramsarray.copy() paramsconverged = False while not paramsconverged: result = scipy.optimize.minimize(paramsfunc, self.paramsarray, method='L-BFGS-B', jac=paramsdfunc, bounds=self.paramsarraybounds, options=options) _printResult('params', result, i, oldloglik, self.loglik) msg = ('Step {0}: optimized parameters, loglik went from ' '{1:.2f} to {2:.2f} ({3} iterations, {4} function ' 'evals)'.format(i, oldloglik, self.loglik, result.nit, result.nfev)) summary.append(msg) if result.success and (not (oldloglik - self.loglik > logliktol)): paramsconverged = True jacmax = 
scipy.absolute(result.jac).max() if (jacmax > 1000) and not (firstbrlenpass and optimize_brlen): warnings.warn("Optimizer reports convergence, " "but max element in Jacobian is {0}\n" "Summary of optimization:\n{1}".format( jacmax, summary)) else: if not result.success: resultmessage = result.message else: resultmessage = ('loglik increased in param optimization ' 'from {0} to {1}'.format(oldloglik, self.loglik)) nparamstry += 1 failmsg = ("Optimization failure {0}\n{1}\n{2}".format( nparamstry, resultmessage, '\n'.join(summary))) if nparamstry > nparamsretry: raise RuntimeError(failmsg) else: warnings.warn(failmsg + '\n\n' + "Re-trying with different initial params.") scipy.random.seed(nparamstry) # seed at geometric mean of original value, max # bound, min bound, and random number between max and min minarray = scipy.array([self.paramsarraybounds[j][0] for j in range(len(self.paramsarray))]) maxarray = scipy.array([self.paramsarraybounds[j][1] for j in range(len(self.paramsarray))]) randarray = scipy.random.uniform(minarray, maxarray) newarray = (minarray * maxarray * randarray * origparamsarray)**(1 / 4.) 
# geometric mean assert newarray.shape == self.paramsarray.shape assert (newarray > minarray).all() assert (newarray < maxarray).all() self.paramsarray = newarray i += 1 assert oldloglik - self.loglik <= logliktol if (self.loglik - oldloglik >= logliktol) or firstbrlenpass: firstbrlenpass = False oldloglik = self.loglik if optimize_brlen: if not approx_grad: self.dparamscurrent = False self.dtcurrent = True result = scipy.optimize.minimize(tfunc, self.t, method='L-BFGS-B', jac=tdfunc, options=options, bounds=[(ALMOST_ZERO, None)] * len(self.t)) _printResult('branches', result, i, oldloglik, self.loglik) summary.append('Step {0}: optimized branches, loglik ' 'went from {1:.2f} to {2:.2f} ({3} iterations, ' '{4} function evals)'.format(i, oldloglik, self.loglik, result.nit, result.nfev)) i += 1 assert result.success, ("Optimization failed\n{0}" "\n{1}\n{2}".format(result.message, self.t, '\n'.join(summary))) if oldloglik - self.loglik > logliktol: raise RuntimeError("loglik increased during t " "optimization: {0} to {1}".format( oldloglik, self.loglik)) elif self.loglik - oldloglik >= logliktol: oldloglik = self.loglik else: converged = True else: converged = True else: converged = True return '\n'.join(summary)
python
def maximizeLikelihood(self, optimize_brlen=False, approx_grad=False, logliktol=1.0e-2, nparamsretry=1, printfunc=None): """Maximize the log likelihood. Maximizes log likelihood with respect to model parameters and potentially branch lengths depending on `optimize_brlen`. If optimizing the branch lengths, iterates between optimizing the model parameters and branch lengths. Uses the L-BFGS-B method implemented in `scipy.optimize`. There is no return variable, but after call object attributes will correspond to maximimum likelihood values. Args: `optimize_brlen` (bool) Do we optimize branch lengths? `approx_grad` (bool) If `True`, then we numerically approximate the gradient rather than using the analytical values. `logliktol` (float) When using `optimize_brlen`, keep iterating between optimization of parameters and branch lengths until change in log likelihood is less than `logliktol`. `nparamsretry` (int >= 0) Number of times to retry parameter optimization from different initial values if it fails the first time. `printfunc` (`None` or a function) If not `None`, then we print using `printfunc` the detailed results of the optimization at each step. For instance, `printfunc` might be `sys.stderr.write` or `logger.info`. Returns: A string giving a summary of the maximization. 
""" # Some useful notes on optimization: # http://www.scipy-lectures.org/advanced/mathematical_optimization/ assert len(self.paramsarray) > 0, "No parameters to optimize" assert nparamsretry >= 0 assert logliktol > 0 def paramsfunc(x): """Negative log likelihood when `x` is params.""" self.paramsarray = x return -self.loglik def paramsdfunc(x): """Negative gradient log likelihood with respect to params.""" self.paramsarray = x return -self.dloglikarray def tfunc(x): """Negative log likelihood when `x` is branch lengths.""" self.t = x return -self.loglik def tdfunc(x): """Negative gradient loglik with respect to branch lengths.""" self.t = x return -self.dloglik_dt if approx_grad: paramsdfunc = False tdfunc = False self.dtcurrent = False self.dparamscurrent = False def _printResult(opttype, result, i, old, new): """Print summary of optimization result.""" if printfunc is not None: printfunc('Step {0}, optimized {1}.\n' 'Likelihood went from {2} to {3}.\n' 'Max magnitude in Jacobian is {4}.\n' 'Full optimization result:\n{5}\n'.format( i, opttype, old, new, scipy.absolute(result.jac).max(), result)) oldloglik = self.loglik converged = False firstbrlenpass = True options = {'ftol':1.0e-7} # optimization options summary = [] i = 1 while not converged: if (not self.dparamscurrent) and (not approx_grad): self.dtcurrent = False self.dparamscurrent = True nparamstry = 0 origparamsarray = self.paramsarray.copy() paramsconverged = False while not paramsconverged: result = scipy.optimize.minimize(paramsfunc, self.paramsarray, method='L-BFGS-B', jac=paramsdfunc, bounds=self.paramsarraybounds, options=options) _printResult('params', result, i, oldloglik, self.loglik) msg = ('Step {0}: optimized parameters, loglik went from ' '{1:.2f} to {2:.2f} ({3} iterations, {4} function ' 'evals)'.format(i, oldloglik, self.loglik, result.nit, result.nfev)) summary.append(msg) if result.success and (not (oldloglik - self.loglik > logliktol)): paramsconverged = True jacmax = 
scipy.absolute(result.jac).max() if (jacmax > 1000) and not (firstbrlenpass and optimize_brlen): warnings.warn("Optimizer reports convergence, " "but max element in Jacobian is {0}\n" "Summary of optimization:\n{1}".format( jacmax, summary)) else: if not result.success: resultmessage = result.message else: resultmessage = ('loglik increased in param optimization ' 'from {0} to {1}'.format(oldloglik, self.loglik)) nparamstry += 1 failmsg = ("Optimization failure {0}\n{1}\n{2}".format( nparamstry, resultmessage, '\n'.join(summary))) if nparamstry > nparamsretry: raise RuntimeError(failmsg) else: warnings.warn(failmsg + '\n\n' + "Re-trying with different initial params.") scipy.random.seed(nparamstry) # seed at geometric mean of original value, max # bound, min bound, and random number between max and min minarray = scipy.array([self.paramsarraybounds[j][0] for j in range(len(self.paramsarray))]) maxarray = scipy.array([self.paramsarraybounds[j][1] for j in range(len(self.paramsarray))]) randarray = scipy.random.uniform(minarray, maxarray) newarray = (minarray * maxarray * randarray * origparamsarray)**(1 / 4.) 
# geometric mean assert newarray.shape == self.paramsarray.shape assert (newarray > minarray).all() assert (newarray < maxarray).all() self.paramsarray = newarray i += 1 assert oldloglik - self.loglik <= logliktol if (self.loglik - oldloglik >= logliktol) or firstbrlenpass: firstbrlenpass = False oldloglik = self.loglik if optimize_brlen: if not approx_grad: self.dparamscurrent = False self.dtcurrent = True result = scipy.optimize.minimize(tfunc, self.t, method='L-BFGS-B', jac=tdfunc, options=options, bounds=[(ALMOST_ZERO, None)] * len(self.t)) _printResult('branches', result, i, oldloglik, self.loglik) summary.append('Step {0}: optimized branches, loglik ' 'went from {1:.2f} to {2:.2f} ({3} iterations, ' '{4} function evals)'.format(i, oldloglik, self.loglik, result.nit, result.nfev)) i += 1 assert result.success, ("Optimization failed\n{0}" "\n{1}\n{2}".format(result.message, self.t, '\n'.join(summary))) if oldloglik - self.loglik > logliktol: raise RuntimeError("loglik increased during t " "optimization: {0} to {1}".format( oldloglik, self.loglik)) elif self.loglik - oldloglik >= logliktol: oldloglik = self.loglik else: converged = True else: converged = True else: converged = True return '\n'.join(summary)
[ "def", "maximizeLikelihood", "(", "self", ",", "optimize_brlen", "=", "False", ",", "approx_grad", "=", "False", ",", "logliktol", "=", "1.0e-2", ",", "nparamsretry", "=", "1", ",", "printfunc", "=", "None", ")", ":", "# Some useful notes on optimization:", "# h...
Maximize the log likelihood. Maximizes log likelihood with respect to model parameters and potentially branch lengths depending on `optimize_brlen`. If optimizing the branch lengths, iterates between optimizing the model parameters and branch lengths. Uses the L-BFGS-B method implemented in `scipy.optimize`. There is no return variable, but after call object attributes will correspond to maximimum likelihood values. Args: `optimize_brlen` (bool) Do we optimize branch lengths? `approx_grad` (bool) If `True`, then we numerically approximate the gradient rather than using the analytical values. `logliktol` (float) When using `optimize_brlen`, keep iterating between optimization of parameters and branch lengths until change in log likelihood is less than `logliktol`. `nparamsretry` (int >= 0) Number of times to retry parameter optimization from different initial values if it fails the first time. `printfunc` (`None` or a function) If not `None`, then we print using `printfunc` the detailed results of the optimization at each step. For instance, `printfunc` might be `sys.stderr.write` or `logger.info`. Returns: A string giving a summary of the maximization.
[ "Maximize", "the", "log", "likelihood", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L327-L500
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.tree
def tree(self): """Tree with branch lengths in codon substitutions per site. The tree is a `Bio.Phylo.BaseTree.Tree` object. This is the current tree after whatever optimizations have been performed so far. """ bs = self.model.branchScale for node in self._tree.find_clades(): if node != self._tree.root: node.branch_length = self.t[self.name_to_nodeindex[node]] * bs return self._tree
python
def tree(self): """Tree with branch lengths in codon substitutions per site. The tree is a `Bio.Phylo.BaseTree.Tree` object. This is the current tree after whatever optimizations have been performed so far. """ bs = self.model.branchScale for node in self._tree.find_clades(): if node != self._tree.root: node.branch_length = self.t[self.name_to_nodeindex[node]] * bs return self._tree
[ "def", "tree", "(", "self", ")", ":", "bs", "=", "self", ".", "model", ".", "branchScale", "for", "node", "in", "self", ".", "_tree", ".", "find_clades", "(", ")", ":", "if", "node", "!=", "self", ".", "_tree", ".", "root", ":", "node", ".", "bra...
Tree with branch lengths in codon substitutions per site. The tree is a `Bio.Phylo.BaseTree.Tree` object. This is the current tree after whatever optimizations have been performed so far.
[ "Tree", "with", "branch", "lengths", "in", "codon", "substitutions", "per", "site", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L503-L515
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.paramsarraybounds
def paramsarraybounds(self): """Bounds for parameters in `paramsarray`.""" bounds = [] for (i, param) in self._index_to_param.items(): if isinstance(param, str): bounds.append(self.model.PARAMLIMITS[param]) elif isinstance(param, tuple): bounds.append(self.model.PARAMLIMITS[param[0]]) else: raise ValueError("Invalid param type") bounds = [(tup[0] + ALMOST_ZERO, tup[1] - ALMOST_ZERO) for tup in bounds] assert len(bounds) == len(self._index_to_param) return tuple(bounds)
python
def paramsarraybounds(self): """Bounds for parameters in `paramsarray`.""" bounds = [] for (i, param) in self._index_to_param.items(): if isinstance(param, str): bounds.append(self.model.PARAMLIMITS[param]) elif isinstance(param, tuple): bounds.append(self.model.PARAMLIMITS[param[0]]) else: raise ValueError("Invalid param type") bounds = [(tup[0] + ALMOST_ZERO, tup[1] - ALMOST_ZERO) for tup in bounds] assert len(bounds) == len(self._index_to_param) return tuple(bounds)
[ "def", "paramsarraybounds", "(", "self", ")", ":", "bounds", "=", "[", "]", "for", "(", "i", ",", "param", ")", "in", "self", ".", "_index_to_param", ".", "items", "(", ")", ":", "if", "isinstance", "(", "param", ",", "str", ")", ":", "bounds", "."...
Bounds for parameters in `paramsarray`.
[ "Bounds", "for", "parameters", "in", "paramsarray", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L518-L530
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.paramsarray
def paramsarray(self): """All free model parameters as 1-dimensional `numpy.ndarray`. You are allowed to update model parameters by direct assignment of this property.""" # Return copy of `_paramsarray` because setter checks if changed if self._paramsarray is not None: return self._paramsarray.copy() nparams = len(self._index_to_param) self._paramsarray = scipy.ndarray(shape=(nparams,), dtype='float') for (i, param) in self._index_to_param.items(): if isinstance(param, str): self._paramsarray[i] = getattr(self.model, param) elif isinstance(param, tuple): self._paramsarray[i] = getattr(self.model, param[0])[param[1]] else: raise ValueError("Invalid param type") return self._paramsarray.copy()
python
def paramsarray(self): """All free model parameters as 1-dimensional `numpy.ndarray`. You are allowed to update model parameters by direct assignment of this property.""" # Return copy of `_paramsarray` because setter checks if changed if self._paramsarray is not None: return self._paramsarray.copy() nparams = len(self._index_to_param) self._paramsarray = scipy.ndarray(shape=(nparams,), dtype='float') for (i, param) in self._index_to_param.items(): if isinstance(param, str): self._paramsarray[i] = getattr(self.model, param) elif isinstance(param, tuple): self._paramsarray[i] = getattr(self.model, param[0])[param[1]] else: raise ValueError("Invalid param type") return self._paramsarray.copy()
[ "def", "paramsarray", "(", "self", ")", ":", "# Return copy of `_paramsarray` because setter checks if changed", "if", "self", ".", "_paramsarray", "is", "not", "None", ":", "return", "self", ".", "_paramsarray", ".", "copy", "(", ")", "nparams", "=", "len", "(", ...
All free model parameters as 1-dimensional `numpy.ndarray`. You are allowed to update model parameters by direct assignment of this property.
[ "All", "free", "model", "parameters", "as", "1", "-", "dimensional", "numpy", ".", "ndarray", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L533-L550
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.paramsarray
def paramsarray(self, value): """Set new `paramsarray` and update via `updateParams`.""" nparams = len(self._index_to_param) assert (isinstance(value, scipy.ndarray) and value.ndim == 1), ( "paramsarray must be 1-dim ndarray") assert len(value) == nparams, ("Assigning paramsarray to ndarray " "of the wrong length.") if (self._paramsarray is not None) and all(value == self._paramsarray): return # do not need to do anything if value has not changed # build `newvalues` to pass to `updateParams` newvalues = {} vectorized_params = {} for (i, param) in self._index_to_param.items(): if isinstance(param, str): newvalues[param] = float(value[i]) elif isinstance(param, tuple): (iparam, iparamindex) = param if iparam in vectorized_params: assert iparamindex not in vectorized_params[iparam] vectorized_params[iparam][iparamindex] = float(value[i]) else: vectorized_params[iparam] = {iparamindex:float(value[i])} else: raise ValueError("Invalid param type") for (param, paramd) in vectorized_params.items(): assert set(paramd.keys()) == set(range(len(paramd))) newvalues[param] = scipy.array([paramd[i] for i in range(len(paramd))], dtype='float') self.updateParams(newvalues) self._paramsarray = self.paramsarray
python
def paramsarray(self, value): """Set new `paramsarray` and update via `updateParams`.""" nparams = len(self._index_to_param) assert (isinstance(value, scipy.ndarray) and value.ndim == 1), ( "paramsarray must be 1-dim ndarray") assert len(value) == nparams, ("Assigning paramsarray to ndarray " "of the wrong length.") if (self._paramsarray is not None) and all(value == self._paramsarray): return # do not need to do anything if value has not changed # build `newvalues` to pass to `updateParams` newvalues = {} vectorized_params = {} for (i, param) in self._index_to_param.items(): if isinstance(param, str): newvalues[param] = float(value[i]) elif isinstance(param, tuple): (iparam, iparamindex) = param if iparam in vectorized_params: assert iparamindex not in vectorized_params[iparam] vectorized_params[iparam][iparamindex] = float(value[i]) else: vectorized_params[iparam] = {iparamindex:float(value[i])} else: raise ValueError("Invalid param type") for (param, paramd) in vectorized_params.items(): assert set(paramd.keys()) == set(range(len(paramd))) newvalues[param] = scipy.array([paramd[i] for i in range(len(paramd))], dtype='float') self.updateParams(newvalues) self._paramsarray = self.paramsarray
[ "def", "paramsarray", "(", "self", ",", "value", ")", ":", "nparams", "=", "len", "(", "self", ".", "_index_to_param", ")", "assert", "(", "isinstance", "(", "value", ",", "scipy", ".", "ndarray", ")", "and", "value", ".", "ndim", "==", "1", ")", ","...
Set new `paramsarray` and update via `updateParams`.
[ "Set", "new", "paramsarray", "and", "update", "via", "updateParams", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L553-L582
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.dtcurrent
def dtcurrent(self, value): """Set value of `dtcurrent`, update derivatives if needed.""" assert isinstance(value, bool) if value and self.dparamscurrent: raise RuntimeError("Can't set both dparamscurrent and dtcurrent True") if value != self.dtcurrent: self._dtcurrent = value self._updateInternals()
python
def dtcurrent(self, value): """Set value of `dtcurrent`, update derivatives if needed.""" assert isinstance(value, bool) if value and self.dparamscurrent: raise RuntimeError("Can't set both dparamscurrent and dtcurrent True") if value != self.dtcurrent: self._dtcurrent = value self._updateInternals()
[ "def", "dtcurrent", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "bool", ")", "if", "value", "and", "self", ".", "dparamscurrent", ":", "raise", "RuntimeError", "(", "\"Can't set both dparamscurrent and dtcurrent True\"", ")", ...
Set value of `dtcurrent`, update derivatives if needed.
[ "Set", "value", "of", "dtcurrent", "update", "derivatives", "if", "needed", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L605-L612
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.t
def t(self, value): """Set new branch lengths, update likelihood and derivatives.""" assert (isinstance(value, scipy.ndarray) and (value.dtype == 'float') and (value.shape == self.t.shape)) if (self._t != value).any(): self._t = value.copy() self._updateInternals()
python
def t(self, value): """Set new branch lengths, update likelihood and derivatives.""" assert (isinstance(value, scipy.ndarray) and (value.dtype == 'float') and (value.shape == self.t.shape)) if (self._t != value).any(): self._t = value.copy() self._updateInternals()
[ "def", "t", "(", "self", ",", "value", ")", ":", "assert", "(", "isinstance", "(", "value", ",", "scipy", ".", "ndarray", ")", "and", "(", "value", ".", "dtype", "==", "'float'", ")", "and", "(", "value", ".", "shape", "==", "self", ".", "t", "."...
Set new branch lengths, update likelihood and derivatives.
[ "Set", "new", "branch", "lengths", "update", "likelihood", "and", "derivatives", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L620-L626
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.dloglikarray
def dloglikarray(self): """Derivative of `loglik` with respect to `paramsarray`.""" assert self.dparamscurrent, "dloglikarray requires paramscurrent == True" nparams = len(self._index_to_param) dloglikarray = scipy.ndarray(shape=(nparams,), dtype='float') for (i, param) in self._index_to_param.items(): if isinstance(param, str): dloglikarray[i] = self.dloglik[param] elif isinstance(param, tuple): dloglikarray[i] = self.dloglik[param[0]][param[1]] return dloglikarray
python
def dloglikarray(self): """Derivative of `loglik` with respect to `paramsarray`.""" assert self.dparamscurrent, "dloglikarray requires paramscurrent == True" nparams = len(self._index_to_param) dloglikarray = scipy.ndarray(shape=(nparams,), dtype='float') for (i, param) in self._index_to_param.items(): if isinstance(param, str): dloglikarray[i] = self.dloglik[param] elif isinstance(param, tuple): dloglikarray[i] = self.dloglik[param[0]][param[1]] return dloglikarray
[ "def", "dloglikarray", "(", "self", ")", ":", "assert", "self", ".", "dparamscurrent", ",", "\"dloglikarray requires paramscurrent == True\"", "nparams", "=", "len", "(", "self", ".", "_index_to_param", ")", "dloglikarray", "=", "scipy", ".", "ndarray", "(", "shap...
Derivative of `loglik` with respect to `paramsarray`.
[ "Derivative", "of", "loglik", "with", "respect", "to", "paramsarray", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L641-L651
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood.updateParams
def updateParams(self, newvalues): """Update model parameters and re-compute likelihoods. This method is the **only** acceptable way to update model parameters. The likelihood is re-computed as needed by this method. Args: `newvalues` (dict) A dictionary keyed by param name and with value as new value to set. Each parameter name must either be a valid model parameter (in `model.freeparams`). """ for (param, value) in newvalues.items(): if param not in self.model.freeparams: raise RuntimeError("Can't handle param: {0}".format( param)) if newvalues: self.model.updateParams(newvalues) self._updateInternals() self._paramsarray = None
python
def updateParams(self, newvalues): """Update model parameters and re-compute likelihoods. This method is the **only** acceptable way to update model parameters. The likelihood is re-computed as needed by this method. Args: `newvalues` (dict) A dictionary keyed by param name and with value as new value to set. Each parameter name must either be a valid model parameter (in `model.freeparams`). """ for (param, value) in newvalues.items(): if param not in self.model.freeparams: raise RuntimeError("Can't handle param: {0}".format( param)) if newvalues: self.model.updateParams(newvalues) self._updateInternals() self._paramsarray = None
[ "def", "updateParams", "(", "self", ",", "newvalues", ")", ":", "for", "(", "param", ",", "value", ")", "in", "newvalues", ".", "items", "(", ")", ":", "if", "param", "not", "in", "self", ".", "model", ".", "freeparams", ":", "raise", "RuntimeError", ...
Update model parameters and re-compute likelihoods. This method is the **only** acceptable way to update model parameters. The likelihood is re-computed as needed by this method. Args: `newvalues` (dict) A dictionary keyed by param name and with value as new value to set. Each parameter name must either be a valid model parameter (in `model.freeparams`).
[ "Update", "model", "parameters", "and", "re", "-", "compute", "likelihoods", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L653-L673
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._updateInternals
def _updateInternals(self): """Update internal attributes related to likelihood. Should be called any time branch lengths or model parameters are changed. """ rootnode = self.nnodes - 1 if self._distributionmodel: catweights = self.model.catweights else: catweights = scipy.ones(1, dtype='float') # When there are multiple categories, it is acceptable # for some (but not all) of them to have underflow at # any given site. Note that we still include a check for # Underflow by ensuring that none of the site likelihoods is # zero. undererrstate = 'ignore' if len(catweights) > 1 else 'raise' with scipy.errstate(over='raise', under=undererrstate, divide='raise', invalid='raise'): self.underflowlogscale.fill(0.0) self._computePartialLikelihoods() sitelik = scipy.zeros(self.nsites, dtype='float') assert (self.L[rootnode] >= 0).all(), str(self.L[rootnode]) for k in self._catindices: sitelik += scipy.sum(self._stationarystate(k) * self.L[rootnode][k], axis=1) * catweights[k] assert (sitelik > 0).all(), "Underflow:\n{0}\n{1}".format( sitelik, self.underflowlogscale) self.siteloglik = scipy.log(sitelik) + self.underflowlogscale self.loglik = scipy.sum(self.siteloglik) + self.model.logprior if self.dparamscurrent: self._dloglik = {} for param in self.model.freeparams: if self._distributionmodel and (param in self.model.distributionparams): name = self.model.distributedparam weighted_dk = (self.model.d_distributionparams[param] * catweights) else: name = param weighted_dk = catweights dsiteloglik = 0 for k in self._catindices: dsiteloglik += (scipy.sum( self._dstationarystate(k, name) * self.L[rootnode][k] + self.dL[name][rootnode][k] * self._stationarystate(k), axis=-1) * weighted_dk[k]) dsiteloglik /= sitelik self._dloglik[param] = (scipy.sum(dsiteloglik, axis=-1) + self.model.dlogprior(param)) if self.dtcurrent: self._dloglik_dt = 0 dLnroot_dt = scipy.array([self.dL_dt[n2][rootnode] for n2 in sorted(self.dL_dt.keys())]) for k in self._catindices: if isinstance(k, int): 
dLnrootk_dt = dLnroot_dt.swapaxes(0, 1)[k] else: assert k == slice(None) dLnrootk_dt = dLnroot_dt self._dloglik_dt += catweights[k] * scipy.sum( self._stationarystate(k) * dLnrootk_dt, axis=-1) self._dloglik_dt /= sitelik self._dloglik_dt = scipy.sum(self._dloglik_dt, axis=-1) assert self._dloglik_dt.shape == self.t.shape
python
def _updateInternals(self): """Update internal attributes related to likelihood. Should be called any time branch lengths or model parameters are changed. """ rootnode = self.nnodes - 1 if self._distributionmodel: catweights = self.model.catweights else: catweights = scipy.ones(1, dtype='float') # When there are multiple categories, it is acceptable # for some (but not all) of them to have underflow at # any given site. Note that we still include a check for # Underflow by ensuring that none of the site likelihoods is # zero. undererrstate = 'ignore' if len(catweights) > 1 else 'raise' with scipy.errstate(over='raise', under=undererrstate, divide='raise', invalid='raise'): self.underflowlogscale.fill(0.0) self._computePartialLikelihoods() sitelik = scipy.zeros(self.nsites, dtype='float') assert (self.L[rootnode] >= 0).all(), str(self.L[rootnode]) for k in self._catindices: sitelik += scipy.sum(self._stationarystate(k) * self.L[rootnode][k], axis=1) * catweights[k] assert (sitelik > 0).all(), "Underflow:\n{0}\n{1}".format( sitelik, self.underflowlogscale) self.siteloglik = scipy.log(sitelik) + self.underflowlogscale self.loglik = scipy.sum(self.siteloglik) + self.model.logprior if self.dparamscurrent: self._dloglik = {} for param in self.model.freeparams: if self._distributionmodel and (param in self.model.distributionparams): name = self.model.distributedparam weighted_dk = (self.model.d_distributionparams[param] * catweights) else: name = param weighted_dk = catweights dsiteloglik = 0 for k in self._catindices: dsiteloglik += (scipy.sum( self._dstationarystate(k, name) * self.L[rootnode][k] + self.dL[name][rootnode][k] * self._stationarystate(k), axis=-1) * weighted_dk[k]) dsiteloglik /= sitelik self._dloglik[param] = (scipy.sum(dsiteloglik, axis=-1) + self.model.dlogprior(param)) if self.dtcurrent: self._dloglik_dt = 0 dLnroot_dt = scipy.array([self.dL_dt[n2][rootnode] for n2 in sorted(self.dL_dt.keys())]) for k in self._catindices: if isinstance(k, int): 
dLnrootk_dt = dLnroot_dt.swapaxes(0, 1)[k] else: assert k == slice(None) dLnrootk_dt = dLnroot_dt self._dloglik_dt += catweights[k] * scipy.sum( self._stationarystate(k) * dLnrootk_dt, axis=-1) self._dloglik_dt /= sitelik self._dloglik_dt = scipy.sum(self._dloglik_dt, axis=-1) assert self._dloglik_dt.shape == self.t.shape
[ "def", "_updateInternals", "(", "self", ")", ":", "rootnode", "=", "self", ".", "nnodes", "-", "1", "if", "self", ".", "_distributionmodel", ":", "catweights", "=", "self", ".", "model", ".", "catweights", "else", ":", "catweights", "=", "scipy", ".", "o...
Update internal attributes related to likelihood. Should be called any time branch lengths or model parameters are changed.
[ "Update", "internal", "attributes", "related", "to", "likelihood", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L675-L741
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._M
def _M(self, k, t, tips=None, gaps=None): """Returns matrix exponential `M`.""" if self._distributionmodel: return self.model.M(k, t, tips, gaps) else: return self.model.M(t, tips, gaps)
python
def _M(self, k, t, tips=None, gaps=None): """Returns matrix exponential `M`.""" if self._distributionmodel: return self.model.M(k, t, tips, gaps) else: return self.model.M(t, tips, gaps)
[ "def", "_M", "(", "self", ",", "k", ",", "t", ",", "tips", "=", "None", ",", "gaps", "=", "None", ")", ":", "if", "self", ".", "_distributionmodel", ":", "return", "self", ".", "model", ".", "M", "(", "k", ",", "t", ",", "tips", ",", "gaps", ...
Returns matrix exponential `M`.
[ "Returns", "matrix", "exponential", "M", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L743-L748
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._dM
def _dM(self, k, t, param, M, tips=None, gaps=None): """Returns derivative of matrix exponential.""" if self._distributionmodel: return self.model.dM(k, t, param, M, tips, gaps) else: return self.model.dM(t, param, M, tips, gaps)
python
def _dM(self, k, t, param, M, tips=None, gaps=None): """Returns derivative of matrix exponential.""" if self._distributionmodel: return self.model.dM(k, t, param, M, tips, gaps) else: return self.model.dM(t, param, M, tips, gaps)
[ "def", "_dM", "(", "self", ",", "k", ",", "t", ",", "param", ",", "M", ",", "tips", "=", "None", ",", "gaps", "=", "None", ")", ":", "if", "self", ".", "_distributionmodel", ":", "return", "self", ".", "model", ".", "dM", "(", "k", ",", "t", ...
Returns derivative of matrix exponential.
[ "Returns", "derivative", "of", "matrix", "exponential", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L750-L755
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._stationarystate
def _stationarystate(self, k): """Returns the stationarystate .""" if self._distributionmodel: return self.model.stationarystate(k) else: return self.model.stationarystate
python
def _stationarystate(self, k): """Returns the stationarystate .""" if self._distributionmodel: return self.model.stationarystate(k) else: return self.model.stationarystate
[ "def", "_stationarystate", "(", "self", ",", "k", ")", ":", "if", "self", ".", "_distributionmodel", ":", "return", "self", ".", "model", ".", "stationarystate", "(", "k", ")", "else", ":", "return", "self", ".", "model", ".", "stationarystate" ]
Returns the stationarystate .
[ "Returns", "the", "stationarystate", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L757-L762
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._dstationarystate
def _dstationarystate(self, k, param): """Returns the dstationarystate .""" if self._distributionmodel: return self.model.dstationarystate(k, param) else: return self.model.dstationarystate(param)
python
def _dstationarystate(self, k, param): """Returns the dstationarystate .""" if self._distributionmodel: return self.model.dstationarystate(k, param) else: return self.model.dstationarystate(param)
[ "def", "_dstationarystate", "(", "self", ",", "k", ",", "param", ")", ":", "if", "self", ".", "_distributionmodel", ":", "return", "self", ".", "model", ".", "dstationarystate", "(", "k", ",", "param", ")", "else", ":", "return", "self", ".", "model", ...
Returns the dstationarystate .
[ "Returns", "the", "dstationarystate", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L764-L769
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._paramlist_PartialLikelihoods
def _paramlist_PartialLikelihoods(self): """List of parameters looped over in `_computePartialLikelihoods`.""" if self._distributionmodel: return [param for param in self.model.freeparams + [self.model.distributedparam] if param not in self.model.distributionparams] else: return self.model.freeparams
python
def _paramlist_PartialLikelihoods(self): """List of parameters looped over in `_computePartialLikelihoods`.""" if self._distributionmodel: return [param for param in self.model.freeparams + [self.model.distributedparam] if param not in self.model.distributionparams] else: return self.model.freeparams
[ "def", "_paramlist_PartialLikelihoods", "(", "self", ")", ":", "if", "self", ".", "_distributionmodel", ":", "return", "[", "param", "for", "param", "in", "self", ".", "model", ".", "freeparams", "+", "[", "self", ".", "model", ".", "distributedparam", "]", ...
List of parameters looped over in `_computePartialLikelihoods`.
[ "List", "of", "parameters", "looped", "over", "in", "_computePartialLikelihoods", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L780-L787
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._computePartialLikelihoods
def _computePartialLikelihoods(self): """Update `L`, `dL`, `dL_dt`.""" for n in range(self.ntips, self.nnodes): ni = n - self.ntips # internal node number nright = self.rdescend[ni] nleft = self.ldescend[ni] if nright < self.ntips: istipr = True else: istipr = False if nleft < self.ntips: istipl = True else: istipl = False tright = self.t[nright] tleft = self.t[nleft] self.L[n] = scipy.ndarray(self._Lshape, dtype='float') if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: self.dL[param][n] = scipy.ndarray(self._dLshape[param], dtype='float') if self.dtcurrent: for n2 in self.dL_dt.keys(): self.dL_dt[n2][n] = scipy.zeros(self._Lshape, dtype='float') for k in self._catindices: if istipr: Mright = MLright = self._M(k, tright, self.tips[nright], self.gaps[nright]) else: Mright = self._M(k, tright) MLright = broadcastMatrixVectorMultiply(Mright, self.L[nright][k]) if istipl: Mleft = MLleft = self._M(k, tleft, self.tips[nleft], self.gaps[nleft]) else: Mleft = self._M(k, tleft) MLleft = broadcastMatrixVectorMultiply(Mleft, self.L[nleft][k]) self.L[n][k] = MLright * MLleft if self.dtcurrent: for (tx, Mx, nx, MLxother, istipx) in [ (tright, Mright, nright, MLleft, istipr), (tleft, Mleft, nleft, MLright, istipl)]: if istipx: tipsx = self.tips[nx] gapsx = self.gaps[nx] else: tipsx = gapsx = None dM_dt = self._dM(k, tx, 't', Mx, tipsx, gapsx) if istipx: LdM_dt = dM_dt else: LdM_dt = broadcastMatrixVectorMultiply( dM_dt, self.L[nx][k]) self.dL_dt[nx][n][k] = LdM_dt * MLxother for ndx in self.descendants[nx]: self.dL_dt[ndx][n][k] = broadcastMatrixVectorMultiply( Mx, self.dL_dt[ndx][nx][k]) * MLxother if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: if istipr: dMright = self._dM(k, tright, param, Mright, self.tips[nright], self.gaps[nright]) else: dMright = self._dM(k, tright, param, Mright) if istipl: dMleft = self._dM(k, tleft, param, Mleft, self.tips[nleft], self.gaps[nleft]) else: dMleft = self._dM(k, tleft, param, Mleft) for j 
in self._sub_index_param(param): if istipr: dMLright = dMright[j] MdLright = 0 else: dMLright = broadcastMatrixVectorMultiply( dMright[j], self.L[nright][k]) MdLright = broadcastMatrixVectorMultiply( Mright, self.dL[param][nright][k][j]) if istipl: dMLleft = dMleft[j] MdLleft = 0 else: dMLleft = broadcastMatrixVectorMultiply( dMleft[j], self.L[nleft][k]) MdLleft = broadcastMatrixVectorMultiply( Mleft, self.dL[param][nleft][k][j]) self.dL[param][n][k][j] = ((dMLright + MdLright) * MLleft + MLright * (dMLleft + MdLleft)) if ni > 0 and ni % self.underflowfreq == 0: # rescale by same amount for each category k scale = scipy.amax(scipy.array([scipy.amax(self.L[n][k], axis=1) for k in self._catindices]), axis=0) assert scale.shape == (self.nsites,) self.underflowlogscale += scipy.log(scale) for k in self._catindices: self.L[n][k] /= scale[:, scipy.newaxis] if self.dtcurrent: for n2 in self.dL_dt.keys(): self.dL_dt[n2][n][k] /= scale[:, scipy.newaxis] if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: for j in self._sub_index_param(param): self.dL[param][n][k][j] /= scale[:, scipy.newaxis] # free unneeded memory by deleting already used values for ntodel in [nright, nleft]: if ntodel in self.L: del self.L[ntodel] if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: if ntodel in self.dL[param]: del self.dL[param][ntodel] if self.dtcurrent: for n2 in self.dL_dt.keys(): if ntodel in self.dL_dt[n2]: del self.dL_dt[n2][ntodel]
python
def _computePartialLikelihoods(self): """Update `L`, `dL`, `dL_dt`.""" for n in range(self.ntips, self.nnodes): ni = n - self.ntips # internal node number nright = self.rdescend[ni] nleft = self.ldescend[ni] if nright < self.ntips: istipr = True else: istipr = False if nleft < self.ntips: istipl = True else: istipl = False tright = self.t[nright] tleft = self.t[nleft] self.L[n] = scipy.ndarray(self._Lshape, dtype='float') if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: self.dL[param][n] = scipy.ndarray(self._dLshape[param], dtype='float') if self.dtcurrent: for n2 in self.dL_dt.keys(): self.dL_dt[n2][n] = scipy.zeros(self._Lshape, dtype='float') for k in self._catindices: if istipr: Mright = MLright = self._M(k, tright, self.tips[nright], self.gaps[nright]) else: Mright = self._M(k, tright) MLright = broadcastMatrixVectorMultiply(Mright, self.L[nright][k]) if istipl: Mleft = MLleft = self._M(k, tleft, self.tips[nleft], self.gaps[nleft]) else: Mleft = self._M(k, tleft) MLleft = broadcastMatrixVectorMultiply(Mleft, self.L[nleft][k]) self.L[n][k] = MLright * MLleft if self.dtcurrent: for (tx, Mx, nx, MLxother, istipx) in [ (tright, Mright, nright, MLleft, istipr), (tleft, Mleft, nleft, MLright, istipl)]: if istipx: tipsx = self.tips[nx] gapsx = self.gaps[nx] else: tipsx = gapsx = None dM_dt = self._dM(k, tx, 't', Mx, tipsx, gapsx) if istipx: LdM_dt = dM_dt else: LdM_dt = broadcastMatrixVectorMultiply( dM_dt, self.L[nx][k]) self.dL_dt[nx][n][k] = LdM_dt * MLxother for ndx in self.descendants[nx]: self.dL_dt[ndx][n][k] = broadcastMatrixVectorMultiply( Mx, self.dL_dt[ndx][nx][k]) * MLxother if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: if istipr: dMright = self._dM(k, tright, param, Mright, self.tips[nright], self.gaps[nright]) else: dMright = self._dM(k, tright, param, Mright) if istipl: dMleft = self._dM(k, tleft, param, Mleft, self.tips[nleft], self.gaps[nleft]) else: dMleft = self._dM(k, tleft, param, Mleft) for j 
in self._sub_index_param(param): if istipr: dMLright = dMright[j] MdLright = 0 else: dMLright = broadcastMatrixVectorMultiply( dMright[j], self.L[nright][k]) MdLright = broadcastMatrixVectorMultiply( Mright, self.dL[param][nright][k][j]) if istipl: dMLleft = dMleft[j] MdLleft = 0 else: dMLleft = broadcastMatrixVectorMultiply( dMleft[j], self.L[nleft][k]) MdLleft = broadcastMatrixVectorMultiply( Mleft, self.dL[param][nleft][k][j]) self.dL[param][n][k][j] = ((dMLright + MdLright) * MLleft + MLright * (dMLleft + MdLleft)) if ni > 0 and ni % self.underflowfreq == 0: # rescale by same amount for each category k scale = scipy.amax(scipy.array([scipy.amax(self.L[n][k], axis=1) for k in self._catindices]), axis=0) assert scale.shape == (self.nsites,) self.underflowlogscale += scipy.log(scale) for k in self._catindices: self.L[n][k] /= scale[:, scipy.newaxis] if self.dtcurrent: for n2 in self.dL_dt.keys(): self.dL_dt[n2][n][k] /= scale[:, scipy.newaxis] if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: for j in self._sub_index_param(param): self.dL[param][n][k][j] /= scale[:, scipy.newaxis] # free unneeded memory by deleting already used values for ntodel in [nright, nleft]: if ntodel in self.L: del self.L[ntodel] if self.dparamscurrent: for param in self._paramlist_PartialLikelihoods: if ntodel in self.dL[param]: del self.dL[param][ntodel] if self.dtcurrent: for n2 in self.dL_dt.keys(): if ntodel in self.dL_dt[n2]: del self.dL_dt[n2][ntodel]
[ "def", "_computePartialLikelihoods", "(", "self", ")", ":", "for", "n", "in", "range", "(", "self", ".", "ntips", ",", "self", ".", "nnodes", ")", ":", "ni", "=", "n", "-", "self", ".", "ntips", "# internal node number", "nright", "=", "self", ".", "rd...
Update `L`, `dL`, `dL_dt`.
[ "Update", "L", "dL", "dL_dt", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L789-L909
jbloomlab/phydms
phydmslib/treelikelihood.py
TreeLikelihood._sub_index_param
def _sub_index_param(self, param): """Returns list of sub-indexes for `param`. Used in computing partial likelihoods; loop over these indices.""" if self._distributionmodel and (param == self.model.distributedparam): indices = [()] else: paramvalue = getattr(self.model, param) if isinstance(paramvalue, float): indices = [()] elif (isinstance(paramvalue, scipy.ndarray) and paramvalue.ndim == 1 and paramvalue.shape[0] > 1): indices = [(j,) for j in range(len(paramvalue))] else: raise RuntimeError("Invalid param: {0}, {1}".format( param, paramvalue)) return indices
python
def _sub_index_param(self, param): """Returns list of sub-indexes for `param`. Used in computing partial likelihoods; loop over these indices.""" if self._distributionmodel and (param == self.model.distributedparam): indices = [()] else: paramvalue = getattr(self.model, param) if isinstance(paramvalue, float): indices = [()] elif (isinstance(paramvalue, scipy.ndarray) and paramvalue.ndim == 1 and paramvalue.shape[0] > 1): indices = [(j,) for j in range(len(paramvalue))] else: raise RuntimeError("Invalid param: {0}, {1}".format( param, paramvalue)) return indices
[ "def", "_sub_index_param", "(", "self", ",", "param", ")", ":", "if", "self", ".", "_distributionmodel", "and", "(", "param", "==", "self", ".", "model", ".", "distributedparam", ")", ":", "indices", "=", "[", "(", ")", "]", "else", ":", "paramvalue", ...
Returns list of sub-indexes for `param`. Used in computing partial likelihoods; loop over these indices.
[ "Returns", "list", "of", "sub", "-", "indexes", "for", "param", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L911-L928
markfinger/python-js-host
js_host/js_host.py
JSHost.send_request
def send_request(self, *args, **kwargs): """ Intercept connection errors which suggest that a managed host has crashed and raise an exception indicating the location of the log """ try: return super(JSHost, self).send_request(*args, **kwargs) except RequestsConnectionError as e: if ( self.manager and self.has_connected and self.logfile and 'unsafe' not in kwargs ): raise ProcessError( '{} appears to have crashed, you can inspect the log file at {}'.format( self.get_name(), self.logfile, ) ) raise six.reraise(RequestsConnectionError, RequestsConnectionError(*e.args), sys.exc_info()[2])
python
def send_request(self, *args, **kwargs): """ Intercept connection errors which suggest that a managed host has crashed and raise an exception indicating the location of the log """ try: return super(JSHost, self).send_request(*args, **kwargs) except RequestsConnectionError as e: if ( self.manager and self.has_connected and self.logfile and 'unsafe' not in kwargs ): raise ProcessError( '{} appears to have crashed, you can inspect the log file at {}'.format( self.get_name(), self.logfile, ) ) raise six.reraise(RequestsConnectionError, RequestsConnectionError(*e.args), sys.exc_info()[2])
[ "def", "send_request", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "super", "(", "JSHost", ",", "self", ")", ".", "send_request", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "RequestsConnectio...
Intercept connection errors which suggest that a managed host has crashed and raise an exception indicating the location of the log
[ "Intercept", "connection", "errors", "which", "suggest", "that", "a", "managed", "host", "has", "crashed", "and", "raise", "an", "exception", "indicating", "the", "location", "of", "the", "log" ]
train
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/js_host.py#L70-L90
all-umass/graphs
graphs/construction/b_matching.py
b_matching
def b_matching(D, k, max_iter=1000, damping=1, conv_thresh=1e-4, weighted=False, verbose=False): ''' "Belief-Propagation for Weighted b-Matchings on Arbitrary Graphs and its Relation to Linear Programs with Integer Solutions" Bayati et al. Finds the minimal weight perfect b-matching using min-sum loopy-BP. @param D pairwise distance matrix @param k number of neighbors per vertex (scalar or array-like) Based on the code at http://www.cs.columbia.edu/~bert/code/bmatching/bdmatch ''' INTERVAL = 2 oscillation = 10 cbuff = np.zeros(100, dtype=float) cbuffpos = 0 N = D.shape[0] assert D.shape[1] == N, 'Input distance matrix must be square' mask = ~np.eye(N, dtype=bool) # Assume all nonzero except for diagonal W = -D[mask].reshape((N, -1)).astype(float) degrees = np.clip(np.atleast_1d(k), 0, N-1) if degrees.size == 1: # broadcast scalar up to length-N array degrees = np.repeat(degrees, N) else: assert degrees.shape == (N,), 'Input degrees must have length N' # TODO: remove these later inds = np.tile(np.arange(N), (N, 1)) backinds = inds.copy() inds = inds[mask].reshape((N, -1)) backinds = backinds.T.ravel()[:(N*(N-1))].reshape((N, -1)) # Run Belief Revision change = 1.0 B = W.copy() for n_iter in range(1, max_iter+1): oldB = B.copy() update_belief(oldB, B, W, degrees, damping, inds, backinds) # check for convergence if n_iter % INTERVAL == 0: # track changes c = np.abs(B[:,0]).sum() # c may be infinite here, and that's ok with np.errstate(invalid='ignore'): if np.any(np.abs(c - cbuff) < conv_thresh): oscillation -= 1 cbuff[cbuffpos] = c cbuffpos = (cbuffpos + 1) % len(cbuff) change = diff_belief(B, oldB) if np.isnan(change): warnings.warn("change is NaN! " "BP will quit but solution could be invalid. 
" "Problem may be infeasible.") break if change < conv_thresh or oscillation < 1: break else: warnings.warn("Hit iteration limit (%d) before converging" % max_iter) if verbose: # pragma: no cover if change < conv_thresh: print("Converged to stable beliefs in %d iterations" % n_iter) elif oscillation < 1: print("Stopped after reaching oscillation in %d iterations" % n_iter) print("No feasible solution found or there are multiple maxima.") print("Outputting best approximate solution. Try damping.") # recover result from B thresholds = np.zeros(N) for i,d in enumerate(degrees): Brow = B[i] if d >= N - 1: thresholds[i] = -np.inf elif d < 1: thresholds[i] = np.inf else: thresholds[i] = Brow[quickselect(-Brow, d-1)] ii,jj = np.where(B >= thresholds[:,None]) pairs = np.column_stack((ii, inds[ii,jj])) w = D[ii, pairs[:,1]] if weighted else None return Graph.from_edge_pairs(pairs, num_vertices=N, weights=w)
python
def b_matching(D, k, max_iter=1000, damping=1, conv_thresh=1e-4, weighted=False, verbose=False): ''' "Belief-Propagation for Weighted b-Matchings on Arbitrary Graphs and its Relation to Linear Programs with Integer Solutions" Bayati et al. Finds the minimal weight perfect b-matching using min-sum loopy-BP. @param D pairwise distance matrix @param k number of neighbors per vertex (scalar or array-like) Based on the code at http://www.cs.columbia.edu/~bert/code/bmatching/bdmatch ''' INTERVAL = 2 oscillation = 10 cbuff = np.zeros(100, dtype=float) cbuffpos = 0 N = D.shape[0] assert D.shape[1] == N, 'Input distance matrix must be square' mask = ~np.eye(N, dtype=bool) # Assume all nonzero except for diagonal W = -D[mask].reshape((N, -1)).astype(float) degrees = np.clip(np.atleast_1d(k), 0, N-1) if degrees.size == 1: # broadcast scalar up to length-N array degrees = np.repeat(degrees, N) else: assert degrees.shape == (N,), 'Input degrees must have length N' # TODO: remove these later inds = np.tile(np.arange(N), (N, 1)) backinds = inds.copy() inds = inds[mask].reshape((N, -1)) backinds = backinds.T.ravel()[:(N*(N-1))].reshape((N, -1)) # Run Belief Revision change = 1.0 B = W.copy() for n_iter in range(1, max_iter+1): oldB = B.copy() update_belief(oldB, B, W, degrees, damping, inds, backinds) # check for convergence if n_iter % INTERVAL == 0: # track changes c = np.abs(B[:,0]).sum() # c may be infinite here, and that's ok with np.errstate(invalid='ignore'): if np.any(np.abs(c - cbuff) < conv_thresh): oscillation -= 1 cbuff[cbuffpos] = c cbuffpos = (cbuffpos + 1) % len(cbuff) change = diff_belief(B, oldB) if np.isnan(change): warnings.warn("change is NaN! " "BP will quit but solution could be invalid. 
" "Problem may be infeasible.") break if change < conv_thresh or oscillation < 1: break else: warnings.warn("Hit iteration limit (%d) before converging" % max_iter) if verbose: # pragma: no cover if change < conv_thresh: print("Converged to stable beliefs in %d iterations" % n_iter) elif oscillation < 1: print("Stopped after reaching oscillation in %d iterations" % n_iter) print("No feasible solution found or there are multiple maxima.") print("Outputting best approximate solution. Try damping.") # recover result from B thresholds = np.zeros(N) for i,d in enumerate(degrees): Brow = B[i] if d >= N - 1: thresholds[i] = -np.inf elif d < 1: thresholds[i] = np.inf else: thresholds[i] = Brow[quickselect(-Brow, d-1)] ii,jj = np.where(B >= thresholds[:,None]) pairs = np.column_stack((ii, inds[ii,jj])) w = D[ii, pairs[:,1]] if weighted else None return Graph.from_edge_pairs(pairs, num_vertices=N, weights=w)
[ "def", "b_matching", "(", "D", ",", "k", ",", "max_iter", "=", "1000", ",", "damping", "=", "1", ",", "conv_thresh", "=", "1e-4", ",", "weighted", "=", "False", ",", "verbose", "=", "False", ")", ":", "INTERVAL", "=", "2", "oscillation", "=", "10", ...
"Belief-Propagation for Weighted b-Matchings on Arbitrary Graphs and its Relation to Linear Programs with Integer Solutions" Bayati et al. Finds the minimal weight perfect b-matching using min-sum loopy-BP. @param D pairwise distance matrix @param k number of neighbors per vertex (scalar or array-like) Based on the code at http://www.cs.columbia.edu/~bert/code/bmatching/bdmatch
[ "Belief", "-", "Propagation", "for", "Weighted", "b", "-", "Matchings", "on", "Arbitrary", "Graphs", "and", "its", "Relation", "to", "Linear", "Programs", "with", "Integer", "Solutions", "Bayati", "et", "al", ".", "Finds", "the", "minimal", "weight", "perfect"...
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/b_matching.py#L10-L94
all-umass/graphs
graphs/construction/b_matching.py
_updateB
def _updateB(oldB, B, W, degrees, damping, inds, backinds): # pragma: no cover '''belief update function.''' for j,d in enumerate(degrees): kk = inds[j] bk = backinds[j] if d == 0: B[kk,bk] = -np.inf continue belief = W[kk,bk] + W[j] oldBj = oldB[j] if d == oldBj.shape[0]: bth = quickselect(-oldBj, d-1) bplus = -1 else: bth,bplus = quickselect(-oldBj, d-1, d) belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth]) B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk]
python
def _updateB(oldB, B, W, degrees, damping, inds, backinds): # pragma: no cover '''belief update function.''' for j,d in enumerate(degrees): kk = inds[j] bk = backinds[j] if d == 0: B[kk,bk] = -np.inf continue belief = W[kk,bk] + W[j] oldBj = oldB[j] if d == oldBj.shape[0]: bth = quickselect(-oldBj, d-1) bplus = -1 else: bth,bplus = quickselect(-oldBj, d-1, d) belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth]) B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk]
[ "def", "_updateB", "(", "oldB", ",", "B", ",", "W", ",", "degrees", ",", "damping", ",", "inds", ",", "backinds", ")", ":", "# pragma: no cover\r", "for", "j", ",", "d", "in", "enumerate", "(", "degrees", ")", ":", "kk", "=", "inds", "[", "j", "]",...
belief update function.
[ "belief", "update", "function", "." ]
train
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/b_matching.py#L127-L146
tallforasmurf/byteplay
examples/make_constants.py
_func_copy
def _func_copy(f, newcode) : ''' Return a copy of function f with a different __code__ Because I can't find proper documentation on the correct signature of the types.FunctionType() constructor, I pass the minimum arguments then set the important dunder-values by direct assignment. Note you cannot assign __closure__, it is a "read-only attribute". Ergo, you should not apply _make_constants() to a function that has a closure! ''' newf = types.FunctionType( newcode, f.__globals__ ) newf.__annotations__ = f.__annotations__ # newf.__closure__ = f.__closure__ newf.__defaults__ = f.__defaults__ newf.__doc__ = f.__doc__ newf.__name__ = f.__name__ newf.__kwdefaults__ = f.__kwdefaults__ newf.__qualname__ = f.__qualname__ return newf
python
def _func_copy(f, newcode) : ''' Return a copy of function f with a different __code__ Because I can't find proper documentation on the correct signature of the types.FunctionType() constructor, I pass the minimum arguments then set the important dunder-values by direct assignment. Note you cannot assign __closure__, it is a "read-only attribute". Ergo, you should not apply _make_constants() to a function that has a closure! ''' newf = types.FunctionType( newcode, f.__globals__ ) newf.__annotations__ = f.__annotations__ # newf.__closure__ = f.__closure__ newf.__defaults__ = f.__defaults__ newf.__doc__ = f.__doc__ newf.__name__ = f.__name__ newf.__kwdefaults__ = f.__kwdefaults__ newf.__qualname__ = f.__qualname__ return newf
[ "def", "_func_copy", "(", "f", ",", "newcode", ")", ":", "newf", "=", "types", ".", "FunctionType", "(", "newcode", ",", "f", ".", "__globals__", ")", "newf", ".", "__annotations__", "=", "f", ".", "__annotations__", "# newf.__closure__ = f.__closure__\r", "ne...
Return a copy of function f with a different __code__ Because I can't find proper documentation on the correct signature of the types.FunctionType() constructor, I pass the minimum arguments then set the important dunder-values by direct assignment. Note you cannot assign __closure__, it is a "read-only attribute". Ergo, you should not apply _make_constants() to a function that has a closure!
[ "Return", "a", "copy", "of", "function", "f", "with", "a", "different", "__code__", "Because", "I", "can", "t", "find", "proper", "documentation", "on", "the", "correct", "signature", "of", "the", "types", ".", "FunctionType", "()", "constructor", "I", "pass...
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/examples/make_constants.py#L83-L103
tallforasmurf/byteplay
examples/make_constants.py
bind_all
def bind_all(mc, builtin_only=False, stoplist=[], verbose=False): """Recursively apply constant binding to functions in a module or class. Use as the last line of the module (after everything is defined, but before test code). In modules that need modifiable globals, set builtin_only to True. """ import types try: d = vars(mc) except TypeError: return for k, v in d.items(): if isinstance( v, types.FunctionType ) : if verbose : print( 'make_constants(', v.__name__, ')' ) newv = _make_constants(v, builtin_only, stoplist, verbose) setattr(mc, k, newv) elif type(v) in ( type, types.ModuleType ): bind_all(v, builtin_only, stoplist, verbose)
python
def bind_all(mc, builtin_only=False, stoplist=[], verbose=False): """Recursively apply constant binding to functions in a module or class. Use as the last line of the module (after everything is defined, but before test code). In modules that need modifiable globals, set builtin_only to True. """ import types try: d = vars(mc) except TypeError: return for k, v in d.items(): if isinstance( v, types.FunctionType ) : if verbose : print( 'make_constants(', v.__name__, ')' ) newv = _make_constants(v, builtin_only, stoplist, verbose) setattr(mc, k, newv) elif type(v) in ( type, types.ModuleType ): bind_all(v, builtin_only, stoplist, verbose)
[ "def", "bind_all", "(", "mc", ",", "builtin_only", "=", "False", ",", "stoplist", "=", "[", "]", ",", "verbose", "=", "False", ")", ":", "import", "types", "try", ":", "d", "=", "vars", "(", "mc", ")", "except", "TypeError", ":", "return", "for", "...
Recursively apply constant binding to functions in a module or class. Use as the last line of the module (after everything is defined, but before test code). In modules that need modifiable globals, set builtin_only to True.
[ "Recursively", "apply", "constant", "binding", "to", "functions", "in", "a", "module", "or", "class", ".", "Use", "as", "the", "last", "line", "of", "the", "module", "(", "after", "everything", "is", "defined", "but", "before", "test", "code", ")", ".", ...
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/examples/make_constants.py#L227-L247
tallforasmurf/byteplay
examples/make_constants.py
make_constants
def make_constants(builtin_only=False, stoplist=[], verbose=False): """ Return a decorator for optimizing global references. Verify that the first argument is a function. """ if type(builtin_only) == type(make_constants): raise ValueError("The make_constants decorator must have arguments.") return lambda f: _make_constants(f, builtin_only, stoplist, verbose)
python
def make_constants(builtin_only=False, stoplist=[], verbose=False): """ Return a decorator for optimizing global references. Verify that the first argument is a function. """ if type(builtin_only) == type(make_constants): raise ValueError("The make_constants decorator must have arguments.") return lambda f: _make_constants(f, builtin_only, stoplist, verbose)
[ "def", "make_constants", "(", "builtin_only", "=", "False", ",", "stoplist", "=", "[", "]", ",", "verbose", "=", "False", ")", ":", "if", "type", "(", "builtin_only", ")", "==", "type", "(", "make_constants", ")", ":", "raise", "ValueError", "(", "\"The ...
Return a decorator for optimizing global references. Verify that the first argument is a function.
[ "Return", "a", "decorator", "for", "optimizing", "global", "references", ".", "Verify", "that", "the", "first", "argument", "is", "a", "function", "." ]
train
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/examples/make_constants.py#L250-L257
havardgulldahl/mopidycli
mopidycli/cli.py
state
def state(): '''Get The playback state: 'playing', 'paused', or 'stopped'. If PLAYING or PAUSED, show information on current track. Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get PlaybackController.get_current_track() and PlaybackController.get_time_position()''' server = getServer() state = server.core.playback.get_state() logging.debug('Got playback state: %r', state) if state.upper() == 'STOPPED': print('Playback is currently stopped') else: track = server.core.playback.get_current_track() logging.debug('Track is %r', track) logging.debug('Track loaded is %r', jsonrpclib.jsonclass.load(track)) pos = server.core.playback.get_time_position() logging.debug('Pos is %r', pos) print('{} track: "{}", by {} (at {})'.format(state.title(), track['name'], ','.join([a['name'] for a in track['artists']]), formatTimeposition(pos)) )
python
def state(): '''Get The playback state: 'playing', 'paused', or 'stopped'. If PLAYING or PAUSED, show information on current track. Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get PlaybackController.get_current_track() and PlaybackController.get_time_position()''' server = getServer() state = server.core.playback.get_state() logging.debug('Got playback state: %r', state) if state.upper() == 'STOPPED': print('Playback is currently stopped') else: track = server.core.playback.get_current_track() logging.debug('Track is %r', track) logging.debug('Track loaded is %r', jsonrpclib.jsonclass.load(track)) pos = server.core.playback.get_time_position() logging.debug('Pos is %r', pos) print('{} track: "{}", by {} (at {})'.format(state.title(), track['name'], ','.join([a['name'] for a in track['artists']]), formatTimeposition(pos)) )
[ "def", "state", "(", ")", ":", "server", "=", "getServer", "(", ")", "state", "=", "server", ".", "core", ".", "playback", ".", "get_state", "(", ")", "logging", ".", "debug", "(", "'Got playback state: %r'", ",", "state", ")", "if", "state", ".", "upp...
Get The playback state: 'playing', 'paused', or 'stopped'. If PLAYING or PAUSED, show information on current track. Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get PlaybackController.get_current_track() and PlaybackController.get_time_position()
[ "Get", "The", "playback", "state", ":", "playing", "paused", "or", "stopped", "." ]
train
https://github.com/havardgulldahl/mopidycli/blob/e869b07c2bec7b7a4c2ffc4ab7d5caa28c346f31/mopidycli/cli.py#L31-L55
havardgulldahl/mopidycli
mopidycli/cli.py
pause
def pause(): '''Pause playback. Calls PlaybackController.pause()''' server = getServer() server.core.playback.pause() pos = server.core.playback.get_time_position() print('Paused at {}'.format(formatTimeposition(pos)))
python
def pause(): '''Pause playback. Calls PlaybackController.pause()''' server = getServer() server.core.playback.pause() pos = server.core.playback.get_time_position() print('Paused at {}'.format(formatTimeposition(pos)))
[ "def", "pause", "(", ")", ":", "server", "=", "getServer", "(", ")", "server", ".", "core", ".", "playback", ".", "pause", "(", ")", "pos", "=", "server", ".", "core", ".", "playback", ".", "get_time_position", "(", ")", "print", "(", "'Paused at {}'",...
Pause playback. Calls PlaybackController.pause()
[ "Pause", "playback", "." ]
train
https://github.com/havardgulldahl/mopidycli/blob/e869b07c2bec7b7a4c2ffc4ab7d5caa28c346f31/mopidycli/cli.py#L65-L73
havardgulldahl/mopidycli
mopidycli/cli.py
tracklist
def tracklist(): '''Get tracklist Calls TracklistController.get_tl_tracks() ''' _c = 0 server = getServer() _current = server.core.tracklist.index() for t in server.core.tracklist.get_tl_tracks(): logging.debug('Got tl trak: %r', t) currently = ' -- CURRENT' if t['tlid'] == _current else '' print('{}: {}{}'.format(t['tlid'], t['track']['name'], currently)) _c = _c+1 print('==='*6) print('{} tracks in tracklist'.format(_c))
python
def tracklist(): '''Get tracklist Calls TracklistController.get_tl_tracks() ''' _c = 0 server = getServer() _current = server.core.tracklist.index() for t in server.core.tracklist.get_tl_tracks(): logging.debug('Got tl trak: %r', t) currently = ' -- CURRENT' if t['tlid'] == _current else '' print('{}: {}{}'.format(t['tlid'], t['track']['name'], currently)) _c = _c+1 print('==='*6) print('{} tracks in tracklist'.format(_c))
[ "def", "tracklist", "(", ")", ":", "_c", "=", "0", "server", "=", "getServer", "(", ")", "_current", "=", "server", ".", "core", ".", "tracklist", ".", "index", "(", ")", "for", "t", "in", "server", ".", "core", ".", "tracklist", ".", "get_tl_tracks"...
Get tracklist Calls TracklistController.get_tl_tracks()
[ "Get", "tracklist" ]
train
https://github.com/havardgulldahl/mopidycli/blob/e869b07c2bec7b7a4c2ffc4ab7d5caa28c346f31/mopidycli/cli.py#L103-L117
havardgulldahl/mopidycli
mopidycli/cli.py
play_backend_uri
def play_backend_uri(argv=None): '''Get album or track from backend uri and play all tracks found. uri is a string which represents some directory belonging to a backend. Calls LibraryController.browse(uri) to get an album and LibraryController.lookup(uri) to get track''' if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser(description='Browse directories and tracks at the given uri and play them/it.') parser.add_argument('uri', help='The key that represents some directory belonging to a backend. E.g. plex:album:2323 or spotify:album:xxxx') parser.add_argument('-l', '--loglevel', help='Logging level. Default: %(default)s.', choices=('debug', 'info', 'warning', 'error'), default='warning') args = parse_args_and_apply_logging_level(parser, argv) server = getServer() hits = server.core.library.browse(args.uri) # browse(): Returns a list of mopidy.models.Ref objects for the directories and tracks at the given uri. logging.info('Got hits from browse(): %r', hits) if len(hits) == 0: # try track lookup hits = server.core.library.lookup(args.uri) logging.info('Got hits from lookup() : %r', hits) if len(hits) == 0: print('No hits for "{}"'.format(args.uri)) else: server.core.tracklist.clear() logging.debug('got special uris: %r', [t['uri'] for t in hits]) server.core.tracklist.add(uris=[t['uri'] for t in hits]) server.core.playback.play()
python
def play_backend_uri(argv=None): '''Get album or track from backend uri and play all tracks found. uri is a string which represents some directory belonging to a backend. Calls LibraryController.browse(uri) to get an album and LibraryController.lookup(uri) to get track''' if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser(description='Browse directories and tracks at the given uri and play them/it.') parser.add_argument('uri', help='The key that represents some directory belonging to a backend. E.g. plex:album:2323 or spotify:album:xxxx') parser.add_argument('-l', '--loglevel', help='Logging level. Default: %(default)s.', choices=('debug', 'info', 'warning', 'error'), default='warning') args = parse_args_and_apply_logging_level(parser, argv) server = getServer() hits = server.core.library.browse(args.uri) # browse(): Returns a list of mopidy.models.Ref objects for the directories and tracks at the given uri. logging.info('Got hits from browse(): %r', hits) if len(hits) == 0: # try track lookup hits = server.core.library.lookup(args.uri) logging.info('Got hits from lookup() : %r', hits) if len(hits) == 0: print('No hits for "{}"'.format(args.uri)) else: server.core.tracklist.clear() logging.debug('got special uris: %r', [t['uri'] for t in hits]) server.core.tracklist.add(uris=[t['uri'] for t in hits]) server.core.playback.play()
[ "def", "play_backend_uri", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Browse directories and tracks ...
Get album or track from backend uri and play all tracks found. uri is a string which represents some directory belonging to a backend. Calls LibraryController.browse(uri) to get an album and LibraryController.lookup(uri) to get track
[ "Get", "album", "or", "track", "from", "backend", "uri", "and", "play", "all", "tracks", "found", "." ]
train
https://github.com/havardgulldahl/mopidycli/blob/e869b07c2bec7b7a4c2ffc4ab7d5caa28c346f31/mopidycli/cli.py#L128-L159
chbrown/argv
argv/iterables.py
peekable.peek
def peek(self, default=None): '''Returns `default` is there is no subsequent item''' try: result = self.pointer.next() # immediately push it back onto the front of the iterable self.pointer = itertools.chain([result], self.pointer) return result except StopIteration: # nothing to put back; iterating doesn't change anything past the end return default
python
def peek(self, default=None): '''Returns `default` is there is no subsequent item''' try: result = self.pointer.next() # immediately push it back onto the front of the iterable self.pointer = itertools.chain([result], self.pointer) return result except StopIteration: # nothing to put back; iterating doesn't change anything past the end return default
[ "def", "peek", "(", "self", ",", "default", "=", "None", ")", ":", "try", ":", "result", "=", "self", ".", "pointer", ".", "next", "(", ")", "# immediately push it back onto the front of the iterable", "self", ".", "pointer", "=", "itertools", ".", "chain", ...
Returns `default` is there is no subsequent item
[ "Returns", "default", "is", "there", "is", "no", "subsequent", "item" ]
train
https://github.com/chbrown/argv/blob/5e2b0424a060027c029ad9c16d90bd14a2ff53f8/argv/iterables.py#L14-L23
ubccr/pinky
pinky/canonicalization/equiv.py
compute_equiv_class
def compute_equiv_class(atom): """(atom)->Computes a unique integer for an atom""" try: equiv_class = atom.number + \ 1000*(atom.charge+10) + \ 100000*(atom.hcount) + \ 1000000*(atom.weight) except TypeError: raise ValueError, \ "Can't compute number from atom.number %s atom.charge %s atom.hcount %s"\ " atom.weight %s"%(atom.number, atom.charge, atom.hcount, atom.weight) return equiv_class
python
def compute_equiv_class(atom): """(atom)->Computes a unique integer for an atom""" try: equiv_class = atom.number + \ 1000*(atom.charge+10) + \ 100000*(atom.hcount) + \ 1000000*(atom.weight) except TypeError: raise ValueError, \ "Can't compute number from atom.number %s atom.charge %s atom.hcount %s"\ " atom.weight %s"%(atom.number, atom.charge, atom.hcount, atom.weight) return equiv_class
[ "def", "compute_equiv_class", "(", "atom", ")", ":", "try", ":", "equiv_class", "=", "atom", ".", "number", "+", "1000", "*", "(", "atom", ".", "charge", "+", "10", ")", "+", "100000", "*", "(", "atom", ".", "hcount", ")", "+", "1000000", "*", "(",...
(atom)->Computes a unique integer for an atom
[ "(", "atom", ")", "-", ">", "Computes", "a", "unique", "integer", "for", "an", "atom" ]
train
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/canonicalization/equiv.py#L39-L50
pedroburon/tbk
tbk/webpay/payment.py
Payment.token
def token(self): """ Token given by Transbank for payment initialization url. Will raise PaymentError when an error ocurred. """ if not self._token: self._token = self.fetch_token() logger.payment(self) return self._token
python
def token(self): """ Token given by Transbank for payment initialization url. Will raise PaymentError when an error ocurred. """ if not self._token: self._token = self.fetch_token() logger.payment(self) return self._token
[ "def", "token", "(", "self", ")", ":", "if", "not", "self", ".", "_token", ":", "self", ".", "_token", "=", "self", ".", "fetch_token", "(", ")", "logger", ".", "payment", "(", "self", ")", "return", "self", ".", "_token" ]
Token given by Transbank for payment initialization url. Will raise PaymentError when an error ocurred.
[ "Token", "given", "by", "Transbank", "for", "payment", "initialization", "url", "." ]
train
https://github.com/pedroburon/tbk/blob/ecd6741e0bae06269eb4ac885c3ffcb7902ee40e/tbk/webpay/payment.py#L76-L85
pedroburon/tbk
tbk/webpay/payment.py
Payment.transaction_id
def transaction_id(self): """ Transaction ID for Transbank, a secure random int between 0 and 999999999. """ if not self._transaction_id: self._transaction_id = random.randint(0, 10000000000 - 1) return self._transaction_id
python
def transaction_id(self): """ Transaction ID for Transbank, a secure random int between 0 and 999999999. """ if not self._transaction_id: self._transaction_id = random.randint(0, 10000000000 - 1) return self._transaction_id
[ "def", "transaction_id", "(", "self", ")", ":", "if", "not", "self", ".", "_transaction_id", ":", "self", ".", "_transaction_id", "=", "random", ".", "randint", "(", "0", ",", "10000000000", "-", "1", ")", "return", "self", ".", "_transaction_id" ]
Transaction ID for Transbank, a secure random int between 0 and 999999999.
[ "Transaction", "ID", "for", "Transbank", "a", "secure", "random", "int", "between", "0", "and", "999999999", "." ]
train
https://github.com/pedroburon/tbk/blob/ecd6741e0bae06269eb4ac885c3ffcb7902ee40e/tbk/webpay/payment.py#L152-L158
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
PhenomDWaveforms._sanity_check
def _sanity_check(self): """Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter. """ if any(self.m1 < 0.0): raise ValueError("Mass 1 is negative.") if any(self.m2 < 0.0): raise ValueError("Mass 2 is negative.") if any(self.chi_1 < -1.0) or any(self.chi_1 > 1.0): raise ValueError("Chi 1 is outside [-1.0, 1.0].") if any(self.chi_2 < -1.0) or any(self.chi_2 > 1.0): raise ValueError("Chi 2 is outside [-1.0, 1.0].") if any(self.z <= 0.0): raise ValueError("Redshift is zero or negative.") if any(self.dist <= 0.0): raise ValueError("Distance is zero or negative.") if any(self.st < 0.0): raise ValueError("Start Time is negative.") if any(self.et < 0.0): raise ValueError("End Time is negative.") if len(np.where(self.st < self.et)[0]) != 0: raise ValueError("Start Time is less than End time.") if any(self.m1/self.m2 > 1.0000001e4) or any(self.m1/self.m2 < 9.999999e-5): raise ValueError("Mass Ratio too far from unity.") return
python
def _sanity_check(self): """Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter. """ if any(self.m1 < 0.0): raise ValueError("Mass 1 is negative.") if any(self.m2 < 0.0): raise ValueError("Mass 2 is negative.") if any(self.chi_1 < -1.0) or any(self.chi_1 > 1.0): raise ValueError("Chi 1 is outside [-1.0, 1.0].") if any(self.chi_2 < -1.0) or any(self.chi_2 > 1.0): raise ValueError("Chi 2 is outside [-1.0, 1.0].") if any(self.z <= 0.0): raise ValueError("Redshift is zero or negative.") if any(self.dist <= 0.0): raise ValueError("Distance is zero or negative.") if any(self.st < 0.0): raise ValueError("Start Time is negative.") if any(self.et < 0.0): raise ValueError("End Time is negative.") if len(np.where(self.st < self.et)[0]) != 0: raise ValueError("Start Time is less than End time.") if any(self.m1/self.m2 > 1.0000001e4) or any(self.m1/self.m2 < 9.999999e-5): raise ValueError("Mass Ratio too far from unity.") return
[ "def", "_sanity_check", "(", "self", ")", ":", "if", "any", "(", "self", ".", "m1", "<", "0.0", ")", ":", "raise", "ValueError", "(", "\"Mass 1 is negative.\"", ")", "if", "any", "(", "self", ".", "m2", "<", "0.0", ")", ":", "raise", "ValueError", "(...
Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter.
[ "Check", "if", "parameters", "are", "okay", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L86-L124
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
PhenomDWaveforms._broadcast_and_set_attrs
def _broadcast_and_set_attrs(self, local_dict): """Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1 """ del local_dict['self'] self.remove_axis = False max_length = 0 for key in local_dict: try: length = len(local_dict[key]) if length > max_length: max_length = length except TypeError: pass if max_length == 0: self.remove_axis = True for key in local_dict: setattr(self, key, np.array([local_dict[key]])) # check for bad length arrays else: for key in local_dict: try: if len(local_dict[key]) < max_length and len(local_dict[key]) > 1: raise ValueError("Casting parameters not correct." + " Need all at a maximum shape and the rest being" + "len-1 arrays or scalars") except TypeError: pass # broadcast arrays for key in local_dict: try: if len(local_dict[key]) == max_length: setattr(self, key, local_dict[key]) elif len(local_dict[key]) == 1: setattr(self, key, np.full((max_length,), local_dict[key][0])) except TypeError: setattr(self, key, np.full((max_length,), local_dict[key])) return
python
def _broadcast_and_set_attrs(self, local_dict): """Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1 """ del local_dict['self'] self.remove_axis = False max_length = 0 for key in local_dict: try: length = len(local_dict[key]) if length > max_length: max_length = length except TypeError: pass if max_length == 0: self.remove_axis = True for key in local_dict: setattr(self, key, np.array([local_dict[key]])) # check for bad length arrays else: for key in local_dict: try: if len(local_dict[key]) < max_length and len(local_dict[key]) > 1: raise ValueError("Casting parameters not correct." + " Need all at a maximum shape and the rest being" + "len-1 arrays or scalars") except TypeError: pass # broadcast arrays for key in local_dict: try: if len(local_dict[key]) == max_length: setattr(self, key, local_dict[key]) elif len(local_dict[key]) == 1: setattr(self, key, np.full((max_length,), local_dict[key][0])) except TypeError: setattr(self, key, np.full((max_length,), local_dict[key])) return
[ "def", "_broadcast_and_set_attrs", "(", "self", ",", "local_dict", ")", ":", "del", "local_dict", "[", "'self'", "]", "self", ".", "remove_axis", "=", "False", "max_length", "=", "0", "for", "key", "in", "local_dict", ":", "try", ":", "length", "=", "len",...
Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1
[ "Cast", "all", "inputs", "to", "correct", "dimensions", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L126-L173
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
PhenomDWaveforms._create_waveforms
def _create_waveforms(self): """Create frequency domain waveforms. Method to create waveforms for PhenomDWaveforms class. It adds waveform information in the form of attributes. """ c_obj = ctypes.CDLL(self.exec_call) # prepare ctypes arrays freq_amp_cast = ctypes.c_double*self.num_points*self.length freqs = freq_amp_cast() hc = freq_amp_cast() fmrg_fpeak_cast = ctypes.c_double*self.length fmrg = fmrg_fpeak_cast() fpeak = fmrg_fpeak_cast() # Find hc c_obj.Amplitude(ctypes.byref(freqs), ctypes.byref(hc), ctypes.byref(fmrg), ctypes.byref(fpeak), self.m1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.m2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.chi_1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.chi_2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.dist.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.z.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.st.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.et.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.c_int(self.length), ctypes.c_int(self.num_points)) # turn output into numpy arrays self.freqs, self.hc = np.ctypeslib.as_array(freqs), np.ctypeslib.as_array(hc) self.fmrg, self.fpeak = np.ctypeslib.as_array(fmrg), np.ctypeslib.as_array(fpeak) # remove an axis if inputs were scalar. if self.remove_axis: self.freqs, self.hc, = np.squeeze(self.freqs), np.squeeze(self.hc) self.fmrg, self.fpeak = self.fmrg[0], self.fpeak[0] return
python
def _create_waveforms(self): """Create frequency domain waveforms. Method to create waveforms for PhenomDWaveforms class. It adds waveform information in the form of attributes. """ c_obj = ctypes.CDLL(self.exec_call) # prepare ctypes arrays freq_amp_cast = ctypes.c_double*self.num_points*self.length freqs = freq_amp_cast() hc = freq_amp_cast() fmrg_fpeak_cast = ctypes.c_double*self.length fmrg = fmrg_fpeak_cast() fpeak = fmrg_fpeak_cast() # Find hc c_obj.Amplitude(ctypes.byref(freqs), ctypes.byref(hc), ctypes.byref(fmrg), ctypes.byref(fpeak), self.m1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.m2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.chi_1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.chi_2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.dist.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.z.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.st.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), self.et.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), ctypes.c_int(self.length), ctypes.c_int(self.num_points)) # turn output into numpy arrays self.freqs, self.hc = np.ctypeslib.as_array(freqs), np.ctypeslib.as_array(hc) self.fmrg, self.fpeak = np.ctypeslib.as_array(fmrg), np.ctypeslib.as_array(fpeak) # remove an axis if inputs were scalar. if self.remove_axis: self.freqs, self.hc, = np.squeeze(self.freqs), np.squeeze(self.hc) self.fmrg, self.fpeak = self.fmrg[0], self.fpeak[0] return
[ "def", "_create_waveforms", "(", "self", ")", ":", "c_obj", "=", "ctypes", ".", "CDLL", "(", "self", ".", "exec_call", ")", "# prepare ctypes arrays", "freq_amp_cast", "=", "ctypes", ".", "c_double", "*", "self", ".", "num_points", "*", "self", ".", "length"...
Create frequency domain waveforms. Method to create waveforms for PhenomDWaveforms class. It adds waveform information in the form of attributes.
[ "Create", "frequency", "domain", "waveforms", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L175-L216
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._sanity_check
def _sanity_check(self): """Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter. """ if any(self.m1 < 0.0): raise ValueError("Mass 1 is negative.") if any(self.m2 < 0.0): raise ValueError("Mass 2 is negative.") if any(self.z <= 0.0): raise ValueError("Redshift is zero or negative.") if any(self.dist <= 0.0): raise ValueError("Distance is zero or negative.") if any(self.initial_point < 0.0): raise ValueError("initial_point is negative.") if any(self.t_obs < 0.0): raise ValueError("t_obs is negative.") if any(self.e0 <= 0.0): raise ValueError("e0 must be greater than zero when using EccentricBinaries class.") if any(self.e0 > 1.0): raise ValueError("e0 greater than 1.") return
python
def _sanity_check(self): """Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter. """ if any(self.m1 < 0.0): raise ValueError("Mass 1 is negative.") if any(self.m2 < 0.0): raise ValueError("Mass 2 is negative.") if any(self.z <= 0.0): raise ValueError("Redshift is zero or negative.") if any(self.dist <= 0.0): raise ValueError("Distance is zero or negative.") if any(self.initial_point < 0.0): raise ValueError("initial_point is negative.") if any(self.t_obs < 0.0): raise ValueError("t_obs is negative.") if any(self.e0 <= 0.0): raise ValueError("e0 must be greater than zero when using EccentricBinaries class.") if any(self.e0 > 1.0): raise ValueError("e0 greater than 1.") return
[ "def", "_sanity_check", "(", "self", ")", ":", "if", "any", "(", "self", ".", "m1", "<", "0.0", ")", ":", "raise", "ValueError", "(", "\"Mass 1 is negative.\"", ")", "if", "any", "(", "self", ".", "m2", "<", "0.0", ")", ":", "raise", "ValueError", "(...
Check if parameters are okay. Sanity check makes sure each parameter is within an allowable range. Raises: ValueError: Problem with a specific parameter.
[ "Check", "if", "parameters", "are", "okay", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L392-L424
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._convert_units
def _convert_units(self): """Convert units to geometrized units. Change to G=c=1 (geometrized) units for ease in calculations. """ self.m1 = self.m1*M_sun*ct.G/ct.c**2 self.m2 = self.m2*M_sun*ct.G/ct.c**2 initial_cond_type_conversion = { 'time': ct.c*ct.Julian_year, 'frequency': 1./ct.c, 'separation': ct.parsec, } self.initial_point = self.initial_point*initial_cond_type_conversion[self.initial_cond_type] self.t_obs = self.t_obs*ct.c*ct.Julian_year return
python
def _convert_units(self): """Convert units to geometrized units. Change to G=c=1 (geometrized) units for ease in calculations. """ self.m1 = self.m1*M_sun*ct.G/ct.c**2 self.m2 = self.m2*M_sun*ct.G/ct.c**2 initial_cond_type_conversion = { 'time': ct.c*ct.Julian_year, 'frequency': 1./ct.c, 'separation': ct.parsec, } self.initial_point = self.initial_point*initial_cond_type_conversion[self.initial_cond_type] self.t_obs = self.t_obs*ct.c*ct.Julian_year return
[ "def", "_convert_units", "(", "self", ")", ":", "self", ".", "m1", "=", "self", ".", "m1", "*", "M_sun", "*", "ct", ".", "G", "/", "ct", ".", "c", "**", "2", "self", ".", "m2", "=", "self", ".", "m2", "*", "M_sun", "*", "ct", ".", "G", "/",...
Convert units to geometrized units. Change to G=c=1 (geometrized) units for ease in calculations.
[ "Convert", "units", "to", "geometrized", "units", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L426-L443
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._t_of_e
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0): """Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time. """ if ef is None: ef = np.ones_like(self.e0)*0.0000001 beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2) e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points) for i in range(len(self.e0))]) integrand = self._find_integrand(e_vals) integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:]) for i in range(e_vals.shape[1])]).T if a0 is None and f0 is None: a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1]) elif a0 is None: a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.) c0 = self._c0_func(a0, self.e0) a_vals = c0[:, np.newaxis]*self._f_e(e_vals) delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral return e_vals, a_vals, delta_t
python
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0): """Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time. """ if ef is None: ef = np.ones_like(self.e0)*0.0000001 beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2) e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points) for i in range(len(self.e0))]) integrand = self._find_integrand(e_vals) integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:]) for i in range(e_vals.shape[1])]).T if a0 is None and f0 is None: a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1]) elif a0 is None: a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.) c0 = self._c0_func(a0, self.e0) a_vals = c0[:, np.newaxis]*self._f_e(e_vals) delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral return e_vals, a_vals, delta_t
[ "def", "_t_of_e", "(", "self", ",", "a0", "=", "None", ",", "t_start", "=", "None", ",", "f0", "=", "None", ",", "ef", "=", "None", ",", "t_obs", "=", "5.0", ")", ":", "if", "ef", "is", "None", ":", "ef", "=", "np", ".", "ones_like", "(", "se...
Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time.
[ "Rearranged", "versions", "of", "Peters", "equations" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L463-L493
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._chirp_mass
def _chirp_mass(self): """Chirp mass calculation """ return (self.m1*self.m2)**(3./5.)/(self.m1+self.m2)**(1./5.)
python
def _chirp_mass(self): """Chirp mass calculation """ return (self.m1*self.m2)**(3./5.)/(self.m1+self.m2)**(1./5.)
[ "def", "_chirp_mass", "(", "self", ")", ":", "return", "(", "self", ".", "m1", "*", "self", ".", "m2", ")", "**", "(", "3.", "/", "5.", ")", "/", "(", "self", ".", "m1", "+", "self", ".", "m2", ")", "**", "(", "1.", "/", "5.", ")" ]
Chirp mass calculation
[ "Chirp", "mass", "calculation" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L495-L499
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._g_func
def _g_func(self): """Eq. 20 in Peters and Mathews 1963. """ return (self.n**4./32. * ((jv(self.n-2., self.n*self.e_vals) - 2. * self.e_vals*jv(self.n-1., self.n*self.e_vals) + 2./self.n * jv(self.n, self.n*self.e_vals) + 2.*self.e_vals*jv(self.n+1., self.n*self.e_vals) - jv(self.n+2., self.n*self.e_vals))**2. + (1.-self.e_vals**2.) * (jv(self.n-2., self.n*self.e_vals) - 2.*jv(self.n, self.n*self.e_vals) + jv(self.n+2., self.n*self.e_vals))**2. + 4./(3.*self.n**2.)*(jv(self.n, self.n*self.e_vals))**2.))
python
def _g_func(self): """Eq. 20 in Peters and Mathews 1963. """ return (self.n**4./32. * ((jv(self.n-2., self.n*self.e_vals) - 2. * self.e_vals*jv(self.n-1., self.n*self.e_vals) + 2./self.n * jv(self.n, self.n*self.e_vals) + 2.*self.e_vals*jv(self.n+1., self.n*self.e_vals) - jv(self.n+2., self.n*self.e_vals))**2. + (1.-self.e_vals**2.) * (jv(self.n-2., self.n*self.e_vals) - 2.*jv(self.n, self.n*self.e_vals) + jv(self.n+2., self.n*self.e_vals))**2. + 4./(3.*self.n**2.)*(jv(self.n, self.n*self.e_vals))**2.))
[ "def", "_g_func", "(", "self", ")", ":", "return", "(", "self", ".", "n", "**", "4.", "/", "32.", "*", "(", "(", "jv", "(", "self", ".", "n", "-", "2.", ",", "self", ".", "n", "*", "self", ".", "e_vals", ")", "-", "2.", "*", "self", ".", ...
Eq. 20 in Peters and Mathews 1963.
[ "Eq", ".", "20", "in", "Peters", "and", "Mathews", "1963", "." ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L508-L521
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._dEndfr
def _dEndfr(self): """Eq. 4 from Orazio and Samsing (2018) Takes f in rest frame. """ Mc = self._chirp_mass() return (np.pi**(2./3.)*Mc**(5./3.)/(3.*(1.+self.z)**(1./3.) * (self.freqs_orb/(1.+self.z))**(1./3.))*(2./self.n)**(2./3.) * self._g_func()/self._f_func())
python
def _dEndfr(self): """Eq. 4 from Orazio and Samsing (2018) Takes f in rest frame. """ Mc = self._chirp_mass() return (np.pi**(2./3.)*Mc**(5./3.)/(3.*(1.+self.z)**(1./3.) * (self.freqs_orb/(1.+self.z))**(1./3.))*(2./self.n)**(2./3.) * self._g_func()/self._f_func())
[ "def", "_dEndfr", "(", "self", ")", ":", "Mc", "=", "self", ".", "_chirp_mass", "(", ")", "return", "(", "np", ".", "pi", "**", "(", "2.", "/", "3.", ")", "*", "Mc", "**", "(", "5.", "/", "3.", ")", "/", "(", "3.", "*", "(", "1.", "+", "s...
Eq. 4 from Orazio and Samsing (2018) Takes f in rest frame.
[ "Eq", ".", "4", "from", "Orazio", "and", "Samsing", "(", "2018", ")" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L523-L532
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._hcn_func
def _hcn_func(self): """Eq. 56 from Barack and Cutler 2004 """ self.hc = 1./(np.pi*self.dist)*np.sqrt(2.*self._dEndfr()) return
python
def _hcn_func(self): """Eq. 56 from Barack and Cutler 2004 """ self.hc = 1./(np.pi*self.dist)*np.sqrt(2.*self._dEndfr()) return
[ "def", "_hcn_func", "(", "self", ")", ":", "self", ".", "hc", "=", "1.", "/", "(", "np", ".", "pi", "*", "self", ".", "dist", ")", "*", "np", ".", "sqrt", "(", "2.", "*", "self", ".", "_dEndfr", "(", ")", ")", "return" ]
Eq. 56 from Barack and Cutler 2004
[ "Eq", ".", "56", "from", "Barack", "and", "Cutler", "2004" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L534-L539
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
EccentricBinaries._create_waveforms
def _create_waveforms(self): """Create the eccentric waveforms """ # find eccentricity and semi major axis over time until e=0. e_vals, a_vals, t_vals = self._t_of_e(a0=self.a0, f0=self.f0, t_start=self.t_start, ef=None, t_obs=self.t_obs) f_mrg = 0.02/(self.m1 + self.m2) a_mrg = ((self.m1+self.m2)/f_mrg**2)**(1/3) # limit highest frequency to ISCO even though this is not innermost orbit for eccentric # binaries # find where binary goes farther than observation time or merger frequency limit. a_ind_start = np.asarray([np.where(a_vals[i] > a_mrg[i])[0][0] for i in range(len(a_vals))]) t_ind_start = np.asarray([np.where(t_vals[i] < self.t_obs[i])[0][0] for i in range(len(t_vals))]) ind_start = (a_ind_start*(a_ind_start >= t_ind_start) + t_ind_start*(a_ind_start < t_ind_start)) self.ef = np.asarray([e_vals[i][ind] for i, ind in enumerate(ind_start)]) # higher resolution over the eccentricities seen during observation self.e_vals, self.a_vals, self.t_vals = self._t_of_e(a0=a_vals[:, -1], ef=self.ef, t_obs=self.t_obs) self.freqs_orb = np.sqrt((self.m1[:, np.newaxis]+self.m2[:, np.newaxis])/self.a_vals**3) # tile for efficient calculation across modes. for attr in ['e_vals', 'a_vals', 't_vals', 'freqs_orb']: arr = getattr(self, attr) new_arr = (np.flip( np.tile(arr, self.n_max).reshape(len(arr)*self.n_max, len(arr[0])), -1)) setattr(self, attr, new_arr) for attr in ['m1', 'm2', 'z', 'dist']: arr = getattr(self, attr) new_arr = np.repeat(arr, self.n_max)[:, np.newaxis] setattr(self, attr, new_arr) # setup modes self.n = np.tile(np.arange(1, self.n_max + 1), self.length)[:, np.newaxis] self._hcn_func() # reshape hc self.hc = self.hc.reshape(self.length, self.n_max, self.hc.shape[-1]) self.freqs = np.reshape(self.n*self.freqs_orb/(1+self.z) * ct.c, (self.length, self.n_max, self.freqs_orb.shape[-1])) self.hc, self.freqs = np.squeeze(self.hc), np.squeeze(self.freqs) return
python
def _create_waveforms(self): """Create the eccentric waveforms """ # find eccentricity and semi major axis over time until e=0. e_vals, a_vals, t_vals = self._t_of_e(a0=self.a0, f0=self.f0, t_start=self.t_start, ef=None, t_obs=self.t_obs) f_mrg = 0.02/(self.m1 + self.m2) a_mrg = ((self.m1+self.m2)/f_mrg**2)**(1/3) # limit highest frequency to ISCO even though this is not innermost orbit for eccentric # binaries # find where binary goes farther than observation time or merger frequency limit. a_ind_start = np.asarray([np.where(a_vals[i] > a_mrg[i])[0][0] for i in range(len(a_vals))]) t_ind_start = np.asarray([np.where(t_vals[i] < self.t_obs[i])[0][0] for i in range(len(t_vals))]) ind_start = (a_ind_start*(a_ind_start >= t_ind_start) + t_ind_start*(a_ind_start < t_ind_start)) self.ef = np.asarray([e_vals[i][ind] for i, ind in enumerate(ind_start)]) # higher resolution over the eccentricities seen during observation self.e_vals, self.a_vals, self.t_vals = self._t_of_e(a0=a_vals[:, -1], ef=self.ef, t_obs=self.t_obs) self.freqs_orb = np.sqrt((self.m1[:, np.newaxis]+self.m2[:, np.newaxis])/self.a_vals**3) # tile for efficient calculation across modes. for attr in ['e_vals', 'a_vals', 't_vals', 'freqs_orb']: arr = getattr(self, attr) new_arr = (np.flip( np.tile(arr, self.n_max).reshape(len(arr)*self.n_max, len(arr[0])), -1)) setattr(self, attr, new_arr) for attr in ['m1', 'm2', 'z', 'dist']: arr = getattr(self, attr) new_arr = np.repeat(arr, self.n_max)[:, np.newaxis] setattr(self, attr, new_arr) # setup modes self.n = np.tile(np.arange(1, self.n_max + 1), self.length)[:, np.newaxis] self._hcn_func() # reshape hc self.hc = self.hc.reshape(self.length, self.n_max, self.hc.shape[-1]) self.freqs = np.reshape(self.n*self.freqs_orb/(1+self.z) * ct.c, (self.length, self.n_max, self.freqs_orb.shape[-1])) self.hc, self.freqs = np.squeeze(self.hc), np.squeeze(self.freqs) return
[ "def", "_create_waveforms", "(", "self", ")", ":", "# find eccentricity and semi major axis over time until e=0.", "e_vals", ",", "a_vals", ",", "t_vals", "=", "self", ".", "_t_of_e", "(", "a0", "=", "self", ".", "a0", ",", "f0", "=", "self", ".", "f0", ",", ...
Create the eccentric waveforms
[ "Create", "the", "eccentric", "waveforms" ]
train
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L541-L597
jbloomlab/phydms
phydmslib/simulate.py
pyvolvePartitions
def pyvolvePartitions(model, divselection=None): """Get list of `pyvolve` partitions for `model`. Args: `model` (`phydmslib.models.Models` object) The model used for the simulations. Currently only certain `Models` are supported (e.g., `YNGKP`, `ExpCM`) `divselection` (`None` or 2-tuple `(divomega, divsites)`) Set this option if you want to simulate a subset of sites as under diversifying selection (e.g., an `omega` different than that used by `model`. In this case, `divomega` is the omega for this subset of sites, and `divsites` is a list of the sites in 1, 2, ... numbering. Returns: `partitions` (`list` of `pyvolve.Partition` objects) Can be fed into `pyvolve.Evolver` to simulate evolution. """ codons = pyvolve.genetics.Genetics().codons codon_dict = pyvolve.genetics.Genetics().codon_dict pyrims = pyvolve.genetics.Genetics().pyrims purines = pyvolve.genetics.Genetics().purines if divselection: (divomega, divsites) = divselection else: divsites = [] assert all([1 <= r <= model.nsites for r in divsites]) partitions = [] for r in range(model.nsites): matrix = scipy.zeros((len(codons), len(codons)), dtype='float') for (xi, x) in enumerate(codons): for (yi, y) in enumerate(codons): ntdiffs = [(x[j], y[j]) for j in range(3) if x[j] != y[j]] if len(ntdiffs) == 1: (xnt, ynt) = ntdiffs[0] qxy = 1.0 if (xnt in purines) == (ynt in purines): qxy *= model.kappa (xaa, yaa) = (codon_dict[x], codon_dict[y]) fxy = 1.0 if xaa != yaa: if type(model) == phydmslib.models.ExpCM_empirical_phi_divpressure: fxy *= model.omega * (1 + model.omega2 * model.deltar[r]) elif r + 1 in divsites: fxy *= divomega else: fxy *= model.omega if type(model) in [phydmslib.models.ExpCM, phydmslib.models.ExpCM_empirical_phi, phydmslib.models.ExpCM_empirical_phi_divpressure]: qxy *= model.phi[NT_TO_INDEX[ynt]] pix = model.pi[r][AA_TO_INDEX[xaa]]**model.beta piy = model.pi[r][AA_TO_INDEX[yaa]]**model.beta if abs(pix - piy) > ALMOST_ZERO: fxy *= math.log(piy / pix) / (1.0 - pix / piy) elif type(model) == 
phydmslib.models.YNGKP_M0: for p in range(3): qxy *= model.phi[p][NT_TO_INDEX[y[p]]] else: raise ValueError("Can't handle model type {0}".format( type(model))) matrix[xi][yi] = model.mu * qxy * fxy matrix[xi][xi] = -matrix[xi].sum() # create model in way that captures annoying print statements in pyvolve old_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') try: m = pyvolve.Model("custom", {"matrix":matrix}) finally: sys.stdout.close() sys.stdout = old_stdout partitions.append(pyvolve.Partition(models=m, size=1)) return partitions
python
def pyvolvePartitions(model, divselection=None): """Get list of `pyvolve` partitions for `model`. Args: `model` (`phydmslib.models.Models` object) The model used for the simulations. Currently only certain `Models` are supported (e.g., `YNGKP`, `ExpCM`) `divselection` (`None` or 2-tuple `(divomega, divsites)`) Set this option if you want to simulate a subset of sites as under diversifying selection (e.g., an `omega` different than that used by `model`. In this case, `divomega` is the omega for this subset of sites, and `divsites` is a list of the sites in 1, 2, ... numbering. Returns: `partitions` (`list` of `pyvolve.Partition` objects) Can be fed into `pyvolve.Evolver` to simulate evolution. """ codons = pyvolve.genetics.Genetics().codons codon_dict = pyvolve.genetics.Genetics().codon_dict pyrims = pyvolve.genetics.Genetics().pyrims purines = pyvolve.genetics.Genetics().purines if divselection: (divomega, divsites) = divselection else: divsites = [] assert all([1 <= r <= model.nsites for r in divsites]) partitions = [] for r in range(model.nsites): matrix = scipy.zeros((len(codons), len(codons)), dtype='float') for (xi, x) in enumerate(codons): for (yi, y) in enumerate(codons): ntdiffs = [(x[j], y[j]) for j in range(3) if x[j] != y[j]] if len(ntdiffs) == 1: (xnt, ynt) = ntdiffs[0] qxy = 1.0 if (xnt in purines) == (ynt in purines): qxy *= model.kappa (xaa, yaa) = (codon_dict[x], codon_dict[y]) fxy = 1.0 if xaa != yaa: if type(model) == phydmslib.models.ExpCM_empirical_phi_divpressure: fxy *= model.omega * (1 + model.omega2 * model.deltar[r]) elif r + 1 in divsites: fxy *= divomega else: fxy *= model.omega if type(model) in [phydmslib.models.ExpCM, phydmslib.models.ExpCM_empirical_phi, phydmslib.models.ExpCM_empirical_phi_divpressure]: qxy *= model.phi[NT_TO_INDEX[ynt]] pix = model.pi[r][AA_TO_INDEX[xaa]]**model.beta piy = model.pi[r][AA_TO_INDEX[yaa]]**model.beta if abs(pix - piy) > ALMOST_ZERO: fxy *= math.log(piy / pix) / (1.0 - pix / piy) elif type(model) == 
phydmslib.models.YNGKP_M0: for p in range(3): qxy *= model.phi[p][NT_TO_INDEX[y[p]]] else: raise ValueError("Can't handle model type {0}".format( type(model))) matrix[xi][yi] = model.mu * qxy * fxy matrix[xi][xi] = -matrix[xi].sum() # create model in way that captures annoying print statements in pyvolve old_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') try: m = pyvolve.Model("custom", {"matrix":matrix}) finally: sys.stdout.close() sys.stdout = old_stdout partitions.append(pyvolve.Partition(models=m, size=1)) return partitions
[ "def", "pyvolvePartitions", "(", "model", ",", "divselection", "=", "None", ")", ":", "codons", "=", "pyvolve", ".", "genetics", ".", "Genetics", "(", ")", ".", "codons", "codon_dict", "=", "pyvolve", ".", "genetics", ".", "Genetics", "(", ")", ".", "cod...
Get list of `pyvolve` partitions for `model`. Args: `model` (`phydmslib.models.Models` object) The model used for the simulations. Currently only certain `Models` are supported (e.g., `YNGKP`, `ExpCM`) `divselection` (`None` or 2-tuple `(divomega, divsites)`) Set this option if you want to simulate a subset of sites as under diversifying selection (e.g., an `omega` different than that used by `model`. In this case, `divomega` is the omega for this subset of sites, and `divsites` is a list of the sites in 1, 2, ... numbering. Returns: `partitions` (`list` of `pyvolve.Partition` objects) Can be fed into `pyvolve.Evolver` to simulate evolution.
[ "Get", "list", "of", "pyvolve", "partitions", "for", "model", "." ]
train
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/simulate.py#L18-L95