hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72e6c8ddb3a2702133558af850f4622b3f800aa | 4,987 | py | Python | files_sdk/models/style.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 14 | 2020-08-05T15:48:06.000Z | 2021-08-18T13:13:39.000Z | files_sdk/models/style.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 4 | 2020-10-30T14:49:25.000Z | 2021-09-29T17:11:53.000Z | files_sdk/models/style.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | null | null | null | import builtins
import datetime
from files_sdk.api import Api
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class Style:
    """A Files.com Style record (custom branding attached to a folder path).

    Instances carry exactly the attributes declared in ``default_attributes``;
    unknown keys in an incoming attribute dict are ignored.
    """

    default_attributes = {
        'id': None,  # int64 - Style ID
        'path': None,  # string - Folder path. Slash-delimited; must neither start nor end with a slash. Max 5000 chars.
        'logo': None,  # Logo
        'thumbnail': None,  # Logo thumbnail
        'file': None,  # file - Logo for custom branding.
    }

    def __init__(self, attributes=None, options=None):
        attributes = attributes if isinstance(attributes, dict) else {}
        options = options if isinstance(options, dict) else {}
        self.set_attributes(attributes)
        self.options = options

    def set_attributes(self, attributes):
        """Assign every declared attribute, falling back to its default."""
        for name, default in Style.default_attributes.items():
            setattr(self, name, attributes.get(name, default))

    def get_attributes(self):
        """Return a dict of all declared attributes whose value is not None."""
        attrs = {}
        for name in Style.default_attributes:
            value = getattr(self, name, None)
            if value is not None:
                attrs[name] = value
        return attrs

    # Parameters:
    #   file (required) - file - Logo for custom branding.
    def update(self, params=None):
        """PATCH this style on the server; requires ``file`` in `params`."""
        params = params if isinstance(params, dict) else {}
        if getattr(self, "path", None):
            params['path'] = self.path
        else:
            raise MissingParameterError("Current object doesn't have a path")
        if "path" not in params:
            raise MissingParameterError("Parameter missing: path")
        if "file" not in params:
            raise MissingParameterError("Parameter missing: file")
        if "path" in params and not isinstance(params["path"], str):
            raise InvalidParameterError("Bad parameter: path must be an str")
        endpoint = "/styles/{path}".format(path=params['path'])
        response, _options = Api.send_request("PATCH", endpoint, params, self.options)
        return response.data

    def delete(self, params=None):
        """DELETE this style on the server."""
        params = params if isinstance(params, dict) else {}
        if getattr(self, "path", None):
            params['path'] = self.path
        else:
            raise MissingParameterError("Current object doesn't have a path")
        if "path" not in params:
            raise MissingParameterError("Parameter missing: path")
        if "path" in params and not isinstance(params["path"], str):
            raise InvalidParameterError("Bad parameter: path must be an str")
        endpoint = "/styles/{path}".format(path=params['path'])
        response, _options = Api.send_request("DELETE", endpoint, params, self.options)
        return response.data

    def destroy(self, params=None):
        """Alias for :meth:`delete`."""
        self.delete(params)

    def save(self):
        """Persist the current non-None attributes via :meth:`update`."""
        self.update(self.get_attributes())
# Parameters:
# path (required) - string - Style path.
def find(path, params=None, options=None):
    """Fetch a single Style by `path` (GET /styles/{path}) and wrap it."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    params["path"] = path
    if "path" in params and not isinstance(params["path"], str):
        raise InvalidParameterError("Bad parameter: path must be an str")
    if "path" not in params:
        raise MissingParameterError("Parameter missing: path")
    endpoint = "/styles/{path}".format(path=params['path'])
    response, options = Api.send_request("GET", endpoint, params, options)
    return Style(response.data, options)
def get(path, params = None, options = None):
    """Return the Style at `path` (alias for `find`).

    Bug fix: the result of `find` was previously discarded, so `get`
    always returned None; propagate the found Style to the caller.
    """
    return find(path, params, options)
# Parameters:
# file (required) - file - Logo for custom branding.
def update(path, params=None, options=None):
    """PATCH the style at `path`; `params` must include ``file``."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    params["path"] = path
    if "path" in params and not isinstance(params["path"], str):
        raise InvalidParameterError("Bad parameter: path must be an str")
    if "path" not in params:
        raise MissingParameterError("Parameter missing: path")
    if "file" not in params:
        raise MissingParameterError("Parameter missing: file")
    endpoint = "/styles/{path}".format(path=params['path'])
    response, options = Api.send_request("PATCH", endpoint, params, options)
    return Style(response.data, options)
def delete(path, params=None, options=None):
    """DELETE the style at `path`; returns the raw response data."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    params["path"] = path
    if "path" in params and not isinstance(params["path"], str):
        raise InvalidParameterError("Bad parameter: path must be an str")
    if "path" not in params:
        raise MissingParameterError("Parameter missing: path")
    endpoint = "/styles/{path}".format(path=params['path'])
    response, _options = Api.send_request("DELETE", endpoint, params, options)
    return response.data
def destroy(path, params = None, options = None):
    """Delete the style at `path` (alias for `delete`).

    Improvement: propagate `delete`'s response data instead of discarding
    it. Callers that ignored the previous None return are unaffected.
    """
    return delete(path, params, options)
def new(*args, **kwargs):
    """Factory helper: build a Style from the given constructor arguments."""
    return Style(*args, **kwargs)
import datetime
from files_sdk.api import Api
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class Style:
default_attributes = {
'id': None,
'path': None,
'logo': None,
'thumbnail': None,
'file': None,
}
def __init__(self, attributes=None, options=None):
if not isinstance(attributes, dict):
attributes = {}
if not isinstance(options, dict):
options = {}
self.set_attributes(attributes)
self.options = options
def set_attributes(self, attributes):
for (attribute, default_value) in Style.default_attributes.items():
setattr(self, attribute, attributes.get(attribute, default_value))
def get_attributes(self):
return {k: getattr(self, k, None) for k in Style.default_attributes if getattr(self, k, None) is not None}
def update(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "file" not in params:
raise MissingParameterError("Parameter missing: file")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
response, _options = Api.send_request("PATCH", "/styles/{path}".format(path=params['path']), params, self.options)
return response.data
def delete(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "path") and self.path:
params['path'] = self.path
else:
raise MissingParameterError("Current object doesn't have a path")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
response, _options = Api.send_request("DELETE", "/styles/{path}".format(path=params['path']), params, self.options)
return response.data
def destroy(self, params = None):
self.delete(params)
def save(self):
self.update(self.get_attributes())
def find(path, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["path"] = path
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
response, options = Api.send_request("GET", "/styles/{path}".format(path=params['path']), params, options)
return Style(response.data, options)
def get(path, params = None, options = None):
find(path, params, options)
def update(path, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["path"] = path
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
if "file" not in params:
raise MissingParameterError("Parameter missing: file")
response, options = Api.send_request("PATCH", "/styles/{path}".format(path=params['path']), params, options)
return Style(response.data, options)
def delete(path, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["path"] = path
if "path" in params and not isinstance(params["path"], str):
raise InvalidParameterError("Bad parameter: path must be an str")
if "path" not in params:
raise MissingParameterError("Parameter missing: path")
response, _options = Api.send_request("DELETE", "/styles/{path}".format(path=params['path']), params, options)
return response.data
def destroy(path, params = None, options = None):
delete(path, params, options)
def new(*args, **kwargs):
return Style(*args, **kwargs) | true | true |
f72e6de87d8b0bd0845f41e8704948a16523c24b | 33,336 | py | Python | scipy/io/matlab/mio5.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | scipy/io/matlab/mio5.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | scipy/io/matlab/mio5.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | ''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
'''
'''
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had, therefore, to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-fields of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
script I was working with.
'''
# Small fragments of current code adapted from matfile.py by Heiko
# Henkelmann; parts of the code for simplify_cells=True adapted from
# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
from numpy.compat import asbytes, asstr
import scipy.sparse
from .byteordercodes import native_code, swapped_code
from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from .mio5_utils import VarReader5
# Constants and helper objects
from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info, mat_struct)
from .streams import ZlibInputStream
def _has_struct(elem):
"""Determine if elem is an array and if first array item is a struct."""
return (isinstance(elem, np.ndarray) and (elem.size > 0) and
isinstance(elem[0], mat_struct))
def _inspect_cell_array(ndarray):
"""Construct lists from cell arrays (loaded as numpy ndarrays), recursing
into items if they contain mat_struct objects."""
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, mat_struct):
elem_list.append(_matstruct_to_dict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_inspect_cell_array(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
def _matstruct_to_dict(matobj):
"""Construct nested dicts from mat_struct objects."""
d = {}
for f in matobj._fieldnames:
elem = matobj.__dict__[f]
if isinstance(elem, mat_struct):
d[f] = _matstruct_to_dict(elem)
elif _has_struct(elem):
d[f] = _inspect_cell_array(elem)
else:
d[f] = elem
return d
def _simplify_cells(d):
"""Convert mat objects in dict to nested dicts."""
for key in d:
if isinstance(d[key], mat_struct):
d[key] = _matstruct_to_dict(d[key])
elif _has_struct(d[key]):
d[key] = _inspect_cell_array(d[key])
return d
class MatFile5Reader(MatFileReader):
''' Reader for Mat 5 mat files
Adds the following attribute to base class
uint16_codec - char codec to use for uint16 char arrays
(defaults to system default codec)
Uses variable reader that has the following stardard interface (see
abstract class in ``miobase``::
__init__(self, file_reader)
read_header(self)
array_from_header(self)
and added interface::
set_stream(self, stream)
read_full_tag(self)
'''
@docfiller
def __init__(self,
mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True,
uint16_codec=None,
simplify_cells=False):
'''Initializer for matlab 5 file format reader
%(matstream_arg)s
%(load_args)s
%(struct_arg)s
uint16_codec : {None, string}
Set codec to use for uint16 char arrays (e.g., 'utf-8').
Use system default codec if None
'''
super(MatFile5Reader, self).__init__(
mat_stream,
byte_order,
mat_dtype,
squeeze_me,
chars_as_strings,
matlab_compatible,
struct_as_record,
verify_compressed_data_integrity,
simplify_cells)
# Set uint16 codec
if not uint16_codec:
uint16_codec = sys.getdefaultencoding()
self.uint16_codec = uint16_codec
# placeholders for readers - see initialize_read method
self._file_reader = None
self._matrix_reader = None
def guess_byte_order(self):
''' Guess byte order.
Sets stream pointer to 0 '''
self.mat_stream.seek(126)
mi = self.mat_stream.read(2)
self.mat_stream.seek(0)
return mi == b'IM' and '<' or '>'
def read_file_header(self):
''' Read in mat 5 file header '''
hdict = {}
hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
hdr = read_dtype(self.mat_stream, hdr_dtype)
hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
v_major = hdr['version'] >> 8
v_minor = hdr['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict
def initialize_read(self):
''' Run when beginning read of variables
Sets up readers from parameters in `self`
'''
# reader for top level stream. We need this extra top-level
# reader because we use the matrix_reader object to contain
# compressed matrices (so they have their own stream)
self._file_reader = VarReader5(self)
# reader for matrix streams
self._matrix_reader = VarReader5(self)
def read_var_header(self):
''' Read header, return header, next position
Header has to define at least .name and .is_global
Parameters
----------
None
Returns
-------
header : object
object that can be passed to self.read_var_array, and that
has attributes .name and .is_global
next_position : int
position in stream of next variable
'''
mdtype, byte_count = self._file_reader.read_full_tag()
if not byte_count > 0:
raise ValueError("Did not read any bytes")
next_pos = self.mat_stream.tell() + byte_count
if mdtype == miCOMPRESSED:
# Make new stream from compressed data
stream = ZlibInputStream(self.mat_stream, byte_count)
self._matrix_reader.set_stream(stream)
check_stream_limit = self.verify_compressed_data_integrity
mdtype, byte_count = self._matrix_reader.read_full_tag()
else:
check_stream_limit = False
self._matrix_reader.set_stream(self.mat_stream)
if not mdtype == miMATRIX:
raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
header = self._matrix_reader.read_header(check_stream_limit)
return header, next_pos
def read_var_array(self, header, process=True):
''' Read array, given `header`
Parameters
----------
header : header object
object with fields defining variable header
process : {True, False} bool, optional
If True, apply recursive post-processing during loading of
array.
Returns
-------
arr : array
array with post-processing applied or not according to
`process`.
'''
return self._matrix_reader.array_from_header(header, process)
def get_variables(self, variable_names=None):
''' get variables from stream as dictionary
variable_names - optional list of variable names to get
If variable_names is None, then get all variables in file
'''
if isinstance(variable_names, str):
variable_names = [variable_names]
elif variable_names is not None:
variable_names = list(variable_names)
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
mdict = self.read_file_header()
mdict['__globals__'] = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name in mdict:
warnings.warn('Duplicate variable name "%s" in stream'
' - replacing previous with new\n'
'Consider mio5.varmats_from_mat to split '
'file into single variable files' % name,
MatReadWarning, stacklevel=2)
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
# We want to keep this raw because mat_dtype processing
# will break the format (uint8 as mxDOUBLE_CLASS)
process = False
else:
process = True
if variable_names is not None and name not in variable_names:
self.mat_stream.seek(next_position)
continue
try:
res = self.read_var_array(hdr, process)
except MatReadError as err:
warnings.warn(
f'Unreadable variable "{name}", because "{err}"',
Warning, stacklevel=2)
res = f"Read error: {err}"
self.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
if variable_names is not None:
variable_names.remove(name)
if len(variable_names) == 0:
break
if self.simplify_cells:
return _simplify_cells(mdict)
else:
return mdict
def list_variables(self):
''' list variables from stream '''
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
self.read_file_header()
vars = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
shape = self._matrix_reader.shape_from_header(hdr)
if hdr.is_logical:
info = 'logical'
else:
info = mclass_info.get(hdr.mclass, 'unknown')
vars.append((name, shape, info))
self.mat_stream.seek(next_position)
return vars
def varmats_from_mat(file_obj):
    """ Pull variables out of mat 5 file as a sequence of mat file objects

    This can be useful with a difficult mat file, containing unreadable
    variables. This routine pulls the variables out in raw form and puts them,
    unread, back into a file stream for saving or reading. Another use is the
    pathological case where there is more than one variable of the same name in
    the file; this routine returns the duplicates, whereas the standard reader
    will overwrite duplicates in the returned dictionary.

    The file pointer in `file_obj` will be undefined. File pointers for the
    returned file-like objects are set at 0.

    Parameters
    ----------
    file_obj : file-like
        file object containing mat file

    Returns
    -------
    named_mats : list
        list contains tuples of (name, BytesIO) where BytesIO is a file-like
        object containing mat file contents as for a single variable. The
        BytesIO contains a string with the original header and a single var. If
        ``var_file_obj`` is an individual BytesIO instance, then save as a mat
        file with something like ``open('test.mat',
        'wb').write(var_file_obj.read())``

    Examples
    --------
    >>> import numpy as np
    >>> from io import BytesIO
    >>> import scipy.io

    >>> mat_fileobj = BytesIO()
    >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
    >>> varmats = varmats_from_mat(mat_fileobj)
    >>> sorted([name for name, str_obj in varmats])
    ['a', 'b']
    """
    rdr = MatFile5Reader(file_obj)
    file_obj.seek(0)
    # Raw read of top-level file header
    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
    raw_hdr = file_obj.read(hdr_len)
    # Initialize variable reading
    file_obj.seek(0)
    rdr.initialize_read()
    rdr.read_file_header()
    next_position = file_obj.tell()
    named_mats = []
    while not rdr.end_of_stream():
        start_position = next_position
        hdr, next_position = rdr.read_var_header()
        name = asstr(hdr.name)
        # Read raw variable string (the untouched bytes of this variable's
        # miMATRIX record, including its tag)
        file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # write to stringio object: original file header + this one variable
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
        out_obj.seek(0)
        named_mats.append((name, out_obj))
    return named_mats
class EmptyStructMarker(object):
    """ Class to indicate presence of empty matlab struct on output

    Returned (as the class itself, not an instance) by `to_writeable` when
    the input is equivalent to an empty mapping.
    """
def to_writeable(source):
    ''' Convert input object ``source`` to something we can write

    Parameters
    ----------
    source : object

    Returns
    -------
    arr : None or ndarray or EmptyStructMarker
        If `source` cannot be converted to something we can write to a matfile,
        return None. If `source` is equivalent to an empty dictionary, return
        ``EmptyStructMarker``. Otherwise return `source` converted to an
        ndarray with contents for writing to matfile.
    '''
    if isinstance(source, np.ndarray):
        return source
    if source is None:
        return None
    # Does `source` quack like a mapping?
    is_mapping = all(hasattr(source, attr)
                     for attr in ('keys', 'values', 'items'))
    if isinstance(source, np.generic):
        # NumPy scalars are never mappings (PyPy issue workaround)
        pass
    elif not is_mapping and hasattr(source, '__dict__'):
        # Not a mapping, but has instance attributes - treat the public
        # attributes as the fields of a struct.
        source = {key: value for key, value in source.__dict__.items()
                  if not key.startswith('_')}
        is_mapping = True
    if is_mapping:
        dtype = []
        values = []
        for field, value in source.items():
            # Only string field names that could be valid matlab struct
            # field names (no leading underscore or digit) are kept.
            if isinstance(field, str) and field[0] not in '_0123456789':
                dtype.append((str(field), object))
                values.append(value)
        if dtype:
            return np.array([tuple(values)], dtype)
        return EmptyStructMarker
    # Next try and convert to an array
    narr = np.asanyarray(source)
    if (narr.dtype.type in (object, np.object_) and
            narr.shape == () and narr == source):
        # No interesting conversion possible
        return None
    return narr
# Native byte ordered dtypes for convenience for writers; writers always
# emit in native order (readers handle byte swapping on load).
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']
class VarWriter5(object):
''' Generic matlab matrix writing class '''
mat_tag = np.zeros((), NDT_TAG_FULL)
mat_tag['mdtype'] = miMATRIX
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.unicode_strings = file_writer.unicode_strings
self.long_field_names = file_writer.long_field_names
self.oned_as = file_writer.oned_as
# These are used for top level writes, and unset after
self._var_name = None
self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
''' write tag and data '''
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().newbyteorder()
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
def write_smalldata_element(self, arr, mdtype, byte_count):
# write tag with embedded data
tag = np.zeros((), NDT_TAG_SMALL)
tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
# if arr.tobytes is < 4, the element will be zero-padded as needed.
tag['data'] = arr.tobytes(order='F')
self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
def write_header(self,
shape,
mclass,
is_complex=False,
is_logical=False,
nzmax=0):
''' Write header for given data options
shape : sequence
array shape
mclass - mat5 matrix class
is_complex - True if matrix is complex
is_logical - True if matrix is logical
nzmax - max non zero elements for sparse arrays
We get the name and the global flag from the object, and reset
them to defaults after we've used them
'''
# get name and is_global from one-shot object store
name = self._var_name
is_global = self._var_is_global
# initialize the top-level matrix tag, store position
self._mat_tag_pos = self.file_stream.tell()
self.write_bytes(self.mat_tag)
# write array flags (complex, global, logical, class, nzmax)
af = np.zeros((), NDT_ARRAY_FLAGS)
af['data_type'] = miUINT32
af['byte_count'] = 8
flags = is_complex << 3 | is_global << 2 | is_logical << 1
af['flags_class'] = mclass | flags << 8
af['nzmax'] = nzmax
self.write_bytes(af)
# shape
self.write_element(np.array(shape, dtype='i4'))
# write name
name = np.asarray(name)
if name == '': # empty string zero-terminated
self.write_smalldata_element(name, miINT8, 0)
else:
self.write_element(name, miINT8)
# reset the one-shot store to defaults
self._var_name = ''
self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
""" Write variable at top level of mat file
Parameters
----------
arr : array_like
array-like object to create writer for
name : str, optional
name as it will appear in matlab workspace
default is empty string
is_global : {False, True}, optional
whether variable will be global on load into matlab
"""
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
def write(self, arr):
''' Write `arr` to stream at top and sub levels
Parameters
----------
arr : array_like
array-like object to create writer for
'''
# store position, so we can update the matrix tag
mat_tag_pos = self.file_stream.tell()
# First check if these are sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr)
self.update_matrix_tag(mat_tag_pos)
return
# Try to convert things that aren't arrays
narr = to_writeable(arr)
if narr is None:
raise TypeError('Could not convert %s (type %s) to array'
% (arr, type(arr)))
if isinstance(narr, MatlabObject):
self.write_object(narr)
elif isinstance(narr, MatlabFunction):
raise MatWriteError('Cannot write matlab functions')
elif narr is EmptyStructMarker: # empty struct array
self.write_empty_struct()
elif narr.dtype.fields: # struct array
self.write_struct(narr)
elif narr.dtype.hasobject: # cell array
self.write_cells(narr)
elif narr.dtype.kind in ('U', 'S'):
if self.unicode_strings:
codec = 'UTF8'
else:
codec = 'ascii'
self.write_char(narr, codec)
else:
self.write_numeric(narr)
self.update_matrix_tag(mat_tag_pos)
def write_numeric(self, arr):
imagf = arr.dtype.kind == 'c'
logif = arr.dtype.kind == 'b'
try:
mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
except KeyError:
# No matching matlab type, probably complex256 / float128 / float96
# Cast data to complex128 / float64.
if imagf:
arr = arr.astype('c128')
elif logif:
arr = arr.astype('i1') # Should only contain 0/1
else:
arr = arr.astype('f8')
mclass = mxDOUBLE_CLASS
self.write_header(matdims(arr, self.oned_as),
mclass,
is_complex=imagf,
is_logical=logif)
if imagf:
self.write_element(arr.real)
self.write_element(arr.imag)
else:
self.write_element(arr)
def write_char(self, arr, codec='ascii'):
''' Write string array `arr` with given `codec`
'''
if arr.size == 0 or np.all(arr == ''):
# This an empty string array or a string array containing
# only empty strings. Matlab cannot distinguish between a
# string array that is empty, and a string array containing
# only empty strings, because it stores strings as arrays of
# char. There is no way of having an array of char that is
# not empty, but contains an empty string. We have to
# special-case the array-with-empty-strings because even
# empty strings have zero padding, which would otherwise
# appear in matlab as a string with a space.
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
# non-empty string.
#
# Convert to char array
arr = arr_to_chars(arr)
# We have to write the shape directly, because we are going
# recode the characters, and the resulting stream of chars
# may have a different length
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# Make one long string from all the characters. We need to
# transpose here, because we're flattening the array, before
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = np.prod(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as 1-D byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
    def write_sparse(self, arr):
        ''' Sparse matrices are 2D

        Writes `arr` as a mxSPARSE_CLASS matrix: header (flags, shape,
        name), then CSC row indices, column pointers, real data and,
        when complex, the imaginary data.
        '''
        A = arr.tocsc()  # convert to sparse CSC format
        A.sort_indices()  # MATLAB expects sorted row indices
        is_complex = (A.dtype.kind == 'c')
        is_logical = (A.dtype.kind == 'b')
        nz = A.nnz
        self.write_header(matdims(arr, self.oned_as),
                          mxSPARSE_CLASS,
                          is_complex=is_complex,
                          is_logical=is_logical,
                          # matlab won't load file with 0 nzmax
                          nzmax=1 if nz == 0 else nz)
        self.write_element(A.indices.astype('i4'))
        self.write_element(A.indptr.astype('i4'))
        self.write_element(A.data.real)
        if is_complex:
            self.write_element(A.data.imag)
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
# loop over data, column major
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_empty_struct(self):
self.write_header((1, 1), mxSTRUCT_CLASS)
# max field name length set to 1 in an example matlab struct
self.write_element(np.array(1, dtype=np.int32))
# Field names element is empty
self.write_element(np.array([], dtype=np.int8))
    def write_struct(self, arr):
        ''' Write structured ndarray `arr` as a matlab struct matrix. '''
        self.write_header(matdims(arr, self.oned_as),
                          mxSTRUCT_CLASS)
        self._write_items(arr)
    def _write_items(self, arr):
        ''' Write field names then per-record field values of `arr`.

        Shared by struct and object writing.  Raises ValueError when a
        field name exceeds the matlab limit (31 chars, or 63 with
        ``long_field_names``).
        '''
        # write fieldnames
        fieldnames = [f[0] for f in arr.dtype.descr]
        # Names are stored in fixed-width slots of `length` bytes
        # (including a terminating NUL), hence the +1.
        length = max([len(fieldname) for fieldname in fieldnames])+1
        max_length = (self.long_field_names and 64) or 32
        if length > max_length:
            raise ValueError("Field names are restricted to %d characters" %
                             (max_length-1))
        self.write_element(np.array([length], dtype='i4'))
        self.write_element(
            np.array(fieldnames, dtype='S%d' % (length)),
            mdtype=miINT8)
        # Values follow record by record in column-major order, each as
        # a nested matrix element.
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            for f in fieldnames:
                self.write(el[f])
    def write_object(self, arr):
        '''Same as writing structs, except different mx class, and extra
        classname element after header
        '''
        self.write_header(matdims(arr, self.oned_as),
                          mxOBJECT_CLASS)
        # class name as int8-coded bytes, then the usual struct payload
        self.write_element(np.array(arr.classname, dtype='S'),
                           mdtype=miINT8)
        self._write_items(arr)
class MatFile5Writer(object):
    ''' Class for writing mat5 files '''
    @docfiller
    def __init__(self, file_stream,
                 do_compression=False,
                 unicode_strings=False,
                 global_vars=None,
                 long_field_names=False,
                 oned_as='row'):
        ''' Initialize writer for matlab 5 format files

        Parameters
        ----------
        %(do_compression)s
        %(unicode_strings)s
        global_vars : None or sequence of strings, optional
            Names of variables to be marked as global for matlab
        %(long_fields)s
        %(oned_as)s
        '''
        self.file_stream = file_stream
        self.do_compression = do_compression
        self.unicode_strings = unicode_strings
        if global_vars:
            self.global_vars = global_vars
        else:
            self.global_vars = []
        self.long_field_names = long_field_names
        self.oned_as = oned_as
        # Created lazily in put_variables; the compression path swaps
        # its file_stream per variable.
        self._matrix_writer = None
    def write_file_header(self):
        # write header
        hdr = np.zeros((), NDT_FILE_HDR)
        hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
            % (os.name,time.asctime())
        hdr['version'] = 0x0100
        # 'MI' bytes let a reader detect the file's endianness.
        hdr['endian_test'] = np.ndarray(shape=(),
                                        dtype='S2',
                                        buffer=np.uint16(0x4d49))
        self.file_stream.write(hdr.tobytes())
    def put_variables(self, mdict, write_header=None):
        ''' Write variables in `mdict` to stream

        Parameters
        ----------
        mdict : mapping
           mapping with method ``items`` returns name, contents pairs where
           ``name`` which will appear in the matlab workspace in file load, and
           ``contents`` is something writeable to a matlab file, such as a NumPy
           array.
        write_header : {None, True, False}, optional
           If True, then write the matlab file header before writing the
           variables.  If None (the default) then write the file header
           if we are at position 0 in the stream.  By setting False
           here, and setting the stream position to the end of the file,
           you can append variables to a matlab file
        '''
        # write header if requested, or None and start of file
        if write_header is None:
            write_header = self.file_stream.tell() == 0
        if write_header:
            self.write_file_header()
        self._matrix_writer = VarWriter5(self)
        for name, var in mdict.items():
            # Leading-underscore keys ('__header__' etc.) are
            # bookkeeping entries, not matlab variables.
            if name[0] == '_':
                continue
            is_global = name in self.global_vars
            if self.do_compression:
                # Render the variable into an in-memory buffer, then
                # wrap the zlib-compressed bytes in a miCOMPRESSED tag.
                stream = BytesIO()
                self._matrix_writer.file_stream = stream
                self._matrix_writer.write_top(var, asbytes(name), is_global)
                out_str = zlib.compress(stream.getvalue())
                tag = np.empty((), NDT_TAG_FULL)
                tag['mdtype'] = miCOMPRESSED
                tag['byte_count'] = len(out_str)
                self.file_stream.write(tag.tobytes())
                self.file_stream.write(out_str)
            else:  # not compressing
                self._matrix_writer.write_top(var, asbytes(name), is_global)
| 37.330347 | 81 | 0.603882 |
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
from numpy.compat import asbytes, asstr
import scipy.sparse
from .byteordercodes import native_code, swapped_code
from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
from .mio5_utils import VarReader5
from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info, mat_struct)
from .streams import ZlibInputStream
def _has_struct(elem):
    """Return True when `elem` is a non-empty ndarray whose first entry
    is a mat_struct instance."""
    if not isinstance(elem, np.ndarray):
        return False
    if elem.size == 0:
        return False
    return isinstance(elem[0], mat_struct)
def _inspect_cell_array(ndarray):
    """Convert a cell array to a list, recursively replacing mat_struct
    entries with plain dicts."""
    def _convert(item):
        if isinstance(item, mat_struct):
            return _matstruct_to_dict(item)
        if _has_struct(item):
            return _inspect_cell_array(item)
        return item
    return [_convert(item) for item in ndarray]
def _matstruct_to_dict(matobj):
    """Build a plain dict from a mat_struct, recursing into nested
    structs and struct-bearing cell arrays."""
    out = {}
    for name in matobj._fieldnames:
        value = matobj.__dict__[name]
        if isinstance(value, mat_struct):
            out[name] = _matstruct_to_dict(value)
        elif _has_struct(value):
            out[name] = _inspect_cell_array(value)
        else:
            out[name] = value
    return out
def _simplify_cells(d):
    """Replace mat_struct values in mapping `d` with plain dicts/lists,
    in place, and return `d`."""
    for key in d:
        value = d[key]
        if isinstance(value, mat_struct):
            d[key] = _matstruct_to_dict(value)
        elif _has_struct(value):
            d[key] = _inspect_cell_array(value)
    return d
class MatFile5Reader(MatFileReader):
@docfiller
def __init__(self,
mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True,
uint16_codec=None,
simplify_cells=False):
super(MatFile5Reader, self).__init__(
mat_stream,
byte_order,
mat_dtype,
squeeze_me,
chars_as_strings,
matlab_compatible,
struct_as_record,
verify_compressed_data_integrity,
simplify_cells)
if not uint16_codec:
uint16_codec = sys.getdefaultencoding()
self.uint16_codec = uint16_codec
self._file_reader = None
self._matrix_reader = None
def guess_byte_order(self):
self.mat_stream.seek(126)
mi = self.mat_stream.read(2)
self.mat_stream.seek(0)
return mi == b'IM' and '<' or '>'
def read_file_header(self):
hdict = {}
hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
hdr = read_dtype(self.mat_stream, hdr_dtype)
hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
v_major = hdr['version'] >> 8
v_minor = hdr['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict
def initialize_read(self):
self._file_reader = VarReader5(self)
self._matrix_reader = VarReader5(self)
def read_var_header(self):
mdtype, byte_count = self._file_reader.read_full_tag()
if not byte_count > 0:
raise ValueError("Did not read any bytes")
next_pos = self.mat_stream.tell() + byte_count
if mdtype == miCOMPRESSED:
stream = ZlibInputStream(self.mat_stream, byte_count)
self._matrix_reader.set_stream(stream)
check_stream_limit = self.verify_compressed_data_integrity
mdtype, byte_count = self._matrix_reader.read_full_tag()
else:
check_stream_limit = False
self._matrix_reader.set_stream(self.mat_stream)
if not mdtype == miMATRIX:
raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
header = self._matrix_reader.read_header(check_stream_limit)
return header, next_pos
def read_var_array(self, header, process=True):
return self._matrix_reader.array_from_header(header, process)
def get_variables(self, variable_names=None):
if isinstance(variable_names, str):
variable_names = [variable_names]
elif variable_names is not None:
variable_names = list(variable_names)
self.mat_stream.seek(0)
self.initialize_read()
mdict = self.read_file_header()
mdict['__globals__'] = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name in mdict:
warnings.warn('Duplicate variable name "%s" in stream'
' - replacing previous with new\n'
'Consider mio5.varmats_from_mat to split '
'file into single variable files' % name,
MatReadWarning, stacklevel=2)
if name == '':
name = '__function_workspace__'
process = False
else:
process = True
if variable_names is not None and name not in variable_names:
self.mat_stream.seek(next_position)
continue
try:
res = self.read_var_array(hdr, process)
except MatReadError as err:
warnings.warn(
f'Unreadable variable "{name}", because "{err}"',
Warning, stacklevel=2)
res = f"Read error: {err}"
self.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
if variable_names is not None:
variable_names.remove(name)
if len(variable_names) == 0:
break
if self.simplify_cells:
return _simplify_cells(mdict)
else:
return mdict
def list_variables(self):
self.mat_stream.seek(0)
self.initialize_read()
self.read_file_header()
vars = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name == '':
name = '__function_workspace__'
shape = self._matrix_reader.shape_from_header(hdr)
if hdr.is_logical:
info = 'logical'
else:
info = mclass_info.get(hdr.mclass, 'unknown')
vars.append((name, shape, info))
self.mat_stream.seek(next_position)
return vars
def varmats_from_mat(file_obj):
    ''' Pull variables out of a mat 5 file as single-variable mat files.

    Returns a list of ``(name, BytesIO)`` pairs; each BytesIO holds the
    original file header followed by the raw bytes of one variable, so
    it can be read as a standalone mat file.
    '''
    rdr = MatFile5Reader(file_obj)
    file_obj.seek(0)
    # Raw read of the top-level file header, for replay into each output
    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
    raw_hdr = file_obj.read(hdr_len)
    # Initialize variable reading
    file_obj.seek(0)
    rdr.initialize_read()
    rdr.read_file_header()
    next_position = file_obj.tell()
    named_mats = []
    while not rdr.end_of_stream():
        start_position = next_position
        hdr, next_position = rdr.read_var_header()
        name = asstr(hdr.name)
        # Read the variable's raw bytes without decoding them
        file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # header + single variable = a valid one-variable mat file
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
        out_obj.seek(0)
        named_mats.append((name, out_obj))
    return named_mats
class EmptyStructMarker(object):
def to_writeable(source):
    ''' Convert input object `source` to something we can write.

    Returns `source` unchanged if already an ndarray, ``None`` when no
    conversion is possible, ``EmptyStructMarker`` for a mapping with no
    usable fields, otherwise an ndarray built from `source`.
    '''
    if isinstance(source, np.ndarray):
        return source
    if source is None:
        return None
    # Objects that implement the mapping protocol
    is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
                  hasattr(source, 'items'))
    if isinstance(source, np.generic):
        # NumPy scalars are never mappings (PyPy issue workaround)
        pass
    elif not is_mapping and hasattr(source, '__dict__'):
        # Non-mapping objects with a __dict__: treat public attributes
        # as struct fields
        source = dict((key, value) for key, value in source.__dict__.items()
                      if not key.startswith('_'))
        is_mapping = True
    if is_mapping:
        dtype = []
        values = []
        for field, value in source.items():
            # matlab field names cannot start with underscore or digit;
            # such keys are silently dropped
            if (isinstance(field, str) and
                    field[0] not in '_0123456789'):
                dtype.append((str(field), object))
                values.append(value)
        if dtype:
            return np.array([tuple(values)], dtype)
        else:
            return EmptyStructMarker
    # Next try and convert to an array
    narr = np.asanyarray(source)
    if narr.dtype.type in (object, np.object_) and \
       narr.shape == () and narr == source:
        # No interesting conversion possible
        return None
    return narr
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']  # 128-byte file header
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']  # 8-byte element tag
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']  # tag with embedded data
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']  # matrix flags sub-element
class VarWriter5(object):
mat_tag = np.zeros((), NDT_TAG_FULL)
mat_tag['mdtype'] = miMATRIX
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.unicode_strings = file_writer.unicode_strings
self.long_field_names = file_writer.long_field_names
self.oned_as = file_writer.oned_as
# These are used for top level writes, and unset after
self._var_name = None
self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().newbyteorder()
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
def write_smalldata_element(self, arr, mdtype, byte_count):
# write tag with embedded data
tag = np.zeros((), NDT_TAG_SMALL)
tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
# if arr.tobytes is < 4, the element will be zero-padded as needed.
tag['data'] = arr.tobytes(order='F')
self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
def write_header(self,
shape,
mclass,
is_complex=False,
is_logical=False,
nzmax=0):
# get name and is_global from one-shot object store
name = self._var_name
is_global = self._var_is_global
# initialize the top-level matrix tag, store position
self._mat_tag_pos = self.file_stream.tell()
self.write_bytes(self.mat_tag)
# write array flags (complex, global, logical, class, nzmax)
af = np.zeros((), NDT_ARRAY_FLAGS)
af['data_type'] = miUINT32
af['byte_count'] = 8
flags = is_complex << 3 | is_global << 2 | is_logical << 1
af['flags_class'] = mclass | flags << 8
af['nzmax'] = nzmax
self.write_bytes(af)
# shape
self.write_element(np.array(shape, dtype='i4'))
# write name
name = np.asarray(name)
if name == '': # empty string zero-terminated
self.write_smalldata_element(name, miINT8, 0)
else:
self.write_element(name, miINT8)
# reset the one-shot store to defaults
self._var_name = ''
self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
def write(self, arr):
# store position, so we can update the matrix tag
mat_tag_pos = self.file_stream.tell()
# First check if these are sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr)
self.update_matrix_tag(mat_tag_pos)
return
# Try to convert things that aren't arrays
narr = to_writeable(arr)
if narr is None:
raise TypeError('Could not convert %s (type %s) to array'
% (arr, type(arr)))
if isinstance(narr, MatlabObject):
self.write_object(narr)
elif isinstance(narr, MatlabFunction):
raise MatWriteError('Cannot write matlab functions')
elif narr is EmptyStructMarker:
self.write_empty_struct()
elif narr.dtype.fields:
self.write_struct(narr)
elif narr.dtype.hasobject:
self.write_cells(narr)
elif narr.dtype.kind in ('U', 'S'):
if self.unicode_strings:
codec = 'UTF8'
else:
codec = 'ascii'
self.write_char(narr, codec)
else:
self.write_numeric(narr)
self.update_matrix_tag(mat_tag_pos)
def write_numeric(self, arr):
imagf = arr.dtype.kind == 'c'
logif = arr.dtype.kind == 'b'
try:
mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
except KeyError:
if imagf:
arr = arr.astype('c128')
elif logif:
arr = arr.astype('i1')
else:
arr = arr.astype('f8')
mclass = mxDOUBLE_CLASS
self.write_header(matdims(arr, self.oned_as),
mclass,
is_complex=imagf,
is_logical=logif)
if imagf:
self.write_element(arr.real)
self.write_element(arr.imag)
else:
self.write_element(arr)
def write_char(self, arr, codec='ascii'):
if arr.size == 0 or np.all(arr == ''):
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
arr = arr_to_chars(arr)
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = np.prod(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as 1-D byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
def write_sparse(self, arr):
A = arr.tocsc() # convert to sparse CSC format
A.sort_indices() # MATLAB expects sorted row indices
is_complex = (A.dtype.kind == 'c')
is_logical = (A.dtype.kind == 'b')
nz = A.nnz
self.write_header(matdims(arr, self.oned_as),
mxSPARSE_CLASS,
is_complex=is_complex,
is_logical=is_logical,
# matlab won't load file with 0 nzmax
nzmax=1 if nz == 0 else nz)
self.write_element(A.indices.astype('i4'))
self.write_element(A.indptr.astype('i4'))
self.write_element(A.data.real)
if is_complex:
self.write_element(A.data.imag)
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_empty_struct(self):
self.write_header((1, 1), mxSTRUCT_CLASS)
self.write_element(np.array(1, dtype=np.int32))
self.write_element(np.array([], dtype=np.int8))
def write_struct(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxSTRUCT_CLASS)
self._write_items(arr)
def _write_items(self, arr):
fieldnames = [f[0] for f in arr.dtype.descr]
length = max([len(fieldname) for fieldname in fieldnames])+1
max_length = (self.long_field_names and 64) or 32
if length > max_length:
raise ValueError("Field names are restricted to %d characters" %
(max_length-1))
self.write_element(np.array([length], dtype='i4'))
self.write_element(
np.array(fieldnames, dtype='S%d' % (length)),
mdtype=miINT8)
A = np.atleast_2d(arr).flatten('F')
for el in A:
for f in fieldnames:
self.write(el[f])
def write_object(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxOBJECT_CLASS)
self.write_element(np.array(arr.classname, dtype='S'),
mdtype=miINT8)
self._write_items(arr)
class MatFile5Writer(object):
@docfiller
def __init__(self, file_stream,
do_compression=False,
unicode_strings=False,
global_vars=None,
long_field_names=False,
oned_as='row'):
self.file_stream = file_stream
self.do_compression = do_compression
self.unicode_strings = unicode_strings
if global_vars:
self.global_vars = global_vars
else:
self.global_vars = []
self.long_field_names = long_field_names
self.oned_as = oned_as
self._matrix_writer = None
def write_file_header(self):
hdr = np.zeros((), NDT_FILE_HDR)
hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
% (os.name,time.asctime())
hdr['version'] = 0x0100
hdr['endian_test'] = np.ndarray(shape=(),
dtype='S2',
buffer=np.uint16(0x4d49))
self.file_stream.write(hdr.tobytes())
def put_variables(self, mdict, write_header=None):
if write_header is None:
write_header = self.file_stream.tell() == 0
if write_header:
self.write_file_header()
self._matrix_writer = VarWriter5(self)
for name, var in mdict.items():
if name[0] == '_':
continue
is_global = name in self.global_vars
if self.do_compression:
stream = BytesIO()
self._matrix_writer.file_stream = stream
self._matrix_writer.write_top(var, asbytes(name), is_global)
out_str = zlib.compress(stream.getvalue())
tag = np.empty((), NDT_TAG_FULL)
tag['mdtype'] = miCOMPRESSED
tag['byte_count'] = len(out_str)
self.file_stream.write(tag.tobytes())
self.file_stream.write(out_str)
else:
self._matrix_writer.write_top(var, asbytes(name), is_global)
| true | true |
f72e6eb586f0c6e20ddb5b1d5a840e99e6971d7d | 575 | py | Python | pola/users/urls.py | bpaszcza/pola-backend | 60c142d24b0630dbe22e73cf9bfaf9df297abd7c | [
"BSD-3-Clause"
] | 30 | 2015-08-13T01:05:36.000Z | 2022-01-22T03:02:50.000Z | pola/users/urls.py | bpaszcza/pola-backend | 60c142d24b0630dbe22e73cf9bfaf9df297abd7c | [
"BSD-3-Clause"
] | 1,428 | 2015-10-08T07:38:26.000Z | 2022-03-31T08:36:08.000Z | pola/users/urls.py | bpaszcza/pola-backend | 60c142d24b0630dbe22e73cf9bfaf9df297abd7c | [
"BSD-3-Clause"
] | 13 | 2015-12-27T22:35:25.000Z | 2022-02-01T15:55:58.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
    # User list at the app root.
    url(r'^$', views.UserListView.as_view(), name='list'),
    # Redirect the logged-in user to their own detail page.
    url(r'^~redirect/$', views.UserRedirectView.as_view(), name='redirect'),
    # Public detail page, keyed by username.
    url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
    # Self-service profile update.
    url(r'^~update/$', views.UserUpdateView.as_view(), name='update'),
]
| 38.333333 | 97 | 0.683478 | from django.conf.urls import url
from . import views
urlpatterns = [
url(regex=r'^$', view=views.UserListView.as_view(), name='list'),
url(regex=r'^~redirect/$', view=views.UserRedirectView.as_view(), name='redirect'),
url(regex=r'^(?P<username>[\w.@+-]+)/$', view=views.UserDetailView.as_view(), name='detail'),
url(regex=r'^~update/$', view=views.UserUpdateView.as_view(), name='update'),
]
| true | true |
f72e6f0ca0dd7ba992d3af00812a666bd784e293 | 9,546 | py | Python | BeesEtAl/Gholami.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | 1 | 2020-08-04T00:13:54.000Z | 2020-08-04T00:13:54.000Z | BeesEtAl/Gholami.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | null | null | null | BeesEtAl/Gholami.py | FJFranklin/BeesEtAl | 3fd21d044e77b4a1df56ac2f405e2084bebd54e1 | [
"MIT"
] | null | null | null | # *** References ***
# Gholami & Mohammadi, A Novel Combination of Bees and Firefly Algorithm to Optimize Continuous Problems
# Türker Tuncer, LDW-SCSA: Logistic Dynamic Weight based Sine Cosine Search Algorithm for Numerical Functions Optimization
# https://arxiv.org/ftp/arxiv/papers/1809/1809.03055.pdf
# Hartmut Pohlheim, Examples of Objective Functions
# http://www.geatbx.com/download/GEATbx_ObjFunExpl_v38.pdf
# Wikipedia, Test functions for optimization
# https://en.wikipedia.org/wiki/Test_functions_for_optimization
import numpy as np
from .Base_Coster import Base_Coster
class F1(Base_Coster):
    """
    Function F1 from Gholami & Mohammadi FA-BA Hybrid paper

    De Jong / Sphere (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-5.12, 5.12]^Ndim.
        bound = 5.12 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are used directly.
        return X

    def evaluate_cost(self):
        # Sphere function: f(x) = sum_i x_i^2.
        self.cost = np.sum(self.XA * self.XA)

    def meso(self):
        # No local refinement step for this benchmark.
        None
class F2(Base_Coster):
    """
    Function F2 from Gholami & Mohammadi FA-BA Hybrid paper

    Schwefel 2.22 (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-10, 10]^Ndim.
        bound = 10 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Schwefel 2.22: f(x) = sum_i |x_i| + prod_i |x_i|.
        magnitudes = np.abs(self.XA)
        self.cost = np.sum(magnitudes) + np.prod(magnitudes)

    def meso(self):
        # No local refinement step.
        None
class F3(Base_Coster):
    """
    Function F3 from Gholami & Mohammadi FA-BA Hybrid paper

    Schwefel 1.2 - Rotated hyper-ellipsoid (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-65.536, 65.536]^Ndim.
        return -65.536 * np.ones(Ndim), 65.536 * np.ones(Ndim)

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are used directly.
        return X

    def evaluate_cost(self):
        # f(x) = sum_i (sum_{j<=i} x_j)^2.  np.cumsum produces every
        # prefix sum in a single O(n) pass, replacing the original
        # O(n^2) Python loop over growing slices; the summands are
        # identical (up to floating-point summation order).
        prefix_sums = np.cumsum(self.XA)
        self.cost = np.sum(prefix_sums * prefix_sums)

    def meso(self):
        # No local refinement step.
        None
class F4(Base_Coster):
    """
    Function F4 from Gholami & Mohammadi FA-BA Hybrid paper

    Schwefel 2.21 (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-100, 100]^Ndim.
        bound = 100 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Schwefel 2.21: f(x) = max_i |x_i|.
        self.cost = np.abs(self.XA).max()

    def meso(self):
        # No local refinement step.
        None
class F5(Base_Coster):
    """
    Function F5 from Gholami & Mohammadi FA-BA Hybrid paper

    Rosenbrock (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-2.048, 2.048]^Ndim.
        bound = 2.048 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Rosenbrock valley: sum_i 100*(x_{i+1} - x_i^2)^2 + (1 - x_i)^2.
        lead = self.XA[:-1]
        follow = self.XA[1:]
        self.cost = np.sum(100 * (follow - lead * lead) ** 2 + (1 - lead) ** 2)

    def meso(self):
        # No local refinement step.
        None
class F6(Base_Coster):
    """
    Function F6 from Gholami & Mohammadi FA-BA Hybrid paper

    Step (ND) cost function; optimum @ (-0.5,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-100, 100]^Ndim.
        bound = 100 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Step function: sum_i floor((x_i + 0.5)^2).
        offset = self.XA + 0.5
        self.cost = np.sum(np.floor(offset * offset))

    def meso(self):
        # No local refinement step.
        None
class F7(Base_Coster):
    """
    Function F7 from Gholami & Mohammadi FA-BA Hybrid paper

    Noise (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-1.28, 1.28]^Ndim.
        bound = 1.28 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Quartic-with-noise: sum_i i * x_i^4 + U(0, 1).
        indices = np.arange(1, len(self.XA) + 1)
        self.cost = np.sum(indices * np.power(self.XA, 4)) + np.random.rand(1)

    def meso(self):
        # No local refinement step.
        None
class F8(Base_Coster):
    """
    Function F8 from Gholami & Mohammadi FA-BA Hybrid paper

    Schwefel (ND) cost function
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-500, 500]^Ndim.
        bound = 500 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Schwefel: f(x) = -sum_i x_i * sin(sqrt(|x_i|)).
        self.cost = -np.sum(self.XA * np.sin(np.sqrt(np.abs(self.XA))))

    def meso(self):
        # No local refinement step.
        None
class F9(Base_Coster):
    """
    Function F9 from Gholami & Mohammadi FA-BA Hybrid paper

    Rastrigin (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-5.12, 5.12]^Ndim.
        bound = 5.12 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    def evaluate_cost(self):
        # Rastrigin: sum_i (x_i^2 - 10*cos(2*pi*x_i) + 10).
        x = self.XA
        self.cost = np.sum(x * x - 10 * np.cos(2 * np.pi * x) + 10)

    def meso(self):
        # No local refinement step.
        None
class F10(Base_Coster):
    """
    Function F10 from Gholami & Mohammadi FA-BA Hybrid paper

    Ackley (ND) cost function; optimum @ (0,...
    """

    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-32.768, 32.768]^Ndim.
        bound = 32.768 * np.ones(Ndim)
        return -bound, bound

    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)

    def map_to_solution_space(self, X):
        # Identity mapping.
        return X

    @staticmethod
    def rms(X):
        # Root-mean-square magnitude of vector X.
        return np.sqrt(X.dot(X) / len(X))

    def evaluate_cost(self):
        # Ackley: e + 20*(1 - exp(-rms(x)/5)) - exp(mean(cos(2*pi*x))).
        exploration = 20 * (1 - np.exp(-F10.rms(self.XA) / 5))
        oscillation = np.exp(np.sum(np.cos(2 * np.pi * self.XA)) / len(self.XA))
        self.cost = np.exp(1) + exploration - oscillation

    def meso(self):
        # No local refinement step.
        None
class F11(Base_Coster):
    """
    Function F11 from Gholami & Mohammadi FA-BA Hybrid paper
    Griewangk (ND) cost function; optimum @ (0,...

    Reads the candidate position self.XA and writes self.cost.
    """
    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-600, 600]^Ndim.
        return -600 * np.ones(Ndim), 600 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidates are used directly.
        return X
    def evaluate_cost(self):
        # Griewank-style cost: sum(x_i^2)/4000 - prod(cos(.)) + 1.
        # NOTE(review): the classical Griewank product term is
        # cos(x_i / sqrt(i)), but here the cosine argument is
        # x_i**2 / sqrt(i) (np.power(self.XA, 2)).  The minimum is still
        # 0 at the origin; confirm against the source paper before
        # changing.
        self.cost = sum(np.power(self.XA, 2)) / 4000 - np.prod(np.cos(np.power(self.XA, 2) / np.power(range(1, 1+len(self.XA)), 0.5))) + 1
    def meso(self):
        # No local refinement step for this benchmark.
        None
class F12(Base_Coster):
    """
    Function F12 from Gholami & Mohammadi FA-BA Hybrid paper
    Generalised Penalised 1 (ND) cost function; optimum @ (0,...

    Reads the candidate position self.XA and writes self.cost.
    """
    @staticmethod
    def extents(Ndim):
        # Search domain is the hypercube [-50, 50]^Ndim.
        return -50 * np.ones(Ndim), 50 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidates are used directly.
        return X
    @staticmethod
    def u(xi, a, k, m):
        # Penalty term: 0 inside [-a, a], growing as k*(|xi| - a)^m
        # outside the box.
        if xi > a:
            v = k * (xi - a)**m
        elif xi < -a:
            v = k * (-xi - a)**m
        else:
            v = 0
        return v
    def evaluate_cost(self):
        # Change of variables y_i = 1 + (x_i + 1)/4, as in the standard
        # penalised function definition.
        y = 1 + (self.XA + 1) / 4
        # Accumulate the out-of-box penalties u(x_i, 10, 100, 4).
        c = 0
        for i in range(0, len(self.XA)):
            c = c + F12.u(self.XA[i], 10, 100, 4)
        # NOTE(review): the classical form uses 10*sin(pi*y_1)**2 for
        # the first term; here it is 10*sin(pi*y_0) (not squared).
        # Confirm against the source paper before changing.
        self.cost = sum(np.power(y[0:(len(self.XA)-1)] - 1, 2) * (1 + 10 * np.power(np.sin(np.pi * y[1:len(self.XA)]), 2)))
        self.cost = (self.cost + 10 * np.sin(np.pi * y[0]) + (y[len(self.XA)-1] - 1)**2) * np.pi / len(self.XA) + c
    def meso(self):
        # No local refinement step for this benchmark.
        None
def Gholami_TestFunction_Extents(number, Ndim=30):
    """Return the box bounds of Gholami & Mohammadi benchmark `number`.

    Parameters
    ----------
    number : int
        Index of the test function, 1 to 12.
    Ndim : int, optional
        Problem dimensionality (default 30, as in the paper).

    Returns
    -------
    (minima, maxima) : pair of ndarray, or (None, None) when `number`
        does not name a known test function.
    """
    # Dispatch table replaces the original chain of twelve independent
    # `if` tests; behaviour, including the (None, None) fallback, is
    # unchanged.
    benchmarks = {1: F1, 2: F2, 3: F3, 4: F4, 5: F5, 6: F6,
                  7: F7, 8: F8, 9: F9, 10: F10, 11: F11, 12: F12}
    if number in benchmarks:
        return benchmarks[number].extents(Ndim)
    return None, None
def Gholami_TestFunction_Coster(number, base_optimiser):
    """Instantiate the coster for Gholami & Mohammadi benchmark `number`.

    Parameters
    ----------
    number : int
        Index of the test function, 1 to 12.
    base_optimiser : object
        Optimiser instance passed through to the coster constructor.

    Returns
    -------
    Coster instance, or None when `number` does not name a known
    test function.
    """
    # Dispatch table replaces the original chain of twelve independent
    # `if` tests; behaviour, including the None fallback, is unchanged.
    benchmarks = {1: F1, 2: F2, 3: F3, 4: F4, 5: F5, 6: F6,
                  7: F7, 8: F8, 9: F9, 10: F10, 11: F11, 12: F12}
    cls = benchmarks.get(number)
    return cls(base_optimiser) if cls is not None else None
| 26.153425 | 157 | 0.602137 |
import numpy as np
from .Base_Coster import Base_Coster
class F1(Base_Coster):
@staticmethod
def extents(Ndim):
return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.power(self.XA, 2))
def meso(self):
None
class F2(Base_Coster):
@staticmethod
def extents(Ndim):
return -10 * np.ones(Ndim), 10 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.abs(self.XA)) + np.prod(np.abs(self.XA))
def meso(self):
None
class F3(Base_Coster):
@staticmethod
def extents(Ndim):
return -65.536 * np.ones(Ndim), 65.536 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = 0
for i in range(0, len(self.XA)):
self.cost = self.cost + (sum(self.XA[0:(i+1)]))**2
def meso(self):
None
class F4(Base_Coster):
@staticmethod
def extents(Ndim):
return -100 * np.ones(Ndim), 100 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = max(np.abs(self.XA))
def meso(self):
None
class F5(Base_Coster):
@staticmethod
def extents(Ndim):
return -2.048 * np.ones(Ndim), 2.048 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(100 * np.power(self.XA[1:len(self.XA)] - np.power(self.XA[0:(len(self.XA)-1)], 2), 2) + np.power(1 - self.XA[0:(len(self.XA)-1)], 2))
def meso(self):
None
class F6(Base_Coster):
    """Benchmark F6 (step function): f(x) = sum(floor((x_i + 0.5)^2));
    domain [-100, 100]^N.

    NOTE(review): the classic step benchmark squares *after* flooring,
    i.e. sum(floor(x_i + 0.5)^2); here the floor is applied to the square.
    Confirm against the intended reference before changing.
    """
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -100 * np.ones(Ndim), 100 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    def evaluate_cost(self):
        self.cost = sum(np.floor(np.power(self.XA + 0.5, 2)))
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F7(Base_Coster):
    """Benchmark F7 (quartic with noise): f(x) = sum(i * x_i^4) + U(0, 1);
    domain [-1.28, 1.28]^N."""
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -1.28 * np.ones(Ndim), 1.28 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    def evaluate_cost(self):
        # np.random.rand(1) returns a 1-element ndarray, so self.cost is an
        # ndarray rather than a scalar here.
        self.cost = sum(np.power(self.XA, 4) * np.asarray(range(1, 1 + len(self.XA)))) + np.random.rand(1)
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F8(Base_Coster):
    """Benchmark F8 (Schwefel 2.26): f(x) = -sum(x_i * sin(sqrt(|x_i|)));
    domain [-500, 500]^N."""
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -500 * np.ones(Ndim), 500 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    def evaluate_cost(self):
        self.cost = -sum(self.XA * np.sin(np.sqrt(abs(self.XA))))
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F9(Base_Coster):
    """Benchmark F9 (Rastrigin): f(x) = sum(x_i^2 - 10*cos(2*pi*x_i) + 10);
    domain [-5.12, 5.12]^N."""
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    def evaluate_cost(self):
        self.cost = sum(np.power(self.XA, 2) - 10 * np.cos(2 * np.pi * self.XA) + 10)
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F10(Base_Coster):
    """Benchmark F10 (Ackley): f(x) = 20 + e - 20*exp(-rms(x)/5)
    - exp(mean(cos(2*pi*x))); domain [-32.768, 32.768]^N."""
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -32.768 * np.ones(Ndim), 32.768 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    @staticmethod
    def rms(X):
        # Root-mean-square of the candidate vector.
        return np.sqrt(X.dot(X) / len(X))
    def evaluate_cost(self):
        self.cost = np.exp(1) + 20 * (1 - np.exp(-F10.rms(self.XA) / 5)) - np.exp(sum(np.cos(2 * np.pi * self.XA)) / len(self.XA))
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F11(Base_Coster):
    """Benchmark F11 (Griewank-style): f(x) = sum(x^2)/4000
    - prod(cos(x_i^2 / sqrt(i))) + 1; domain [-600, 600]^N.

    NOTE(review): classic Griewank uses cos(x_i / sqrt(i)); here x_i is
    squared inside the cosine -- confirm against the intended reference.
    """
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -600 * np.ones(Ndim), 600 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    def evaluate_cost(self):
        self.cost = sum(np.power(self.XA, 2)) / 4000 - np.prod(np.cos(np.power(self.XA, 2) / np.power(range(1, 1+len(self.XA)), 0.5))) + 1
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
class F12(Base_Coster):
    """Benchmark F12 (penalised function): sine-weighted sum over the shifted
    variable y = 1 + (x + 1)/4 plus boundary penalties u(x_i, 10, 100, 4);
    domain [-50, 50]^N."""
    @staticmethod
    def extents(Ndim):
        # Lower and upper bounds of the search space.
        return -50 * np.ones(Ndim), 50 * np.ones(Ndim)
    def __init__(self, base_optimiser):
        Base_Coster.__init__(self, base_optimiser)
    def map_to_solution_space(self, X):
        # Identity mapping: candidate vectors are evaluated as-is.
        return X
    @staticmethod
    def u(xi, a, k, m):
        # Boundary penalty: zero inside [-a, a], growing as k*(|xi| - a)^m outside.
        if xi > a:
            v = k * (xi - a)**m
        elif xi < -a:
            v = k * (-xi - a)**m
        else:
            v = 0
        return v
    def evaluate_cost(self):
        # Shifted/scaled variable used by the penalised benchmark.
        y = 1 + (self.XA + 1) / 4
        # Total boundary penalty over all coordinates.
        c = 0
        for i in range(0, len(self.XA)):
            c = c + F12.u(self.XA[i], 10, 100, 4)
        self.cost = sum(np.power(y[0:(len(self.XA)-1)] - 1, 2) * (1 + 10 * np.power(np.sin(np.pi * y[1:len(self.XA)]), 2)))
        # NOTE(review): the classic F12 uses 10*sin^2(pi*y_1) for the leading
        # term; here the sine is not squared -- confirm against the reference.
        self.cost = (self.cost + 10 * np.sin(np.pi * y[0]) + (y[len(self.XA)-1] - 1)**2) * np.pi / len(self.XA) + c
    def meso(self):
        # No mid-iteration bookkeeping required for this benchmark.
        None
def Gholami_TestFunction_Extents(number, Ndim=30):
    """Return (minima, maxima) search-space bounds for Gholami benchmark
    *number* (1-12) in *Ndim* dimensions, or (None, None) for any other
    number."""
    benchmarks = {
        1: F1, 2: F2, 3: F3, 4: F4, 5: F5, 6: F6,
        7: F7, 8: F8, 9: F9, 10: F10, 11: F11, 12: F12,
    }
    selected = benchmarks.get(number)
    if selected is None:
        return None, None
    return selected.extents(Ndim)
def Gholami_TestFunction_Coster(number, base_optimiser):
    """Instantiate the coster class for Gholami benchmark *number* (1-12),
    bound to *base_optimiser*; returns None for any other number."""
    benchmarks = {
        1: F1, 2: F2, 3: F3, 4: F4, 5: F5, 6: F6,
        7: F7, 8: F8, 9: F9, 10: F10, 11: F11, 12: F12,
    }
    selected = benchmarks.get(number)
    return None if selected is None else selected(base_optimiser)
| true | true |
f72e70c39c18d6e4ccb7179f2c40bbfbc8e403ba | 666 | py | Python | microservice-model-template/sources/flask/simulation/__init__.py | evalinani/onesait-cloud-platform-examples | 7b69e05d3f39261b97081b345df9b6e1823207f6 | [
"Apache-2.0"
] | 10 | 2019-05-14T13:23:28.000Z | 2019-09-18T08:52:15.000Z | microservice-model-template/sources/flask/simulation/__init__.py | evalinani/onesait-cloud-platform-examples | 7b69e05d3f39261b97081b345df9b6e1823207f6 | [
"Apache-2.0"
] | 6 | 2019-10-14T13:48:05.000Z | 2021-08-16T18:09:42.000Z | microservice-model-template/sources/flask/simulation/__init__.py | evalinani/onesait-cloud-platform-examples | 7b69e05d3f39261b97081b345df9b6e1823207f6 | [
"Apache-2.0"
] | 8 | 2019-04-23T09:59:42.000Z | 2021-06-30T21:04:17.000Z | # Copyright Indra Soluciones Tecnologías de la Información, S.L.U.
# 2013-2019 SPAIN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .datasimulator import Simulator
| 44.4 | 74 | 0.771772 |
from .datasimulator import Simulator
| true | true |
f72e71fc8efd3c5b1b273dfb7620f0e96f5eebf8 | 2,709 | py | Python | predict_risk_1/machine_learning_models/KNN.py | VenkateshBH99/django_local_library | db834cbe6ec475a2d3224b3ea9b56b1fa3519e9f | [
"Apache-2.0"
] | null | null | null | predict_risk_1/machine_learning_models/KNN.py | VenkateshBH99/django_local_library | db834cbe6ec475a2d3224b3ea9b56b1fa3519e9f | [
"Apache-2.0"
] | null | null | null | predict_risk_1/machine_learning_models/KNN.py | VenkateshBH99/django_local_library | db834cbe6ec475a2d3224b3ea9b56b1fa3519e9f | [
"Apache-2.0"
] | null | null | null | # Importing the libraries
# K-Nearest-Neighbours training script for the chronic-kidney-disease data:
# trains a KNN classifier, persists the fitted scaler and model, reports
# accuracy / confusion matrix / ROC, then scores a new dataset.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
# Load the dataset: every column but the last is a feature, column 24 is the label.
dataset = pd.read_csv('kidney_disease2.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,24].values
# Handle missing data: replace NaNs with the per-column mean.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:,:24])
X[:,:24] = imputer.transform(X[:,:24])
# Split the dataset into training and test sets (50/50, fixed seed).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state =101)
# Feature scaling: fit on the training split only.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
train=list(X_train)  # keep an unscaled copy; reused for the new-data section below
X_train = sc.fit_transform(X_train)
from sklearn.externals import joblib
# Persist the fitted scaler so inference can reproduce identical scaling.
scaler_file = "standard_scalar_KNN.pkl"
joblib.dump(sc, scaler_file)
X_test = sc.transform(X_test)
# Explore class balance in the dataset.
import seaborn as sn
sn.countplot(x='classification',data=dataset)
dataset.classification.value_counts()
print("------",dataset.classification.value_counts(),"----------")
# Fit a K-Nearest-Neighbours classifier (k=3) to the training set.
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(X_train, y_train)
from sklearn.externals import joblib
filename ='KNN_model.pkl'
joblib.dump(classifier,filename)
# Predict the test-set labels.
print(X_test)
y_pred = classifier.predict(X_test)
print(y_pred)
print(y_test)
# Accuracy score (computed but neither printed nor stored).
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred)
# Confusion matrix.
from sklearn.metrics import classification_report, confusion_matrix
cm=confusion_matrix(y_test, y_pred)
# Per-class precision / recall / F1 report.
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# ROC curve and AUC.
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
logit_roc_auc = roc_auc_score(y_test, classifier.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, classifier.predict_proba(X_test)[:,1])
plt.figure()
# NOTE(review): the legend label says "Logistic Regression" but the model is KNN.
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
# Score a new dataset.
# NOTE(review): a fresh StandardScaler is fitted on the raw training copy here
# instead of reusing `sc`; confirm this reproduces the training-time scaling.
Newdataset = pd.read_csv('newdata.csv')
sca=StandardScaler()
train=sca.fit_transform(train)
Newdataset=sca.transform(Newdataset)
print(Newdataset)
ynew=classifier.predict(Newdataset)
print("---------",ynew,"------------")
| 27.927835 | 93 | 0.773717 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
dataset = pd.read_csv('kidney_disease2.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,24].values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:,:24])
X[:,:24] = imputer.transform(X[:,:24])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state =101)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
train=list(X_train)
X_train = sc.fit_transform(X_train)
from sklearn.externals import joblib
scaler_file = "standard_scalar_KNN.pkl"
joblib.dump(sc, scaler_file)
X_test = sc.transform(X_test)
import seaborn as sn
sn.countplot(x='classification',data=dataset)
dataset.classification.value_counts()
print("------",dataset.classification.value_counts(),"----------")
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(X_train, y_train)
from sklearn.externals import joblib
filename ='KNN_model.pkl'
joblib.dump(classifier,filename)
print(X_test)
y_pred = classifier.predict(X_test)
print(y_pred)
print(y_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred)
rics import classification_report, confusion_matrix
cm=confusion_matrix(y_test, y_pred)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
logit_roc_auc = roc_auc_score(y_test, classifier.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, classifier.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
'newdata.csv')
sca=StandardScaler()
train=sca.fit_transform(train)
Newdataset=sca.transform(Newdataset)
print(Newdataset)
ynew=classifier.predict(Newdataset)
print("---------",ynew,"------------")
| true | true |
f72e720965d81ae52683fcd6c1169ce40b6a7ed3 | 7,270 | py | Python | intera_examples/scripts/joint_position_joystick.py | UtQ67/intera_sdk | a9340f9f2b9287b7580cbca86fd450677542108b | [
"Apache-2.0"
] | 38 | 2017-01-20T15:44:22.000Z | 2022-01-28T15:15:40.000Z | intera_examples/scripts/joint_position_joystick.py | UtQ67/intera_sdk | a9340f9f2b9287b7580cbca86fd450677542108b | [
"Apache-2.0"
] | 47 | 2016-12-16T19:41:03.000Z | 2022-03-21T14:04:04.000Z | intera_examples/scripts/joint_position_joystick.py | UtQ67/intera_sdk | a9340f9f2b9287b7580cbca86fd450677542108b | [
"Apache-2.0"
] | 52 | 2017-02-03T13:26:23.000Z | 2021-03-16T14:25:51.000Z | #! /usr/bin/env python
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SDK Joint Position Example: joystick
"""
import argparse
import rospy
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
def rotate(l):
    """
    Rotate *l* one position to the left, in place.

    The first element moves to the end; an empty list is left unchanged.
    @param l: the list
    """
    if l:
        l.append(l.pop(0))
def set_j(cmd, limb, joints, index, delta):
    """
    Set the selected joint to current pos + delta.

    @param cmd: the joint command dictionary (updated in place)
    @param limb: the limb to read the current position from
    @param joints: a list of joint names
    @param index: the index in the list of names
    @param delta: delta to update the joint by

    joint/index is to make this work in the bindings.
    """
    name = joints[index]
    current = limb.joint_angle(name)
    cmd[name] = current + delta
def map_joystick(joystick, side):
    """
    Maps joystick input to joint position commands.

    Builds a table of (test, command, description) bindings for the stick
    axes and buttons, then polls the bindings at 100 Hz until ROS shuts
    down, sending accumulated joint position commands to the limb.

    @param joystick: an instance of a Joystick
    @param side: limb side name, e.g. 'right'
    """
    limb = intera_interface.Limb(side)
    gripper = None
    try:
        gripper = intera_interface.Gripper(side + '_gripper')
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed while still treating "no gripper attached" as non-fatal.
    except Exception:
        rospy.loginfo("Could not detect a connected electric gripper.")

    def set_g(action):
        # Dispatch a gripper action; no-op when no gripper was detected.
        if gripper is not None:
            if action == "close":
                gripper.close()
            elif action == "open":
                gripper.open()
            elif action == "calibrate":
                gripper.calibrate()

    limb_cmd = {}
    # Available joints for this limb.
    joints = limb.joint_names()
    # Abbreviations: stick-high / stick-low tests and button edges.
    jhi = lambda s: joystick.stick_value(s) > 0
    jlo = lambda s: joystick.stick_value(s) < 0
    bdn = joystick.button_down
    bup = joystick.button_up

    def print_help(bindings_list):
        # Print every binding's control name and its description.
        print("Press Ctrl-C to quit.")
        for bindings in bindings_list:
            for (test, _cmd, doc) in bindings:
                if callable(doc):
                    doc = doc()
                print("%s: %s" % (str(test[1][0]), doc))

    bindings_list = []
    # Each binding: ((test_fn, test_args), (cmd_fn, cmd_args), description).
    bindings = [
        ((jlo, ['leftStickHorz']), (set_j, [limb_cmd, limb, joints, 0, 0.1]),
         lambda: "Increase " + joints[0]),
        ((jhi, ['leftStickHorz']), (set_j, [limb_cmd, limb, joints, 0, -0.1]),
         lambda: "Decrease " + joints[0]),
        ((jlo, ['leftStickVert']), (set_j, [limb_cmd, limb, joints, 1, 0.1]),
         lambda: "Increase " + joints[1]),
        ((jhi, ['leftStickVert']), (set_j, [limb_cmd, limb, joints, 1, -0.1]),
         lambda: "Decrease " + joints[1]),
        ((jlo, ['rightStickHorz']), (set_j, [limb_cmd, limb, joints, 2, 0.1]),
         lambda: "Increase " + joints[2]),
        ((jhi, ['rightStickHorz']), (set_j, [limb_cmd, limb, joints, 2, -0.1]),
         lambda: "Decrease " + joints[2]),
        ((jlo, ['rightStickVert']), (set_j, [limb_cmd, limb, joints, 3, 0.1]),
         lambda: "Increase " + joints[3]),
        ((jhi, ['rightStickVert']), (set_j, [limb_cmd, limb, joints, 3, -0.1]),
         lambda: "Decrease " + joints[3]),
        ((bdn, ['leftBumper']), (rotate, [joints]), side + ": cycle joint"),
        ((bdn, ['function1']), (print_help, [bindings_list]), "help"),
        ((bdn, ['function2']), (print_help, [bindings_list]), "help"),
    ]
    if gripper:
        bindings.extend([
            ((bdn, ['rightTrigger']), (set_g, ['close'], gripper), side + " gripper close"),
            ((bup, ['rightTrigger']), (set_g, ['open'], gripper), side + " gripper open"),
            ((bdn, ['btnLeft']), (set_g, ['calibrate'], gripper), "right calibrate")
        ])
    bindings_list.append(bindings)

    rate = rospy.Rate(100)
    print_help(bindings_list)
    print("Press Ctrl-C to stop. ")
    while not rospy.is_shutdown():
        # Fire every binding whose test passes, echoing its description.
        for (test, cmd, doc) in bindings:
            if test[0](*test[1]):
                cmd[0](*cmd[1])
                if callable(doc):
                    print(doc())
                else:
                    print(doc)
        # Flush any accumulated joint commands to the limb.
        if len(limb_cmd):
            limb.set_joint_positions(limb_cmd)
            limb_cmd.clear()
        rate.sleep()
    return False
def main():
    """SDK Joint Position Example: Joystick Control

    Use a game controller to control the angular joint positions
    of Sawyer's arms.

    Attach a game controller to your dev machine and run this
    example along with the ROS joy_node to control the position
    of each joint in Sawyer's arm using the joystick. Be sure to
    provide your *joystick* type to setup appropriate key mappings.

    Each stick axis maps to a joint angle; which joints are currently
    controlled can be incremented by using the mapped increment buttons.
    Ex:
      (x,y -> e0,e1) >>increment>> (x,y -> e1,e2)
    """
    epilog = """
See help inside the example with the "Start" button for controller
key bindings.
    """
    # Discover limbs available on the connected robot.
    rp = intera_interface.RobotParams()
    valid_limbs = rp.get_limb_names()
    if not valid_limbs:
        rp.log_message(("Cannot detect any limb parameters on this robot. "
                        "Exiting."), "ERROR")
        return
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__,
                                     epilog=epilog)
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-j', '--joystick', required=True,
        choices=['xbox', 'logitech', 'ps3'],
        help='specify the type of joystick to use'
    )
    parser.add_argument(
        "-l", "--limb", dest="limb", default=valid_limbs[0],
        choices=valid_limbs,
        help="Limb on which to run the joint position joystick example"
    )
    args = parser.parse_args(rospy.myargv()[1:])

    # Instantiate the controller wrapper matching the requested joystick type.
    joystick = None
    if args.joystick == 'xbox':
        joystick = intera_external_devices.joystick.XboxController()
    elif args.joystick == 'logitech':
        joystick = intera_external_devices.joystick.LogitechController()
    elif args.joystick == 'ps3':
        joystick = intera_external_devices.joystick.PS3Controller()
    else:
        # Unreachable in practice: argparse already restricts the choices.
        parser.error("Unsupported joystick type '%s'" % (args.joystick))

    print("Initializing node... ")
    rospy.init_node("sdk_joint_position_joystick")
    print("Getting robot state... ")
    rs = intera_interface.RobotEnable(CHECK_VERSION)
    init_state = rs.state().enabled  # NOTE(review): captured but never used

    def clean_shutdown():
        # Invoked by rospy on shutdown (Ctrl-C or node kill).
        print("\nExiting example.")
    rospy.on_shutdown(clean_shutdown)

    rospy.loginfo("Enabling robot...")
    rs.enable()
    # Blocks until ROS shutdown, streaming joystick input to the limb.
    map_joystick(joystick, args.limb)
    print("Done.")
if __name__ == '__main__':
main()
| 33.502304 | 92 | 0.602338 |
import argparse
import rospy
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
def rotate(l):
if len(l):
v = l[0]
l[:-1] = l[1:]
l[-1] = v
def set_j(cmd, limb, joints, index, delta):
joint = joints[index]
cmd[joint] = delta + limb.joint_angle(joint)
def map_joystick(joystick, side):
limb = intera_interface.Limb(side)
gripper = None
try:
gripper = intera_interface.Gripper(side + '_gripper')
except:
rospy.loginfo("Could not detect a connected electric gripper.")
def set_g(action):
if gripper is not None:
if action == "close":
gripper.close()
elif action == "open":
gripper.open()
elif action == "calibrate":
gripper.calibrate()
limb_cmd = {}
joints = limb.joint_names()
jhi = lambda s: joystick.stick_value(s) > 0
jlo = lambda s: joystick.stick_value(s) < 0
bdn = joystick.button_down
bup = joystick.button_up
def print_help(bindings_list):
print("Press Ctrl-C to quit.")
for bindings in bindings_list:
for (test, _cmd, doc) in bindings:
if callable(doc):
doc = doc()
print("%s: %s" % (str(test[1][0]), doc))
bindings_list = []
bindings = [
((jlo, ['leftStickHorz']), (set_j, [limb_cmd, limb, joints, 0, 0.1]),
lambda: "Increase " + joints[0]),
((jhi, ['leftStickHorz']), (set_j, [limb_cmd, limb, joints, 0, -0.1]),
lambda: "Decrease " + joints[0]),
((jlo, ['leftStickVert']), (set_j, [limb_cmd, limb, joints, 1, 0.1]),
lambda: "Increase " + joints[1]),
((jhi, ['leftStickVert']), (set_j, [limb_cmd, limb, joints, 1, -0.1]),
lambda: "Decrease " + joints[1]),
((jlo, ['rightStickHorz']), (set_j, [limb_cmd, limb, joints, 2, 0.1]),
lambda: "Increase " + joints[2]),
((jhi, ['rightStickHorz']), (set_j, [limb_cmd, limb, joints, 2, -0.1]),
lambda: "Decrease " + joints[2]),
((jlo, ['rightStickVert']), (set_j, [limb_cmd, limb, joints, 3, 0.1]),
lambda: "Increase " + joints[3]),
((jhi, ['rightStickVert']), (set_j, [limb_cmd, limb, joints, 3, -0.1]),
lambda: "Decrease " + joints[3]),
((bdn, ['leftBumper']), (rotate, [joints]), side + ": cycle joint"),
((bdn, ['function1']), (print_help, [bindings_list]), "help"),
((bdn, ['function2']), (print_help, [bindings_list]), "help"),
]
if gripper:
bindings.extend([
((bdn, ['rightTrigger']), (set_g, ['close'], gripper), side + " gripper close"),
((bup, ['rightTrigger']), (set_g, ['open'], gripper), side + " gripper open"),
((bdn, ['btnLeft']), (set_g, ['calibrate'], gripper), "right calibrate")
])
bindings_list.append(bindings)
rate = rospy.Rate(100)
print_help(bindings_list)
print("Press Ctrl-C to stop. ")
while not rospy.is_shutdown():
for (test, cmd, doc) in bindings:
if test[0](*test[1]):
cmd[0](*cmd[1])
if callable(doc):
print(doc())
else:
print(doc)
if len(limb_cmd):
limb.set_joint_positions(limb_cmd)
limb_cmd.clear()
rate.sleep()
return False
def main():
epilog = """
See help inside the example with the "Start" button for controller
key bindings.
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__,
epilog=epilog)
required = parser.add_argument_group('required arguments')
required.add_argument(
'-j', '--joystick', required=True,
choices=['xbox', 'logitech', 'ps3'],
help='specify the type of joystick to use'
)
parser.add_argument(
"-l", "--limb", dest="limb", default=valid_limbs[0],
choices=valid_limbs,
help="Limb on which to run the joint position joystick example"
)
args = parser.parse_args(rospy.myargv()[1:])
joystick = None
if args.joystick == 'xbox':
joystick = intera_external_devices.joystick.XboxController()
elif args.joystick == 'logitech':
joystick = intera_external_devices.joystick.LogitechController()
elif args.joystick == 'ps3':
joystick = intera_external_devices.joystick.PS3Controller()
else:
parser.error("Unsupported joystick type '%s'" % (args.joystick))
print("Initializing node... ")
rospy.init_node("sdk_joint_position_joystick")
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
def clean_shutdown():
print("\nExiting example.")
rospy.on_shutdown(clean_shutdown)
rospy.loginfo("Enabling robot...")
rs.enable()
map_joystick(joystick, args.limb)
print("Done.")
if __name__ == '__main__':
main()
| true | true |
f72e7223e89eedc346ce7a9e695c19fef535fe15 | 2,266 | py | Python | gamechangerml/src/search/query_expansion/tests/test_qe.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 11 | 2021-05-05T17:52:10.000Z | 2022-02-04T15:12:29.000Z | gamechangerml/src/search/query_expansion/tests/test_qe.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 76 | 2021-07-24T02:33:16.000Z | 2022-03-20T22:40:46.000Z | gamechangerml/src/search/query_expansion/tests/test_qe.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 6 | 2021-06-30T22:18:56.000Z | 2022-03-22T16:54:50.000Z | import logging
import pytest
logger = logging.getLogger(__name__)
def check(expanded, exp_len):
    """Return True when *expanded* contains at least one and at most
    *exp_len* items."""
    count = len(expanded)
    return count >= 1 and count <= exp_len
def test_qe_emb_expand(qe_obj, topn):
    """Embedding-based expansion of an in-vocabulary phrase yields between
    1 and *topn* terms (pytest fixtures: qe_obj, topn)."""
    q_str = "security clearance"
    exp = qe_obj.expand(q_str)
    logger.info(exp)
    assert check(exp, topn)
def test_qe_emb_empty(qe_obj, topn):
    """An empty query expands to no terms."""
    q_str = ""
    exp = qe_obj.expand(q_str, topn=topn)
    assert len(exp) == 0
def test_qe_emb_oov_1(qe_obj, topn):
    """An out-of-vocabulary token expands to no terms."""
    q_str = "kljljfalj"
    exp = qe_obj.expand(q_str, topn=topn)
    assert len(exp) == 0
def test_qe_emb_iv_2(qe_obj, topn):
    """A second in-vocabulary phrase also yields 1..topn expansions."""
    q_str = "financial reporting"
    exp = qe_obj.expand(q_str, topn=topn)
    logger.info(exp)
    assert check(exp, topn)
# this is in here because it is based off of api function flow not specifically qe
def test_remove_kw_1():
    """Exact original-keyword tokens are stripped from multi-word expansions.

    NOTE(review): remove_original_kw is not imported in this module as shown;
    presumably provided by an import elsewhere -- confirm before running.
    """
    test_term = "network"
    test_list = ["network connection", "communications network"]
    terms = remove_original_kw(test_list, test_term)
    verified = ["connection", "communications"]
    assert terms == verified
def test_remove_kw_2():
    """Near-matches ("animals") survive; only exact token matches are removed."""
    test_term = "animal"
    test_list = ["animals", "animal cruelty"]
    terms = remove_original_kw(test_list, test_term)
    verified = ["animals", "cruelty"]
    assert terms == verified
def test_remove_kw_3():
    """A multi-word original keyword leaves partially-overlapping expansions
    unchanged."""
    test_term = "american navy"
    test_list = ["british navy", "navy washington"]
    terms = remove_original_kw(test_list, test_term)
    verified = ["british navy", "navy washington"]
    assert terms == verified
def test_remove_kw_4():
    """Each expansion drops the original keyword token, keeping the remainder."""
    test_term = "weapons"
    test_list = ["enemy weapons", "weapons explosives"]
    terms = remove_original_kw(test_list, test_term)
    verified = ["enemy", "explosives"]
    assert terms == verified
@pytest.mark.parametrize(
    "args",
    [
        # Each case: [query, expected expansion terms].
        ["passport", []],
        [
            "Find a book, painting, or work of art created in Santa Monica or on the west coast",
            ["sculpture", "piece"],
        ],  # noqa
        ["telework policy for remote work", []],
        ["telework policy work", ["public"]],
    ],
)
def test_qe_mlm(topn, qe_mlm_obj, args):
    """Masked-LM expansion returns exactly the expected terms per query
    (threshold 0.2, minimum 3 tokens; fixtures: topn, qe_mlm_obj)."""
    query, expected = args
    actual = qe_mlm_obj.expand(query, topn=topn, threshold=0.2, min_tokens=3)
    logger.info(actual)
    assert actual == expected
| 25.75 | 97 | 0.659312 | import logging
import pytest
logger = logging.getLogger(__name__)
def check(expanded, exp_len):
return 1 <= len(expanded) <= exp_len
def test_qe_emb_expand(qe_obj, topn):
q_str = "security clearance"
exp = qe_obj.expand(q_str)
logger.info(exp)
assert check(exp, topn)
def test_qe_emb_empty(qe_obj, topn):
q_str = ""
exp = qe_obj.expand(q_str, topn=topn)
assert len(exp) == 0
def test_qe_emb_oov_1(qe_obj, topn):
q_str = "kljljfalj"
exp = qe_obj.expand(q_str, topn=topn)
assert len(exp) == 0
def test_qe_emb_iv_2(qe_obj, topn):
q_str = "financial reporting"
exp = qe_obj.expand(q_str, topn=topn)
logger.info(exp)
assert check(exp, topn)
def test_remove_kw_1():
test_term = "network"
test_list = ["network connection", "communications network"]
terms = remove_original_kw(test_list, test_term)
verified = ["connection", "communications"]
assert terms == verified
def test_remove_kw_2():
test_term = "animal"
test_list = ["animals", "animal cruelty"]
terms = remove_original_kw(test_list, test_term)
verified = ["animals", "cruelty"]
assert terms == verified
def test_remove_kw_3():
test_term = "american navy"
test_list = ["british navy", "navy washington"]
terms = remove_original_kw(test_list, test_term)
verified = ["british navy", "navy washington"]
assert terms == verified
def test_remove_kw_4():
test_term = "weapons"
test_list = ["enemy weapons", "weapons explosives"]
terms = remove_original_kw(test_list, test_term)
verified = ["enemy", "explosives"]
assert terms == verified
@pytest.mark.parametrize(
"args",
[
["passport", []],
[
"Find a book, painting, or work of art created in Santa Monica or on the west coast",
["sculpture", "piece"],
],
["telework policy for remote work", []],
["telework policy work", ["public"]],
],
)
def test_qe_mlm(topn, qe_mlm_obj, args):
query, expected = args
actual = qe_mlm_obj.expand(query, topn=topn, threshold=0.2, min_tokens=3)
logger.info(actual)
assert actual == expected
| true | true |
f72e72e522ec513de5e3efacb8883404d676ae41 | 1,861 | py | Python | setup.py | glikaj/pako | 27f9846459a94933dfe20884f80d6ec81d8dc092 | [
"MIT"
] | null | null | null | setup.py | glikaj/pako | 27f9846459a94933dfe20884f80d6ec81d8dc092 | [
"MIT"
] | null | null | null | setup.py | glikaj/pako | 27f9846459a94933dfe20884f80d6ec81d8dc092 | [
"MIT"
] | null | null | null | import os
import sys
py_version = sys.version_info[:2]
if py_version < (2, 7):
raise RuntimeError('On Python 2, Supervisor requires Python 2.7 or later')
elif (3, 0) < py_version < (3, 4):
raise RuntimeError('On Python 3, Supervisor requires Python 3.4 or later')
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
version_txt = os.path.join(here, 'oly', 'version.txt')
oly_version = open(version_txt).read().strip()
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="oly",
version=oly_version,
author="Genci Likaj",
author_email="genci.likaj@gmail.com",
description="Oly Cli",
long_description=long_description,
license='MIT',
packages=find_packages(),
package_dir={'oly': 'oly'},
install_requires=['click', 'requests', 'tabulate'],
include_package_data=True,
zip_safe=False,
long_description_content_type = "text/markdown",
url = "https://github.com/glikaj/oly",
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
project_urls={
'Source': 'https://github.com/glikaj/oly/',
'Tracker': 'https://github.com/glikaj/oly/issues',
},
keywords='cli development console docker',
entry_points={
'console_scripts': ['oly = oly.cli:start']
},
) | 32.086207 | 78 | 0.634068 | import os
import sys
py_version = sys.version_info[:2]
if py_version < (2, 7):
raise RuntimeError('On Python 2, Supervisor requires Python 2.7 or later')
elif (3, 0) < py_version < (3, 4):
raise RuntimeError('On Python 3, Supervisor requires Python 3.4 or later')
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
version_txt = os.path.join(here, 'oly', 'version.txt')
oly_version = open(version_txt).read().strip()
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="oly",
version=oly_version,
author="Genci Likaj",
author_email="genci.likaj@gmail.com",
description="Oly Cli",
long_description=long_description,
license='MIT',
packages=find_packages(),
package_dir={'oly': 'oly'},
install_requires=['click', 'requests', 'tabulate'],
include_package_data=True,
zip_safe=False,
long_description_content_type = "text/markdown",
url = "https://github.com/glikaj/oly",
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
project_urls={
'Source': 'https://github.com/glikaj/oly/',
'Tracker': 'https://github.com/glikaj/oly/issues',
},
keywords='cli development console docker',
entry_points={
'console_scripts': ['oly = oly.cli:start']
},
) | true | true |
f72e730e867ce0880e9a4b7cd06ba8b921784b8a | 3,822 | py | Python | LuciferMoringstar_Robot/modules/inline.py | Shanidkk00/PAID-PROMO-FUCK-OFF | b6f0766032ccd09e14cef164a5984ba9f6cb636e | [
"MIT"
] | 2 | 2022-02-08T14:44:36.000Z | 2022-03-03T07:55:14.000Z | LuciferMoringstar_Robot/modules/inline.py | Shanidkk00/PAID-PROMO-FUCK-OFF | b6f0766032ccd09e14cef164a5984ba9f6cb636e | [
"MIT"
] | null | null | null | LuciferMoringstar_Robot/modules/inline.py | Shanidkk00/PAID-PROMO-FUCK-OFF | b6f0766032ccd09e14cef164a5984ba9f6cb636e | [
"MIT"
] | 2 | 2022-02-10T11:31:16.000Z | 2022-02-10T12:40:40.000Z | import logging
from pyrogram import Client as LuciferMoringstar_Robot, filters as Worker, emoji
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedDocument
from LuciferMoringstar_Robot.database._utils import get_size
from LuciferMoringstar_Robot.database.autofilter_db import get_search_results, is_subscribed
from config import CACHE_TIME, AUTH_USERS, FORCES_SUB, CUSTOM_FILE_CAPTION
logger = logging.getLogger(__name__)
cache_time = 0 if AUTH_USERS or FORCES_SUB else CACHE_TIME
@LuciferMoringstar_Robot.on_inline_query(Worker.user(AUTH_USERS) if AUTH_USERS else None)
async def answer(bot, query):
    """Serve inline search queries with cached media documents.

    Query syntax: "<search terms> | <file type>" -- the optional file-type
    filter follows a literal '|'. Paging is driven by the inline-query
    offset; results come from get_search_results (10 per page).
    """
    # Enforce channel subscription when FORCES_SUB is configured.
    if FORCES_SUB and not await is_subscribed(bot, query):
        await query.answer(results=[],
                           cache_time=0,
                           switch_pm_text='You Have To Subscribe My Channel To Use The Bot',
                           switch_pm_parameter="subscribe")
        return

    results = []
    # Split an optional "| <file type>" suffix off the search string.
    if '|' in query.query:
        string, file_type = query.query.split('|', maxsplit=1)
        string = string.strip()
        file_type = file_type.strip().lower()
    else:
        string = query.query.strip()
        file_type = None

    # Offset is empty on the first page; treat it as 0.
    offset = int(query.offset or 0)
    reply_markup = get_reply_markup(query=string)
    files, next_offset = await get_search_results(string,
                                                  file_type=file_type,
                                                  max_results=10,
                                                  offset=offset)

    for file in files:
        title=file.file_name
        size=get_size(file.file_size)
        f_caption=file.caption
        # Apply the configured caption template; fall back to the raw
        # caption if the template references unknown fields.
        if CUSTOM_FILE_CAPTION:
            try:
                f_caption=CUSTOM_FILE_CAPTION.format(file_name=title, file_size=size, file_caption=f_caption)
            except Exception as e:
                print(e)
                f_caption=f_caption
        if f_caption is None:
            f_caption = f"📁 **Title:** `{file.file_name}`"
        results.append(
            InlineQueryResultCachedDocument(
                title=file.file_name,
                file_id=file.file_id,
                caption=f_caption,
                description=f'Size: {get_size(file.file_size)}\nType: {file.file_type}',
                reply_markup=reply_markup))

    if results:
        switch_pm_text = f"{emoji.FILE_FOLDER} Results"
        if string:
            switch_pm_text += f" for {string}"
        try:
            await query.answer(results=results,
                               is_personal = True,
                               cache_time=cache_time,
                               switch_pm_text=switch_pm_text,
                               switch_pm_parameter="start",
                               next_offset=str(next_offset))
        except Exception as e:
            # Surface the failure to the user via the switch-pm banner
            # (Telegram limits the text to 64 characters).
            logging.exception(str(e))
            await query.answer(results=[], is_personal=True,
                               cache_time=cache_time,
                               switch_pm_text=str(e)[:63],
                               switch_pm_parameter="error")
    else:
        switch_pm_text = f'{emoji.CROSS_MARK} No results'
        if string:
            switch_pm_text += f' for "{string}"'
        await query.answer(results=[],
                           is_personal = True,
                           cache_time=cache_time,
                           switch_pm_text=switch_pm_text,
                           switch_pm_parameter="okay")
def get_reply_markup(query):
    """Build the two-row inline keyboard attached to every result.

    Row 1 links to the support group and bot list; row 2 re-opens the
    same inline search pre-filled with ``query``.
    """
    support_row = [
        InlineKeyboardButton('Support Group', url='t.me/Mo_Tech_Group'),
        InlineKeyboardButton('More Botz', url='t.me/MT_Botz'),
    ]
    search_row = [
        InlineKeyboardButton('🔍 Search again 🔎',
                             switch_inline_query_current_chat=query),
    ]
    return InlineKeyboardMarkup([support_row, search_row])
| 39.402062 | 109 | 0.584511 | import logging
from pyrogram import Client as LuciferMoringstar_Robot, filters as Worker, emoji
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedDocument
from LuciferMoringstar_Robot.database._utils import get_size
from LuciferMoringstar_Robot.database.autofilter_db import get_search_results, is_subscribed
from config import CACHE_TIME, AUTH_USERS, FORCES_SUB, CUSTOM_FILE_CAPTION
logger = logging.getLogger(__name__)
cache_time = 0 if AUTH_USERS or FORCES_SUB else CACHE_TIME
@LuciferMoringstar_Robot.on_inline_query(Worker.user(AUTH_USERS) if AUTH_USERS else None)
async def answer(bot, query):
if FORCES_SUB and not await is_subscribed(bot, query):
await query.answer(results=[],
cache_time=0,
switch_pm_text='You Have To Subscribe My Channel To Use The Bot',
switch_pm_parameter="subscribe")
return
results = []
if '|' in query.query:
string, file_type = query.query.split('|', maxsplit=1)
string = string.strip()
file_type = file_type.strip().lower()
else:
string = query.query.strip()
file_type = None
offset = int(query.offset or 0)
reply_markup = get_reply_markup(query=string)
files, next_offset = await get_search_results(string,
file_type=file_type,
max_results=10,
offset=offset)
for file in files:
title=file.file_name
size=get_size(file.file_size)
f_caption=file.caption
if CUSTOM_FILE_CAPTION:
try:
f_caption=CUSTOM_FILE_CAPTION.format(file_name=title, file_size=size, file_caption=f_caption)
except Exception as e:
print(e)
f_caption=f_caption
if f_caption is None:
f_caption = f"📁 **Title:** `{file.file_name}`"
results.append(
InlineQueryResultCachedDocument(
title=file.file_name,
file_id=file.file_id,
caption=f_caption,
description=f'Size: {get_size(file.file_size)}\nType: {file.file_type}',
reply_markup=reply_markup))
if results:
switch_pm_text = f"{emoji.FILE_FOLDER} Results"
if string:
switch_pm_text += f" for {string}"
try:
await query.answer(results=results,
is_personal = True,
cache_time=cache_time,
switch_pm_text=switch_pm_text,
switch_pm_parameter="start",
next_offset=str(next_offset))
except Exception as e:
logging.exception(str(e))
await query.answer(results=[], is_personal=True,
cache_time=cache_time,
switch_pm_text=str(e)[:63],
switch_pm_parameter="error")
else:
switch_pm_text = f'{emoji.CROSS_MARK} No results'
if string:
switch_pm_text += f' for "{string}"'
await query.answer(results=[],
is_personal = True,
cache_time=cache_time,
switch_pm_text=switch_pm_text,
switch_pm_parameter="okay")
def get_reply_markup(query):
buttons = [[
InlineKeyboardButton('Support Group', url='t.me/Mo_Tech_Group'),
InlineKeyboardButton('More Botz', url='t.me/MT_Botz')
],[
InlineKeyboardButton('🔍 Search again 🔎', switch_inline_query_current_chat=query)
]]
return InlineKeyboardMarkup(buttons)
| true | true |
f72e7328156adf3d454b7598395f831f88d58f24 | 1,839 | py | Python | ics/structures/s_text_api_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | ics/structures/s_text_api_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | ics/structures/s_text_api_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
# can1_options
class can1_options(ctypes.Union):
    """Option word for CAN channel 1.

    ``bExtended`` (bit 0) and ``DWord`` overlay the same 32-bit storage;
    ``DWord`` gives raw access to the whole option word.
    """
    _pack_ = 2
    _fields_ = [
        ('bExtended', ctypes.c_uint32, 1), # [Bitfield]
        ('DWord', ctypes.c_uint32),
    ]
# Extra names go here:
# End of extra names
# can2_options
class can2_options(ctypes.Union):
    """Option word for CAN channel 2; same layout as ``can1_options``."""
    _pack_ = 2
    _fields_ = [
        ('bExtended', ctypes.c_uint32, 1), # [Bitfield]
        ('DWord', ctypes.c_uint32),
    ]
# Extra names go here:
# End of extra names
# can3_options
class can3_options(ctypes.Union):
    """Option word for CAN channel 3; same layout as ``can1_options``."""
    _pack_ = 2
    _fields_ = [
        ('bExtended', ctypes.c_uint32, 1), # [Bitfield]
        ('DWord', ctypes.c_uint32),
    ]
# Extra names go here:
# End of extra names
# can4_options
class can4_options(ctypes.Union):
    """Option word for CAN channel 4; same layout as ``can1_options``."""
    _pack_ = 2
    _fields_ = [
        ('bExtended', ctypes.c_uint32, 1), # [Bitfield]
        ('DWord', ctypes.c_uint32),
    ]
# Extra names go here:
# End of extra names
class s_text_api_settings(ctypes.Structure):
    """Text-API settings block: tx/rx arbitration IDs and an option word
    for four CAN channels, plus a network-enable word and reserved space.

    ``_anonymous_`` flattens the four option unions into this struct.
    NOTE(review): all four unions expose the same member names
    (``bExtended``/``DWord``), so flattened attribute access can resolve
    to only one of them — verify which channel is intended.
    """
    _pack_ = 2
    _anonymous_ = ("can1_options", "can2_options", "can3_options", "can4_options",)
    _fields_ = [
        ('can1_tx_id', ctypes.c_uint32),
        ('can1_rx_id', ctypes.c_uint32),
        ('can1_options', can1_options),
        ('can2_tx_id', ctypes.c_uint32),
        ('can2_rx_id', ctypes.c_uint32),
        ('can2_options', can2_options),
        ('network_enables', ctypes.c_uint32),
        ('can3_tx_id', ctypes.c_uint32),
        ('can3_rx_id', ctypes.c_uint32),
        ('can3_options', can3_options),
        ('can4_tx_id', ctypes.c_uint32),
        ('can4_rx_id', ctypes.c_uint32),
        ('can4_options', can4_options),
        ('reserved', ctypes.c_uint32 * 5),  # 5 reserved 32-bit words
    ]
# Extra names go here:
STextAPISettings = s_text_api_settings
# End of extra names
| 25.541667 | 83 | 0.616639 |
import ctypes
class can1_options(ctypes.Union):
_pack_ = 2
_fields_ = [
('bExtended', ctypes.c_uint32, 1),
('DWord', ctypes.c_uint32),
]
class can2_options(ctypes.Union):
_pack_ = 2
_fields_ = [
('bExtended', ctypes.c_uint32, 1),
('DWord', ctypes.c_uint32),
]
class can3_options(ctypes.Union):
_pack_ = 2
_fields_ = [
('bExtended', ctypes.c_uint32, 1),
('DWord', ctypes.c_uint32),
]
class can4_options(ctypes.Union):
_pack_ = 2
_fields_ = [
('bExtended', ctypes.c_uint32, 1),
('DWord', ctypes.c_uint32),
]
class s_text_api_settings(ctypes.Structure):
_pack_ = 2
_anonymous_ = ("can1_options", "can2_options", "can3_options", "can4_options",)
_fields_ = [
('can1_tx_id', ctypes.c_uint32),
('can1_rx_id', ctypes.c_uint32),
('can1_options', can1_options),
('can2_tx_id', ctypes.c_uint32),
('can2_rx_id', ctypes.c_uint32),
('can2_options', can2_options),
('network_enables', ctypes.c_uint32),
('can3_tx_id', ctypes.c_uint32),
('can3_rx_id', ctypes.c_uint32),
('can3_options', can3_options),
('can4_tx_id', ctypes.c_uint32),
('can4_rx_id', ctypes.c_uint32),
('can4_options', can4_options),
('reserved', ctypes.c_uint32 * 5),
]
STextAPISettings = s_text_api_settings
| true | true |
f72e735867669fcf4103b3d3ac7577830db2749f | 321 | py | Python | leetcode/maximum-subarray.py | hg-pyun/algorithm | cf92483c399f05e488b6febc79c80620f115fadf | [
"MIT"
] | 7 | 2018-09-15T13:57:37.000Z | 2022-03-13T10:01:56.000Z | leetcode/maximum-subarray.py | hg-pyun/algorithm | cf92483c399f05e488b6febc79c80620f115fadf | [
"MIT"
] | 1 | 2019-04-26T07:02:28.000Z | 2019-04-26T07:02:28.000Z | leetcode/maximum-subarray.py | hg-pyun/algorithm | cf92483c399f05e488b6febc79c80620f115fadf | [
"MIT"
] | 1 | 2020-05-03T23:43:38.000Z | 2020-05-03T23:43:38.000Z | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
ans = nums[0]
sub_sum = 0
for num in nums:
sub_sum += num
ans = max(sub_sum, ans)
if sub_sum < 0:
sub_sum = 0
return ans
| 20.0625 | 50 | 0.392523 | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
ans = nums[0]
sub_sum = 0
for num in nums:
sub_sum += num
ans = max(sub_sum, ans)
if sub_sum < 0:
sub_sum = 0
return ans
| true | true |
f72e752b81b96c364ac0063922f80616e7fca486 | 2,256 | py | Python | murano/tests/unit/dsl/test_meta.py | openstack/murano | 314c85db8addae184a77c8b47217b1f28e4a1b67 | [
"Apache-2.0"
] | 91 | 2015-04-26T16:05:03.000Z | 2021-12-28T07:12:33.000Z | murano/tests/unit/dsl/test_meta.py | openstack/murano | 314c85db8addae184a77c8b47217b1f28e4a1b67 | [
"Apache-2.0"
] | 3 | 2016-06-24T08:05:20.000Z | 2021-02-07T06:04:47.000Z | murano/tests/unit/dsl/test_meta.py | openstack/murano | 314c85db8addae184a77c8b47217b1f28e4a1b67 | [
"Apache-2.0"
] | 61 | 2015-05-19T22:56:34.000Z | 2021-06-01T05:38:53.000Z | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestMeta(test_case.DslTestCase):
    """Checks collection and inheritance of MuranoPL Meta values attached
    to classes, methods, arguments, properties and packages.

    Every test delegates to a method of the ``metatests.TestMeta``
    MuranoPL class and compares the meta values it reports back.
    """
    def setUp(self):
        super(TestMeta, self).setUp()
        # Runner that executes methods of the metatests.TestMeta object.
        self._runner = self.new_runner(om.Object('metatests.TestMeta'))
    def test_class_multi_meta(self):
        # assertCountEqual: the order of collected meta values is not
        # significant, only their multiset.
        self.assertCountEqual(
            [4, 1, 111, 2], self._runner.testClassMultiMeta())
    def test_class_single_meta(self):
        self.assertCountEqual(
            [5, 6], self._runner.testClassSingleMeta())
    def test_parent_class_not_inherited_meta(self):
        self.assertEqual(3, self._runner.testParentClassNotInheritedMeta())
    def test_method_meta(self):
        self.assertCountEqual(
            [7, 8, 9, 4, 1, 10], self._runner.testMethodMeta())
    def test_method_argument_meta(self):
        self.assertCountEqual(
            [1, 2, 3], self._runner.testMethodArgumentMeta())
    def test_inherited_property_meta(self):
        self.assertEqual(
            [1], self._runner.testInheritedPropertyMeta())
    def test_overridden_property_meta(self):
        self.assertCountEqual(
            [1, 4], self._runner.testOverriddenPropertyMeta())
    def test_package_meta(self):
        self.assertEqual(
            [], self._runner.testPackageMeta())
    def test_complex_meta(self):
        # Each entry pairs a meta value with the name of the MuranoPL
        # type that declared it.
        self.assertCountEqual([
            [1, 'metatests.PropertyType'],
            [2, 'metatests.PropertyType'],
            [3, 'metatests.PropertyType2'],
            [4, 'metatests.PropertyType'],
            [5, 'metatests.PropertyType2']
        ], self._runner.testComplexMeta())
| 35.25 | 78 | 0.671986 |
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestMeta(test_case.DslTestCase):
def setUp(self):
super(TestMeta, self).setUp()
self._runner = self.new_runner(om.Object('metatests.TestMeta'))
def test_class_multi_meta(self):
self.assertCountEqual(
[4, 1, 111, 2], self._runner.testClassMultiMeta())
def test_class_single_meta(self):
self.assertCountEqual(
[5, 6], self._runner.testClassSingleMeta())
def test_parent_class_not_inherited_meta(self):
self.assertEqual(3, self._runner.testParentClassNotInheritedMeta())
def test_method_meta(self):
self.assertCountEqual(
[7, 8, 9, 4, 1, 10], self._runner.testMethodMeta())
def test_method_argument_meta(self):
self.assertCountEqual(
[1, 2, 3], self._runner.testMethodArgumentMeta())
def test_inherited_property_meta(self):
self.assertEqual(
[1], self._runner.testInheritedPropertyMeta())
def test_overridden_property_meta(self):
self.assertCountEqual(
[1, 4], self._runner.testOverriddenPropertyMeta())
def test_package_meta(self):
self.assertEqual(
[], self._runner.testPackageMeta())
def test_complex_meta(self):
self.assertCountEqual([
[1, 'metatests.PropertyType'],
[2, 'metatests.PropertyType'],
[3, 'metatests.PropertyType2'],
[4, 'metatests.PropertyType'],
[5, 'metatests.PropertyType2']
], self._runner.testComplexMeta())
| true | true |
f72e76c8f0c62355d037b9e364b47158d7de00b5 | 788 | py | Python | profiles_api/migrations/0002_profilefeeditem.py | nirmit1509/profiles-rest-api | c707103b7ffb5c321a6b47ba05175c51ef816d45 | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_profilefeeditem.py | nirmit1509/profiles-rest-api | c707103b7ffb5c321a6b47ba05175c51ef816d45 | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_profilefeeditem.py | nirmit1509/profiles-rest-api | c707103b7ffb5c321a6b47ba05175c51ef816d45 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-12-18 13:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ProfileFeedItem model (auto-generated by Django)."""
    dependencies = [
        ('profiles_api', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProfileFeedItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_text', models.CharField(max_length=255)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # CASCADE: feed items are deleted with their owning user.
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.52 | 126 | 0.633249 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f72e77268ba9acbcb6d08cf96c6155994a820451 | 11,191 | py | Python | core/platform/email/mailgun_email_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 2 | 2021-05-24T10:23:32.000Z | 2021-08-22T18:50:14.000Z | core/platform/email/mailgun_email_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 11 | 2021-03-03T07:21:27.000Z | 2022-03-12T01:03:44.000Z | core/platform/email/mailgun_email_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 1 | 2017-12-06T19:41:49.000Z | 2017-12-06T19:41:49.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mailgun API wrapper."""
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
from core.platform.email import mailgun_email_services
from core.tests import test_utils
import feconf
import python_utils
from typing import Dict, List, Tuple, Union
# Shape of a captured Mailgun request as seen by the url_open mock:
# (url, parsed form-data dict, headers dict).
MailgunQueryType = Tuple[
    str,
    Dict[str, List[Union[str, Dict[str, Dict[str, Union[int, str]]]]]],
    Dict[str, str]
]
class EmailTests(test_utils.GenericTestBase):
    """Tests for sending emails."""
    class Response(python_utils.OBJECT):
        """Class to mock python_utils.url_open responses."""
        def __init__(
                self,
                url: MailgunQueryType,
                expected_url: MailgunQueryType
        ) -> None:
            # url: the request the code under test actually issued;
            # expected_url: the request the test expects it to build.
            self.url = url
            self.expected_url = expected_url
        def getcode(self) -> int:
            """Gets the status code of this url_open mock.
            Returns:
                int. 200 to signify status is OK. 500 otherwise.
            """
            # Parse the raw query string into a dict so the comparison
            # against expected_url is structural, not textual.
            self.url = (
                self.url[0],
                python_utils.parse_query_string(self.url[1]), # type: ignore[no-untyped-call]
                self.url[2],
            )
            recipient_variable_0 = self.url[1]['recipient_variables'][0]
            # Letting mypy know that the variable is of type str.
            assert isinstance(recipient_variable_0, str)
            # recipient_variables arrives as a stringified dict literal;
            # evaluate it back into a dict for a value-based comparison.
            self.url[1]['recipient_variables'] = [ast.literal_eval(
                recipient_variable_0)]
            return 200 if self.url == self.expected_url else 500
    def test_send_email_to_mailgun(self) -> None:
        """Test for sending HTTP POST request."""
        # Test sending email without bcc, reply_to or recipient_variables.
        expected_query_url: MailgunQueryType = (
            'https://api.mailgun.net/v3/domain/messages',
            {
                'from': ['a@a.com'],
                'text': ['plaintext_body 😂'],
                'recipient_variables': [{}],
                'to': ['b@b.com'],
                'html': ['Hi abc,<br> 😂'],
                'subject': ['Hola 😂 - invitation to collaborate']
            },
            {'Authorization': 'Basic YXBpOmtleQ=='})
        swapped_urlopen = lambda x: self.Response(x, expected_query_url)
        swapped_request = lambda *args: args
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            resp = mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂')
            self.assertTrue(resp)
        # Test sending email with single bcc and single recipient email.
        expected_query_url = (
            'https://api.mailgun.net/v3/domain/messages',
            {
                'from': ['a@a.com'],
                'h:Reply-To': ['abc'],
                'text': ['plaintext_body 😂'],
                'bcc': ['c@c.com'],
                'recipient_variables': [
                    {'b@b.com': {'first': 'Bob', 'id': 1}}
                ],
                'to': ['b@b.com'],
                'html': ['Hi abc,<br> 😂'],
                'subject': ['Hola 😂 - invitation to collaborate']
            },
            {'Authorization': 'Basic YXBpOmtleQ=='})
        swapped_urlopen = lambda x: self.Response(x, expected_query_url)
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            resp = mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂',
                bcc=['c@c.com'],
                reply_to='abc',
                recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
            self.assertTrue(resp)
        # Test sending email with single bcc, and multiple recipient emails
        # differentiated by recipient_variables ids.
        expected_query_url = (
            'https://api.mailgun.net/v3/domain/messages',
            {
                'from': ['a@a.com'],
                'h:Reply-To': ['abc'],
                'text': ['plaintext_body 😂'],
                'bcc': ['[\'c@c.com\', \'d@d.com\']'],
                'recipient_variables': [
                    {'b@b.com': {'id': 1, 'first': 'Bob'}}
                ],
                'to': ['b@b.com'],
                'html': ['Hi abc,<br> 😂'],
                'subject': ['Hola 😂 - invitation to collaborate']
            },
            {'Authorization': 'Basic YXBpOmtleQ=='})
        swapped_urlopen = lambda x: self.Response(x, expected_query_url)
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            resp = mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂',
                bcc=['c@c.com', 'd@d.com'],
                reply_to='abc',
                recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
            self.assertTrue(resp)
    def test_batch_send_to_mailgun(self) -> None:
        """Test for sending HTTP POST request."""
        # Multiple recipients are serialized into a single 'to' field.
        expected_query_url: MailgunQueryType = (
            'https://api.mailgun.net/v3/domain/messages',
            {
                'from': ['a@a.com'],
                'text': ['plaintext_body 😂'],
                'recipient_variables': [{}],
                'to': ['[\'b@b.com\', \'c@c.com\', \'d@d.com\']'],
                'html': ['Hi abc,<br> 😂'],
                'subject': ['Hola 😂 - invitation to collaborate']
            },
            {'Authorization': 'Basic YXBpOmtleQ=='})
        swapped_urlopen = lambda x: self.Response(x, expected_query_url)
        swapped_request = lambda *args: args
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            resp = mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com', 'c@c.com', 'd@d.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂')
            self.assertTrue(resp)
    def test_mailgun_key_or_domain_name_not_set_raises_exception(self) -> None:
        """Test that exceptions are raised when API key or domain name are
        unset.
        """
        # Testing no mailgun api key.
        mailgun_exception = self.assertRaisesRegexp( # type: ignore[no-untyped-call]
            Exception, 'Mailgun API key is not available.')
        with mailgun_exception:
            mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com', 'c@c.com', 'd@d.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂')
        # Testing no mailgun domain name.
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        mailgun_exception = self.assertRaisesRegexp( # type: ignore[no-untyped-call]
            Exception, 'Mailgun domain name is not set.')
        with swap_api, mailgun_exception:
            mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com', 'c@c.com', 'd@d.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂')
    def test_invalid_status_code_returns_false(self) -> None:
        """A deliberately mismatched Authorization header makes the mock
        return 500, so send_email_to_recipients reports failure.
        """
        expected_query_url: MailgunQueryType = (
            'https://api.mailgun.net/v3/domain/messages',
            {
                'from': ['a@a.com'],
                'h:Reply-To': ['abc'],
                'text': ['plaintext_body 😂'],
                'bcc': ['[\'c@c.com\', \'d@d.com\']'],
                'recipient_variables': [
                    {'b@b.com': {'id': 1, 'first': 'Bob'}}
                ],
                'to': ['b@b.com'],
                'html': ['Hi abc,<br> 😂'],
                'subject': ['Hola 😂 - invitation to collaborate']
            },
            {'Authorization': 'Basic'})
        swapped_request = lambda *args: args
        swapped_urlopen = lambda x: self.Response(x, expected_query_url)
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            resp = mailgun_email_services.send_email_to_recipients(
                'a@a.com',
                ['b@b.com'],
                'Hola 😂 - invitation to collaborate',
                'plaintext_body 😂',
                'Hi abc,<br> 😂',
                bcc=['c@c.com', 'd@d.com'],
                reply_to='abc',
                recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
            self.assertFalse(resp)
| 42.390152 | 93 | 0.548566 |
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
from core.platform.email import mailgun_email_services
from core.tests import test_utils
import feconf
import python_utils
from typing import Dict, List, Tuple, Union
MailgunQueryType = Tuple[
str,
Dict[str, List[Union[str, Dict[str, Dict[str, Union[int, str]]]]]],
Dict[str, str]
]
class EmailTests(test_utils.GenericTestBase):
class Response(python_utils.OBJECT):
def __init__(
self,
url: MailgunQueryType,
expected_url: MailgunQueryType
) -> None:
self.url = url
self.expected_url = expected_url
def getcode(self) -> int:
self.url = (
self.url[0],
python_utils.parse_query_string(self.url[1]),
self.url[2],
)
recipient_variable_0 = self.url[1]['recipient_variables'][0]
assert isinstance(recipient_variable_0, str)
self.url[1]['recipient_variables'] = [ast.literal_eval(
recipient_variable_0)]
return 200 if self.url == self.expected_url else 500
def test_send_email_to_mailgun(self) -> None:
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
{
'from': ['a@a.com'],
'text': ['plaintext_body 😂'],
'recipient_variables': [{}],
'to': ['b@b.com'],
'html': ['Hi abc,<br> 😂'],
'subject': ['Hola 😂 - invitation to collaborate']
},
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
{
'from': ['a@a.com'],
'h:Reply-To': ['abc'],
'text': ['plaintext_body 😂'],
'bcc': ['c@c.com'],
'recipient_variables': [
{'b@b.com': {'first': 'Bob', 'id': 1}}
],
'to': ['b@b.com'],
'html': ['Hi abc,<br> 😂'],
'subject': ['Hola 😂 - invitation to collaborate']
},
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com'],
reply_to='abc',
recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
self.assertTrue(resp)
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
{
'from': ['a@a.com'],
'h:Reply-To': ['abc'],
'text': ['plaintext_body 😂'],
'bcc': ['[\'c@c.com\', \'d@d.com\']'],
'recipient_variables': [
{'b@b.com': {'id': 1, 'first': 'Bob'}}
],
'to': ['b@b.com'],
'html': ['Hi abc,<br> 😂'],
'subject': ['Hola 😂 - invitation to collaborate']
},
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com', 'd@d.com'],
reply_to='abc',
recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
self.assertTrue(resp)
def test_batch_send_to_mailgun(self) -> None:
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
{
'from': ['a@a.com'],
'text': ['plaintext_body 😂'],
'recipient_variables': [{}],
'to': ['[\'b@b.com\', \'c@c.com\', \'d@d.com\']'],
'html': ['Hi abc,<br> 😂'],
'subject': ['Hola 😂 - invitation to collaborate']
},
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
def test_mailgun_key_or_domain_name_not_set_raises_exception(self) -> None:
mailgun_exception = self.assertRaisesRegexp(
Exception, 'Mailgun API key is not available.')
with mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
mailgun_exception = self.assertRaisesRegexp(
Exception, 'Mailgun domain name is not set.')
with swap_api, mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
def test_invalid_status_code_returns_false(self) -> None:
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
{
'from': ['a@a.com'],
'h:Reply-To': ['abc'],
'text': ['plaintext_body 😂'],
'bcc': ['[\'c@c.com\', \'d@d.com\']'],
'recipient_variables': [
{'b@b.com': {'id': 1, 'first': 'Bob'}}
],
'to': ['b@b.com'],
'html': ['Hi abc,<br> 😂'],
'subject': ['Hola 😂 - invitation to collaborate']
},
{'Authorization': 'Basic'})
swapped_request = lambda *args: args
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com', 'd@d.com'],
reply_to='abc',
recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
self.assertFalse(resp)
| true | true |
f72e7770d6e09471566f9a1de0432dcf558fcac0 | 16,661 | py | Python | test/functional/test_framework/mininode.py | GerardoTaboada/EducaCoin | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | [
"MIT"
] | 3 | 2018-10-04T04:38:09.000Z | 2021-02-22T04:45:03.000Z | test/functional/test_framework/mininode.py | GerardoTaboada/EducaCoin | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | GerardoTaboada/EducaCoin | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | [
"MIT"
] | 2 | 2018-10-05T22:11:11.000Z | 2020-04-13T04:51:06.000Z | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Educacoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
# Module-level logger for all mininode connection/message tracing.
logger = logging.getLogger("TestFramework.mininode")
# Maps each P2P command name (the raw bytes that appear in the message
# header) to the message class used to deserialize that command's payload.
MESSAGEMAP = {
    b"addr": msg_addr,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"reject": msg_reject,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
}
# Per-network "magic" message-start bytes: every P2P message on the wire is
# prefixed with the 4 bytes of its network.
MAGIC_BYTES = {
    "mainnet": b"\xfb\xc0\xb6\xdb",   # mainnet
    "testnet4": b"\xfd\xd2\xc8\xf1",  # testnet3
    "regtest": b"\xfa\xbf\xb5\xda",   # regtest
}
class P2PConnection(asyncore.dispatcher):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # All P2PConnections must be created before starting the NetworkThread.
        # assert that the network thread is not running.
        assert not network_thread_running()

        super().__init__(map=mininode_socket_map)

    def peer_connect(self, dstaddr, dstport, net="regtest"):
        """Open a TCP connection to dstaddr:dstport on the given network."""
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # Disable Nagle's algorithm so small test messages go out promptly.
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.sendbuf = b""
        self.recvbuf = b""
        self.state = "connecting"
        self.network = net
        self.disconnect = False

        logger.info('Connecting to Educacoin Node: %s:%d' % (self.dstaddr, self.dstport))

        try:
            self.connect((dstaddr, dstport))
        except:
            self.handle_close()

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        if self.state == "connected":
            self.disconnect_node()

    # Connection and disconnection methods

    def handle_connect(self):
        """asyncore callback when a connection is opened."""
        if self.state != "connected":
            logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
            self.state = "connected"
            self.on_open()

    def handle_close(self):
        """asyncore callback when a connection is closed."""
        logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.on_close()

    def disconnect_node(self):
        """Disconnect the p2p connection.

        Called by the test logic thread. Causes the p2p connection
        to be disconnected on the next iteration of the asyncore loop."""
        self.disconnect = True

    # Socket read methods

    def handle_read(self):
        """asyncore callback when data is read from the socket."""
        t = self.recv(8192)
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                # Read the payload length unsigned ("<I") to match how the
                # header is written in send_message(); it was previously
                # read as signed "<i".
                msglen = struct.unpack("<I", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                # Checksum is the first 4 bytes of double-SHA256 of the payload.
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            # BUGFIX: pass repr(e) through a %s placeholder. The original
            # called logger.exception('Error reading message:', repr(e)),
            # which supplies a logging argument with no format placeholder,
            # so the logging module errors internally and drops the message.
            logger.exception('Error reading message: %s', repr(e))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def writable(self):
        """asyncore method to determine whether the handle_write() callback should be called on the next loop."""
        with mininode_lock:
            pre_connection = self.state == "connecting"
            length = len(self.sendbuf)
        return (length > 0 or pre_connection)

    def handle_write(self):
        """asyncore callback when data should be written to the socket."""
        with mininode_lock:
            # asyncore does not expose socket connection, only the first read/write
            # event, thus we must check connection manually here to know when we
            # actually connect
            if self.state == "connecting":
                self.handle_connect()
            if not self.writable():
                return

            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            self.sendbuf = self.sendbuf[sent:]

    def send_message(self, message, pushbuf=False):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        if self.state != "connected" and not pushbuf:
            raise IOError('Not connected, no pushbuf')
        self._log_message("send", message)
        command = message.command
        data = message.serialize()
        tmsg = MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            if (len(self.sendbuf) == 0 and not pushbuf):
                # Optimistic send: write directly, buffering any remainder.
                try:
                    sent = self.send(tmsg)
                    self.sendbuf = tmsg[sent:]
                except BlockingIOError:
                    self.sendbuf = tmsg
            else:
                self.sendbuf += tmsg

    # Class utility methods

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a Educacoin node.

    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.

    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""

    def __init__(self):
        super().__init__()

        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

        # BUGFIX: previously this attribute was only created lazily inside
        # on_verack(), so reading it before the version handshake completed
        # raised AttributeError. Initialize it explicitly here.
        self.verack_received = False

    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        """Connect to the node, optionally initiating the version handshake."""
        super().peer_connect(*args, **kwargs)

        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.send_message(vt, True)

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except:
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass

    def on_inv(self, message):
        # Request every advertised object (type 0 == ERROR placeholder).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        self.verack_received = True

    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices

    # Connection helper methods

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: self.state != "connected"
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message receiving helper methods

    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getdata(self, timeout=60):
        test_function = lambda: self.last_message.get("getdata")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getheaders(self, timeout=60):
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message sending helper functions

    def send_and_ping(self, message):
        self.send_message(message)
        self.sync_with_ping()

    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1
# Keep our own socket map for asyncore so that disconnects can be tracked
# here (works around an issue with closing an asyncore socket when using
# select).
mininode_socket_map = {}

# Single re-entrant lock guarding all data shared between the networking
# thread (NetworkThread below) and the thread running the test logic.
# P2PConnection takes it when delivering a message to a P2PInterface and
# whenever it touches the send buffer (send_message()); test code must take
# it too before reading state shared with a P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
    """Thread that services all P2PConnection sockets via the asyncore loop.

    Runs until the shared socket map is empty (i.e. every connection has
    been closed)."""

    def __init__(self):
        super().__init__(name="NetworkThread")

    def run(self):
        while mininode_socket_map:
            # We check for whether to disconnect outside of the asyncore
            # loop to workaround the behavior of asyncore when using
            # select. Collect first, then close, so handle_close() can
            # remove entries without mutating the map during iteration.
            # (Was: iterating .items() with the key unused, and a list
            # comprehension used purely for its side effects.)
            disconnected = [conn for conn in mininode_socket_map.values()
                            if conn.disconnect]
            for conn in disconnected:
                conn.handle_close()
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
        logger.debug("Network thread closing")
def network_thread_start():
    """Start the network thread. Only one network thread may run at a time."""
    assert not network_thread_running()
    thread = NetworkThread()
    thread.start()
def network_thread_running():
    """Return whether the network thread is running.

    Identified by the thread name set in NetworkThread.__init__."""
    # Use a generator expression: any() can short-circuit, so there is no
    # need to build the intermediate list the original version created.
    return any(thread.name == "NetworkThread" for thread in threading.enumerate())
def network_thread_join(timeout=10):
    """Wait timeout seconds for the network thread to terminate.

    Throw if the network thread doesn't terminate in timeout seconds."""
    candidates = [t for t in threading.enumerate()
                  if t.name == "NetworkThread"]
    # There should never be more than one network thread alive at once.
    assert len(candidates) <= 1
    for candidate in candidates:
        candidate.join(timeout)
        assert not candidate.is_alive()
| 37.609481 | 182 | 0.635556 |
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb",
"testnet4": b"\xfd\xd2\xc8\xf1",
"regtest": b"\xfa\xbf\xb5\xda",
}
class P2PConnection(asyncore.dispatcher):
def __init__(self):
assert not network_thread_running()
super().__init__(map=mininode_socket_map)
def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self.network = net
self.disconnect = False
logger.info('Connecting to Educacoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
def peer_disconnect(self):
if self.state == "connected":
self.disconnect_node()
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.on_open()
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.on_close()
def disconnect_node(self):
self.disconnect = True
def handle_read(self):
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message:', repr(e))
raise
def on_message(self, message):
raise NotImplementedError
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
def __init__(self):
super().__init__()
self.message_count = defaultdict(int)
self.last_message = {}
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
# Message receiving methods
def on_message(self, message):
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
def __init__(self):
super().__init__(name="NetworkThread")
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
def network_thread_start():
# Only one network thread may run at a time
assert not network_thread_running()
NetworkThread().start()
def network_thread_running():
return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
def network_thread_join(timeout=10):
network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
assert len(network_threads) <= 1
for thread in network_threads:
thread.join(timeout)
assert not thread.is_alive()
| true | true |
f72e77ad679e57d46b94c2f6dab8fbd671008976 | 4,611 | py | Python | venv/Lib/site-packages/statsmodels/discrete/tests/test_margins.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 76 | 2019-12-28T08:37:10.000Z | 2022-03-29T02:19:41.000Z | venv/Lib/site-packages/statsmodels/discrete/tests/test_margins.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 7 | 2020-12-04T04:10:42.000Z | 2021-03-16T00:53:09.000Z | venv/Lib/site-packages/statsmodels/discrete/tests/test_margins.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 35 | 2020-02-04T14:46:25.000Z | 2022-03-24T03:56:17.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 3 21:08:49 2017
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_allclose
from statsmodels.discrete.discrete_model import (Poisson, NegativeBinomial,
NegativeBinomialP)
from statsmodels.tools.tools import add_constant
import statsmodels.discrete.tests.results.results_count_margins as res_stata
# load data into module namespace
from statsmodels.datasets.cpunish import load
# Prepare the cpunish dataset once at module level; every test class below
# fits its model against the same endog/exog arrays.
cpunish_data = load(as_pandas=False)
# Log-transform column 3 of the design matrix before adding the constant.
cpunish_data.exog[:,3] = np.log(cpunish_data.exog[:,3])
exog = add_constant(cpunish_data.exog, prepend=False)
endog = cpunish_data.endog - 1 # avoid zero-truncation
# Rescale each column so its (rounded) maximum is ~1, for better conditioning.
exog /= np.round(exog.max(0), 3)
class CheckMarginMixin(object):
    """Mixin comparing computed marginal effects against Stata references.

    Subclasses must set ``res1`` (reference results), ``margeff`` (computed
    marginal effects), ``res1_slice`` (index into the reference arrays) and
    may loosen ``rtol_fac`` for estimators with weaker agreement."""

    # Relative-tolerance multiplier; 1 means the default tolerances.
    rtol_fac = 1

    def test_margins_table(self):
        ref = self.res1
        idx = self.res1_slice
        fac = self.rtol_fac
        marg = self.margeff
        assert_allclose(marg.margeff, ref.params[idx], rtol=1e-5 * fac)
        assert_allclose(marg.margeff_se, ref.bse[idx], rtol=1e-6 * fac)
        assert_allclose(marg.pvalues, ref.pvalues[idx], rtol=5e-6 * fac)
        assert_allclose(marg.conf_int(), ref.margins_table[idx, 4:6],
                        rtol=1e-6 * fac)
class TestPoissonMargin(CheckMarginMixin):
    """Continuous marginal effects of a Poisson fit vs. Stata."""

    @classmethod
    def setup_class(cls):
        # Start from known-good parameters, so convergence from the default
        # start_params does not need to be checked here.
        start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
                        -5.0529]
        model = Poisson(endog, exog)
        results = model.fit(start_params=start_params)
        cls.res = results
        cls.margeff = results.get_margeff()
        cls.rtol_fac = 1
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_poisson_margins_cont
class TestPoissonMarginDummy(CheckMarginMixin):
    """Marginal effects of a Poisson fit with dummies treated as discrete."""

    @classmethod
    def setup_class(cls):
        # Start from known-good parameters, so convergence from the default
        # start_params does not need to be checked here.
        start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
                        -5.0529]
        model = Poisson(endog, exog)
        results = model.fit(start_params=start_params)
        cls.res = results
        cls.margeff = results.get_margeff(dummy=True)
        cls.res1_slice = [0, 1, 2, 3, 5, 6]
        cls.res1 = res_stata.results_poisson_margins_dummy
class TestNegBinMargin(CheckMarginMixin):
    """Continuous marginal effects of an NB2 negative binomial fit vs. Stata."""

    @classmethod
    def setup_class(cls):
        # Start from known-good parameters, so convergence from the default
        # start_params does not need to be checked here.
        start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
                        -2.88, 1.14]
        model = NegativeBinomial(endog, exog)
        results = model.fit(start_params=start_params, method='nm',
                            maxiter=2000)
        cls.res = results
        cls.margeff = results.get_margeff()
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_negbin_margins_cont
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
class TestNegBinMarginDummy(CheckMarginMixin):
    """Marginal effects of an NB2 fit with dummies treated as discrete."""

    @classmethod
    def setup_class(cls):
        # here we do not need to check convergence from default start_params
        start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
                        -2.88, 1.14]
        mod = NegativeBinomial(endog, exog)
        res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
        marge = res.get_margeff(dummy=True)
        cls.res = res
        cls.margeff = marge
        # BUGFIX: was the duplicated assignment
        # ``cls.res1_slice = cls.res1_slice = [0, 1, 2, 3, 5, 6]`` (typo).
        cls.res1_slice = [0, 1, 2, 3, 5, 6]
        cls.res1 = res_stata.results_negbin_margins_dummy
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
class TestNegBinPMargin(CheckMarginMixin):
    """Same as the NB2 case above, exercised through NegativeBinomialP (p=2)."""

    @classmethod
    def setup_class(cls):
        # Start from known-good parameters, so convergence from the default
        # start_params does not need to be checked here.
        start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
                        -2.88, 1.14]
        model = NegativeBinomialP(endog, exog)  # also checks that default p=2
        results = model.fit(start_params=start_params, method='nm',
                            maxiter=2000)
        cls.res = results
        cls.margeff = results.get_margeff()
        cls.res1_slice = slice(None, None, None)
        cls.res1 = res_stata.results_negbin_margins_cont
        # negbin has lower agreement with Stata in this case
        cls.rtol_fac = 5e1
| 35.744186 | 84 | 0.643678 |
import numpy as np
from numpy.testing import assert_allclose
from statsmodels.discrete.discrete_model import (Poisson, NegativeBinomial,
NegativeBinomialP)
from statsmodels.tools.tools import add_constant
import statsmodels.discrete.tests.results.results_count_margins as res_stata
from statsmodels.datasets.cpunish import load
cpunish_data = load(as_pandas=False)
cpunish_data.exog[:,3] = np.log(cpunish_data.exog[:,3])
exog = add_constant(cpunish_data.exog, prepend=False)
endog = cpunish_data.endog - 1
exog /= np.round(exog.max(0), 3)
class CheckMarginMixin(object):
rtol_fac = 1
def test_margins_table(self):
res1 = self.res1
sl = self.res1_slice
rf = self.rtol_fac
assert_allclose(self.margeff.margeff, self.res1.params[sl], rtol=1e-5 * rf)
assert_allclose(self.margeff.margeff_se, self.res1.bse[sl], rtol=1e-6 * rf)
assert_allclose(self.margeff.pvalues, self.res1.pvalues[sl], rtol=5e-6 * rf)
assert_allclose(self.margeff.conf_int(), res1.margins_table[sl, 4:6],
rtol=1e-6 * rf)
class TestPoissonMargin(CheckMarginMixin):
@classmethod
def setup_class(cls):
start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
-5.0529]
mod_poi = Poisson(endog, exog)
res_poi = mod_poi.fit(start_params=start_params)
marge_poi = res_poi.get_margeff()
cls.res = res_poi
cls.margeff = marge_poi
cls.rtol_fac = 1
cls.res1_slice = slice(None, None, None)
cls.res1 = res_stata.results_poisson_margins_cont
class TestPoissonMarginDummy(CheckMarginMixin):
@classmethod
def setup_class(cls):
start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,
-5.0529]
mod_poi = Poisson(endog, exog)
res_poi = mod_poi.fit(start_params=start_params)
marge_poi = res_poi.get_margeff(dummy=True)
cls.res = res_poi
cls.margeff = marge_poi
cls.res1_slice = [0, 1, 2, 3, 5, 6]
cls.res1 = res_stata.results_poisson_margins_dummy
class TestNegBinMargin(CheckMarginMixin):
@classmethod
def setup_class(cls):
start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
-2.88, 1.14]
mod = NegativeBinomial(endog, exog)
res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
marge = res.get_margeff()
cls.res = res
cls.margeff = marge
cls.res1_slice = slice(None, None, None)
cls.res1 = res_stata.results_negbin_margins_cont
cls.rtol_fac = 5e1
class TestNegBinMarginDummy(CheckMarginMixin):
@classmethod
def setup_class(cls):
start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
-2.88, 1.14]
mod = NegativeBinomial(endog, exog)
res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
marge = res.get_margeff(dummy=True)
cls.res = res
cls.margeff = marge
cls.res1_slice = cls.res1_slice = [0, 1, 2, 3, 5, 6]
cls.res1 = res_stata.results_negbin_margins_dummy
cls.rtol_fac = 5e1
class TestNegBinPMargin(CheckMarginMixin):
@classmethod
def setup_class(cls):
start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,
-2.88, 1.14]
mod = NegativeBinomialP(endog, exog)
res = mod.fit(start_params=start_params, method='nm', maxiter=2000)
marge = res.get_margeff()
cls.res = res
cls.margeff = marge
cls.res1_slice = slice(None, None, None)
cls.res1 = res_stata.results_negbin_margins_cont
cls.rtol_fac = 5e1
| true | true |
f72e7997ae61489068cbd806a984a116eb6dbe1f | 147 | py | Python | chap12.py | maciejkos/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | 1 | 2022-01-04T12:54:18.000Z | 2022-01-04T12:54:18.000Z | chap12.py | Dicaromonroy/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | null | null | null | chap12.py | Dicaromonroy/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | null | null | null | from modsim import *
def calc_total_infected(results, system):
    """Return the total fraction/number infected over the simulation.

    Computed as susceptibles at t=0 minus susceptibles remaining at
    ``system.t_end`` (everyone who left S became infected)."""
    return results.S[0] - results.S[system.t_end]
| 18.375 | 41 | 0.687075 | from modsim import *
def calc_total_infected(results, system):
s_0 = results.S[0]
s_end = results.S[system.t_end]
return s_0 - s_end
| true | true |
f72e7ab3ebec9d29054bd636d606b7df2c7bdcf7 | 7,013 | py | Python | img2gb/__init__.py | flozz/img2gb | 2564a718d0b377d1b524204d97a674aedeec770d | [
"BSD-3-Clause"
] | 23 | 2018-11-14T12:50:31.000Z | 2022-03-30T17:28:43.000Z | img2gb/__init__.py | flozz/img2gb | 2564a718d0b377d1b524204d97a674aedeec770d | [
"BSD-3-Clause"
] | 10 | 2019-07-01T17:24:47.000Z | 2022-01-13T12:38:38.000Z | img2gb/__init__.py | flozz/img2gb | 2564a718d0b377d1b524204d97a674aedeec770d | [
"BSD-3-Clause"
] | 3 | 2019-10-16T23:27:28.000Z | 2022-01-23T22:28:29.000Z | import os.path
from .gbtile import GBTile
from .gbtileset import GBTileset
from .gbtilemap import GBTilemap
from .c_export import generate_c_file, generate_c_header_file
from .version import VERSION
def generate_tileset(
        input_images,
        output_c=None,
        output_h=None,
        output_image=None,
        name="TILESET",
        dedup=False,
        alternative_palette=False,
        sprite8x16=False):
    """Function that generates tileset's C file, C header and image from an
    input image.

    :param PIL.Image.Image|list input_images: The input image (or a list or
        tuple of images) to generate the tileset from.
    :param file output_c: A file-like object where the C code will be generated
        (``None`` to not generate C code).
    :param file output_h: A file-like object where the C header (.h) code will
        be generated (``None`` to not generate C header code).
    :param file output_image: A file-like object where the image representing
        the tileset will be generated (``None`` to not generate the image).

        .. NOTE::

           The file must be openend in binary mode (``open("file", "wb")``)
           or you must be using a binary-compatible file-like object, like
           a :class:`io.BytesIO`.

    :param str name: The name of the tileset (will be used in the generated
        code, default = ``"TILESET"``)
    :param bool dedup: Deduplicate the tiles of the tileset (default =
        ``False``)
    :param bool alternative_palette: Use the sprite's alternative palette
        (inverted colors, default = ``False``)
    :param bool sprite8x16: Rearrange the tiles to be used in 8x16 sprites
        (default = ``False``).

    Example::

        from PIL import Image
        import img2gb

        image = Image.open("./my_tileset.png")
        with open("example.c", "w") as c_file, \\
                open("example.h", "w") as h_file, \\
                open("example.png", "wb") as image_file:
            img2gb.generate_tileset(
                    [image],
                    output_c=c_file,
                    output_h=h_file,
                    output_image=image_file,
                    dedup=True)
    """
    # Accept a single image or any list/tuple of images. isinstance (rather
    # than the exact ``type(...) is list`` test) also accepts list subclasses
    # and tuples, which previously fell through to the single-image branch
    # and failed inside GBTileset.from_image().
    if isinstance(input_images, (list, tuple)):
        # Several images: merge them one by one into an empty tileset.
        tileset = GBTileset()
        for image in input_images:
            tileset.merge(GBTileset.from_image(
                image,
                alternative_palette=alternative_palette,
                sprite8x16=sprite8x16
            ), dedup=dedup)
    else:
        # Single image: build the tileset directly from it.
        tileset = GBTileset.from_image(
            input_images,
            dedup=dedup,
            alternative_palette=alternative_palette,
            sprite8x16=sprite8x16
        )

    if output_c:
        output_c.write(generate_c_file(tileset.to_c_string(name=name)))

    if output_h:
        # Prefer the real output file name for the header guard when the
        # file-like object exposes one; fall back to the tileset name.
        filename = "%s.h" % name.lower()
        if hasattr(output_h, "name"):
            filename = os.path.basename(output_h.name)
        output_h.write(generate_c_header_file(
            tileset.to_c_header_string(name=name),
            filename=filename))

    if output_image:
        tileset.to_image().save(output_image, "PNG")
def generate_tilemap(
        input_tileset,
        input_tilemap_image,
        output_c=None,
        output_h=None,
        name="TILEMAP",
        offset=0,
        missing="error",
        replace=0):
    """Generate tilemap C source and header from a tileset and a map image.

    :param PIL.Image.Image input_tileset: Image containing the tiles used by
        the tilemap.
    :param PIL.Image.Image input_tilemap_image: Image representing the tilemap
        (its size must be a multiple of 8 and 256x256px maximum).
    :param file output_c: File-like object receiving the generated C code
        (``None`` to skip).
    :param file output_h: File-like object receiving the generated C header
        (``None`` to skip).
    :param str name: Symbol name used in the generated code (default =
        ``"TILEMAP"``).
    :param int offset: Index at which the tileset will be loaded in the
        GameBoy video memory (default = ``0``).
    :param string missing: Action when a tilemap tile is absent from the
        tileset: ``"error"`` (default) raises an error, ``"replace"``
        substitutes the tile given by ``replace``.
    :param int replace: Replacement tile id when ``missing="replace"``.
    """
    # "append" would mutate the tileset, which this high-level wrapper does
    # not hand back to the caller, so it is rejected up front.
    if missing == "append":
        raise ValueError("missing=append is not available from high level functions")  # noqa
    gbtileset = GBTileset.from_image(input_tileset, dedup=False, offset=offset)
    gbtilemap = GBTilemap.from_image(
        input_tilemap_image,
        gbtileset=gbtileset,
        missing=missing,
        replace=replace,
    )

    if output_c:
        output_c.write(generate_c_file(gbtilemap.to_c_string(name=name)))

    if output_h:
        # Use the real output file name for the header when available.
        header_filename = "%s.h" % name.lower()
        if hasattr(output_h, "name"):
            header_filename = os.path.basename(output_h.name)
        output_h.write(generate_c_header_file(
            gbtilemap.to_c_header_string(name=name),
            filename=header_filename))
# Public API of the img2gb package.
__all__ = [
    "GBTile",
    "GBTileset",
    "GBTilemap",
    "generate_tileset",
    "generate_tilemap",
    "VERSION",
]
| 32.022831 | 93 | 0.595038 | import os.path
from .gbtile import GBTile
from .gbtileset import GBTileset
from .gbtilemap import GBTilemap
from .c_export import generate_c_file, generate_c_header_file
from .version import VERSION
def generate_tileset(
        input_images,
        output_c=None,
        output_h=None,
        output_image=None,
        name="TILESET",
        dedup=False,
        alternative_palette=False,
        sprite8x16=False):
    """Generate a GameBoy tileset (C source, C header and/or preview image).

    :param input_images: A single ``PIL.Image.Image`` or a list of them.
    :param file output_c: File-like receiving the C code (``None`` to skip).
    :param file output_h: File-like receiving the C header (``None`` to skip).
    :param file output_image: Binary file-like receiving a PNG preview of the
        tileset (``None`` to skip).
    :param str name: Symbol name used in the generated code.
    :param bool dedup: Drop duplicate tiles from the tileset.
    :param bool alternative_palette: Use the sprites' inverted palette.
    :param bool sprite8x16: Reorder tiles for the 8x16 sprite mode.
    """
    if type(input_images) is not list:
        # Single image: build the tileset directly from it.
        tileset = GBTileset.from_image(
            input_images,
            dedup=dedup,
            alternative_palette=alternative_palette,
            sprite8x16=sprite8x16
        )
    else:
        # Several images: merge them one by one into an empty tileset.
        tileset = GBTileset()
        for image in input_images:
            tileset.merge(GBTileset.from_image(
                image,
                alternative_palette=alternative_palette,
                sprite8x16=sprite8x16
            ), dedup=dedup)
    if output_c:
        c_code = generate_c_file(tileset.to_c_string(name=name))
        output_c.write(c_code)
    if output_h:
        # Prefer the real output file name for the header, if known.
        filename = "%s.h" % name.lower()
        if hasattr(output_h, "name"):
            filename = os.path.basename(output_h.name)
        h_code = generate_c_header_file(
            tileset.to_c_header_string(name=name),
            filename=filename)
        output_h.write(h_code)
    if output_image:
        image = tileset.to_image()
        image.save(output_image, "PNG")
def generate_tilemap(
        input_tileset,
        input_tilemap_image,
        output_c=None,
        output_h=None,
        name="TILEMAP",
        offset=0,
        missing="error",
        replace=0):
    """Generate tilemap C source and header from a tileset and a map image.

    :param PIL.Image.Image input_tileset: Image containing the tiles used by
        the tilemap.
    :param PIL.Image.Image input_tilemap_image: Image representing the tilemap.
    :param file output_c: File-like receiving the C code (``None`` to skip).
    :param file output_h: File-like receiving the C header (``None`` to skip).
    :param str name: Symbol name used in the generated code.
    :param int offset: Index at which the tileset will be loaded in the
        GameBoy video memory.
    :param string missing: ``"error"`` raises on a tile absent from the
        tileset; ``"replace"`` substitutes the ``replace`` tile id.
    :param int replace: Replacement tile id when ``missing="replace"``.
    """
    # "append" would mutate a tileset this wrapper never returns.
    if missing == "append":
        raise ValueError("missing=append is not available from high level functions")
    tileset = GBTileset.from_image(input_tileset, dedup=False, offset=offset)
    tilemap = GBTilemap.from_image(
        input_tilemap_image,
        gbtileset=tileset,
        missing=missing,
        replace=replace,
    )
    if output_c:
        c_code = generate_c_file(tilemap.to_c_string(name=name))
        output_c.write(c_code)
    if output_h:
        # Prefer the real output file name for the header, if known.
        filename = "%s.h" % name.lower()
        if hasattr(output_h, "name"):
            filename = os.path.basename(output_h.name)
        h_code = generate_c_header_file(
            tilemap.to_c_header_string(name=name),
            filename=filename)
        output_h.write(h_code)
# Public API of the img2gb package.
__all__ = [
    "GBTile",
    "GBTileset",
    "GBTilemap",
    "generate_tileset",
    "generate_tilemap",
    "VERSION",
]
| true | true |
f72e7afc52b4efae67131e7da1c5037fcc3c2abd | 6,050 | py | Python | docs_zh_CN/conf.py | tailocbmt/mmaction2 | 51612047a53a13be7a855878ffe6f2e5dc4b7b0a | [
"Apache-2.0"
] | 1 | 2022-01-29T13:32:23.000Z | 2022-01-29T13:32:23.000Z | docs_zh_CN/conf.py | tailocbmt/mmaction2 | 51612047a53a13be7a855878ffe6f2e5dc4b7b0a | [
"Apache-2.0"
] | null | null | null | docs_zh_CN/conf.py | tailocbmt/mmaction2 | 51612047a53a13be7a855878ffe6f2e5dc4b7b0a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
version_file = '../mmaction/version.py'
def get_version():
    """Read ``__version__`` out of ``version_file`` without importing it."""
    namespace = {}
    with open(version_file, 'r') as f:
        # Execute the version module in an isolated namespace and pull the
        # version string out of it.
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser'
]
# numpy and torch are required
autodoc_mock_imports = ['mmaction.version', 'PIL']
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
# 'logo_url': 'https://mmocr.readthedocs.io/en/latest/',
'menu': [
{
'name':
'教程',
'url':
'https://colab.research.google.com/github/'
'open-mmlab/mmocr/blob/main/demo/MMOCR_Tutorial.ipynb'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmocr'
},
{
'name':
'上游代码库',
'children': [
{
'name': 'MMCV',
'url': 'https://github.com/open-mmlab/mmcv',
'description': '计算机视觉基础库'
},
{
'name': 'MMClassification',
'url': 'https://github.com/open-mmlab/mmclassification',
'description': '图像分类代码库'
},
{
'name': 'MMDetection',
'url': 'https://github.com/open-mmlab/mmdetection',
'description': '物体检测代码库'
},
]
},
{
'name':
'OpenMMLab 各项目',
'children': [
{
'name': 'MMAction2',
'url': 'https://github.com/open-mmlab/mmaction2',
},
{
'name': 'MMClassification',
'url': 'https://github.com/open-mmlab/mmclassification',
},
{
'name': 'MMSegmentation',
'url': 'https://github.com/open-mmlab/mmsegmentation',
},
{
'name': 'MMDetection3D',
'url': 'https://github.com/open-mmlab/mmdetection3d',
},
{
'name': 'MMEditing',
'url': 'https://github.com/open-mmlab/mmediting',
},
{
'name': 'MMDetection3D',
'url': 'https://github.com/open-mmlab/mmdetection3d',
},
{
'name': 'MMPose',
'url': 'https://github.com/open-mmlab/mmpose',
},
{
'name': 'MMTracking',
'url': 'https://github.com/open-mmlab/mmtracking',
},
{
'name': 'MMGeneration',
'url': 'https://github.com/open-mmlab/mmgeneration',
},
{
'name': 'MMOCR',
'url': 'https://github.com/open-mmlab/mmocr',
},
]
},
{
'name':
'OpenMMLab',
'children': [
{
'name': '主页',
'url': 'https://openmmlab.com/'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/'
},
]
},
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
myst_enable_extensions = ['colon_fence']
language = 'zh_CN'
master_doc = 'index'
def builder_inited_handler(app):
    """Sphinx ``builder-inited`` hook: merge docs and build the stats page.

    NOTE(review): return codes are ignored; consider ``check=True`` so a
    failing helper script fails the docs build -- confirm scripts exit 0.
    """
    subprocess.run(['./merge_docs.sh'])
    subprocess.run(['./stat.py'])
def setup(app):
    """Sphinx extension entry point: register the pre-build handler."""
    app.connect('builder-inited', builder_inited_handler)
| 32.180851 | 79 | 0.506612 |
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('..'))
project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
version_file = '../mmaction/version.py'
def get_version():
    """Return ``__version__`` parsed from ``version_file``.

    NOTE(review): relying on ``exec()`` populating ``locals()`` here is a
    CPython implementation detail -- verify on other interpreters.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
release = get_version()
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser'
]
autodoc_mock_imports = ['mmaction.version', 'PIL']
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'}
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name':
'教程',
'url':
'https://colab.research.google.com/github/'
'open-mmlab/mmocr/blob/main/demo/MMOCR_Tutorial.ipynb'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmocr'
},
{
'name':
'上游代码库',
'children': [
{
'name': 'MMCV',
'url': 'https://github.com/open-mmlab/mmcv',
'description': '计算机视觉基础库'
},
{
'name': 'MMClassification',
'url': 'https://github.com/open-mmlab/mmclassification',
'description': '图像分类代码库'
},
{
'name': 'MMDetection',
'url': 'https://github.com/open-mmlab/mmdetection',
'description': '物体检测代码库'
},
]
},
{
'name':
'OpenMMLab 各项目',
'children': [
{
'name': 'MMAction2',
'url': 'https://github.com/open-mmlab/mmaction2',
},
{
'name': 'MMClassification',
'url': 'https://github.com/open-mmlab/mmclassification',
},
{
'name': 'MMSegmentation',
'url': 'https://github.com/open-mmlab/mmsegmentation',
},
{
'name': 'MMDetection3D',
'url': 'https://github.com/open-mmlab/mmdetection3d',
},
{
'name': 'MMEditing',
'url': 'https://github.com/open-mmlab/mmediting',
},
{
'name': 'MMDetection3D',
'url': 'https://github.com/open-mmlab/mmdetection3d',
},
{
'name': 'MMPose',
'url': 'https://github.com/open-mmlab/mmpose',
},
{
'name': 'MMTracking',
'url': 'https://github.com/open-mmlab/mmtracking',
},
{
'name': 'MMGeneration',
'url': 'https://github.com/open-mmlab/mmgeneration',
},
{
'name': 'MMOCR',
'url': 'https://github.com/open-mmlab/mmocr',
},
]
},
{
'name':
'OpenMMLab',
'children': [
{
'name': '主页',
'url': 'https://openmmlab.com/'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/'
},
]
},
]
}
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
myst_enable_extensions = ['colon_fence']
language = 'zh_CN'
master_doc = 'index'
def builder_inited_handler(app):
    """Sphinx ``builder-inited`` hook: merge docs and build the stats page."""
    subprocess.run(['./merge_docs.sh'])
    subprocess.run(['./stat.py'])
def setup(app):
    """Sphinx extension entry point: register the pre-build handler."""
    app.connect('builder-inited', builder_inited_handler)
| true | true |
f72e7b095cb6503055bc41e3a7a220bc323fe712 | 11,578 | py | Python | entity.py | iamgreaser/fireball | 2c5afb3dc5756a3b26da9045278f7e4a2bc036d2 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-05-06T15:11:17.000Z | 2019-05-06T15:11:17.000Z | entity.py | iamgreaser/fireball | 2c5afb3dc5756a3b26da9045278f7e4a2bc036d2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | entity.py | iamgreaser/fireball | 2c5afb3dc5756a3b26da9045278f7e4a2bc036d2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Copyright 2011 Ben Russell & contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the contributors.
"""
from math import *
import numpy as np
import pyglet
import helpers
# Mouse-look sensitivity (degrees of orientation per pixel of motion).
MOUSE_SENS_X = 0.3
MOUSE_SENS_Y = 0.3
# Movement tuning constants (speeds in blocks/second).
PLAYER_SPEED = 3.0*2.0
OBJECT_GRAVITY = 9.8*2.0
PLAYER_FRICTION = 0.02
PLAYER_JUMP_HEIGHT = 10.0
COLLISION_TOLERANCE = 0.2
# Input bit flags packed into PlayerEntity.keys.
KEY_MOVE_FORWARD_BIT = 0x0001
KEY_MOVE_BACKWARD_BIT = 0x0002
KEY_MOVE_LEFT_BIT = 0x0004
KEY_MOVE_RIGHT_BIT = 0x0008
KEY_JUMP_BIT = 0x0010
KEY_CROUCH_BIT = 0x0020
KEY_CREEP_BIT = 0x0040
KEY_ZOOM_BIT = 0x0080
class AbstractEntity(helpers.ArgGenerator):
    """Base class for all game entities; binds an entity to its game."""

    ARGS = []

    def set_game(self, idx, game):
        """Attach this entity to *game* under entity index *idx*."""
        self.idx, self.game = idx, game
class PositionedEntity(AbstractEntity):
    """Entity with spatial state: position, velocity and view orientation."""
    ARGS = AbstractEntity.ARGS + ["origin","velocity","orient_x","orient_z"]
class PhysicsEntity(PositionedEntity):
    """Positioned entity that is swept against the voxel world when moving.

    Subclasses must define ``BBOX = ((x1, y1, z1), (x2, y2, z2))``, the
    axis-aligned bounding box relative to the entity origin.
    """
    ARGS = PositionedEntity.ARGS + []
    # Set True by trace_vector() when a downward Z trace hits a floor.
    grounded = False
    # When True, horizontal traces may "step up" one-block stairs.
    walkable = False
    # i had to use floor,
    # otherwise the player would bounce like mad when it was in the water
    def trace_vector(self, ox,oy,oz, nx,ny,nz, walkable = False):
        """Sweep the bounding box from (ox, oy, oz) towards (nx, ny, nz).

        Returns the clipped destination ``(x, y, z)``. Side effects on
        collision: damps the blocked ``self.velocity`` component, sets
        ``self.grounded`` when landing, and accumulates
        ``self.antijerk_stairs`` when stepping up a stair (used by the
        camera to smooth the step).
        """
        #walkable = False
        # prep values
        dx, dy, dz = (n-o for (o,n) in zip((ox,oy,oz),(nx,ny,nz))) # delta
        (x1,y1,z1), (x2,y2,z2) = self.BBOX
        height = floor(abs(z2-z1)-0.001)+1
        # x3..z3: leading corner of the bbox along the movement direction;
        # x4..z4: signed bbox extents used by the solidity box checks below.
        x3, y3, z3 = (v1 if d < 0.0 else v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))
        x4, y4, z4 = (v2-v1 if d < 0.0 else v1-v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))
        z5 = (0.0 if dz < 0.0 else z4)
        # Shift the whole trace so it follows the leading corner.
        ox += x3
        oy += y3
        oz += z3
        nx += x3
        ny += y3
        nz += z3
        sx, sy, sz = (v%1.0 if d < 0.0 else 1.0-(v%1.0) for v,d in zip((ox,oy,oz),(dx,dy,dz))) # sub
        gx, gy, gz = (-1 if d < 0.0 else 1 for d in (dx, dy, dz)) # direction ("go")
        wx, wy, wz = (0.001 if d < 0.0 else 0.999 for d in (dx, dy, dz)) # cell offset when hitting box
        vx, vy, vz = (max(0.00001,abs(d)) for d in (dx, dy, dz)) # abs velocity
        cx, cy, cz = (int(floor(v)) for v in (ox, oy, oz)) # cell
        dcx, dcy, dcz = (abs(int(floor(v))-c) for c,v in zip((cx,cy,cz),(nx,ny,nz))) # cell delta / count
        # Stair stepping only applies while moving downwards (walking).
        walkable = walkable and dz < 0.0
        def sfix(sx,sy,sz):
            # Convert per-axis sub-cell offsets back to fractions measured
            # from the low edge of the cell.
            return tuple(v if d < 0.0 else 1.0-v for (v,d) in zip((sx,sy,sz),(dx,dy,dz)))
        # flags to indicate if we've screwed with a value
        keep_x = True
        keep_y = True
        keep_z = True
        dc = dcx+dcy+dcz
        # DDA walk: cross one cell boundary per iteration, always along the
        # axis whose boundary is reached first.
        for i in xrange(dc):
            # get our lovely factoriffic stuff
            calc_x = sx/vx
            calc_y = sy/vy
            calc_z = sz/vz
            take_x = calc_x < calc_y and calc_x < calc_z
            take_y = (not take_x) and calc_y < calc_z
            take_z = (not take_x) and (not take_y)
            if take_x:
                # X trace
                t = sx/vx
                sy -= t*vy
                sz -= t*vz
                if keep_x:
                    cx += gx
                sx = 1.0
            elif take_y:
                # Y trace
                t = sy/vy
                sx -= t*vx
                sz -= t*vz
                if keep_y:
                    cy += gy
                sy = 1.0
            else:
                # Z trace
                t = sz/vz
                sx -= t*vx
                sy -= t*vy
                if keep_z:
                    cz += gz
                sz = 1.0
            # cell check!
            ax,ay,az = sfix(sx,sy,sz) # add this to cx,cy,cz
            ncx,ncy,ncz = cx+ax,cy+ay,cz+az
            # Axes that already collided stay pinned to their clipped target.
            if not keep_x:
                ncx = nx
            if not keep_y:
                ncy = ny
            if not keep_z:
                ncz = nz
            if take_x:
                # floor_check: is the row one block up free, i.e. can the
                # entity step up onto the obstacle?
                floor_check = not self.game.world.solid_check_box(
                    cx+0.5-gx,ncy,ncz+1,
                    cx+0.5,ncy+y4,ncz+z4+1
                )
                checked_out_as_solid = self.game.world.solid_check_box(
                    cx+0.5-gx,ncy,ncz,
                    cx+0.5,ncy+y4,ncz+z4
                )
            elif take_y:
                floor_check = not self.game.world.solid_check_box(
                    ncx,cy+0.5-gy,ncz+1,
                    ncx+x4,cy+0.5,ncz+z4+1
                )
                checked_out_as_solid = self.game.world.solid_check_box(
                    ncx,cy+0.5-gy,ncz,
                    ncx+x4,cy+0.5,ncz+z4
                )
            else:
                checked_out_as_solid = self.game.world.solid_check_box(
                    ncx,ncy,cz+0.5-gz,
                    ncx+x4,ncy+y4,cz+0.5
                )
            #if self.game.world.test_if_solid(cx,cy,cz):
            if checked_out_as_solid:
                if take_x:
                    if walkable and keep_x and floor_check:
                        # Step up one block instead of stopping.
                        cz += 1
                        onz = nz
                        nz = cz+0.001
                        self.antijerk_stairs += onz-nz
                        keep_x = False
                    else:
                        cx -= gx
                        #sx = 0.1
                        if keep_x:
                            nx = cx+wx
                            self.velocity[0] *= -0.1
                        keep_x = False
                elif take_y:
                    if walkable and keep_y and floor_check:
                        cz += 1
                        onz = nz
                        nz = cz+0.001
                        self.antijerk_stairs += onz-nz
                        # NOTE(review): the matching X branch clears keep_x
                        # at this point; clearing keep_z here looks
                        # asymmetric -- confirm whether keep_y was intended.
                        keep_z = False
                    else:
                        cy -= gy
                        #sy = 0.1
                        if keep_y:
                            ny = cy+wy
                            self.velocity[1] *= -0.1
                        keep_y = False
                elif take_z:
                    cz -= gz
                    #sz = 0.1
                    if keep_z:
                        nz = cz+wz
                        if gz < 0:
                            # Hit the floor while moving down.
                            self.grounded = True
                        self.velocity[2] *= -0.1
                    keep_z = False
        # Undo the leading-corner shift before returning.
        return nx-x3, ny-y3, nz-z3
    def update(self, dt):
        """Integrate velocity over *dt* and move with collision clipping."""
        # get new position
        nvec = tuple(self.origin[i] + self.velocity[i]*dt for i in xrange(3))
        (x1, y1, z1), (x2, y2, z2) = self.BBOX
        ox, oy, oz = self.origin
        nx, ny, nz = nvec
        # trace each corner
        #for vbase in self.BVEC:
        #    vx, vy, vz, walkable = vbase
        #    tnx, tny, tnz = self.trace_vector(ox+vx, oy+vy, oz+vz, nx+vx, ny+vy, nz+vz, walkable)
        #    nx, ny, nz = (v-vo for (v,vo) in zip((tnx,tny,tnz),(vx,vy,vz)))
        nx, ny, nz = self.trace_vector(ox, oy, oz, nx, ny, nz, self.walkable)
        for i,vt in zip(xrange(3), (nx, ny, nz)):
            self.origin[i] = vt
class PlayerEntity(PhysicsEntity):
    """The controllable player: input state, camera and crouch handling."""
    ARGS = PhysicsEntity.ARGS + ["name","keys"]
    # Bounding boxes for the two stances; BBOX is swapped on (un)crouch.
    BBOX_STAND = ((-0.4, -0.4, -2.4),(0.4, 0.4, 0.4))
    BBOX_CROUCH = ((-0.4, -0.4, -1.4),(0.4, 0.4, 0.4))
    BBOX = BBOX_STAND
    def set_game(self, idx, game):
        """Attach to *game* (or detach when it is None), filling defaults."""
        self.idx = idx
        self.game = game
        self.target_velocity = [0.0, 0.0, 0.0]
        self.cam_vx = self.cam_vy = 0.0
        self.antijerk_stairs = 0.0
        self.crouching = False
        self.walkable = True
        if game != None:
            # init
            if self.origin == None:
                # Spawn at the centre of the map, just above the top.
                x = self.game.world.lx//2 + 0.5
                y = self.game.world.ly//2 + 0.5
                z = self.game.world.lz + 0.5
                self.origin = [x,y,z]
            if self.orient_x == None:
                self.orient_x = 0.0
            if self.orient_z == None:
                self.orient_z = 0.0
            if self.velocity == None:
                self.velocity = [0.0, 0.0, 0.0]
            if self.keys == None:
                self.keys = 0
            if self.name == None:
                self.name = "Griefer" + repr(self.idx)
        else:
            # destroy
            pass
    def set_camera(self):
        """Return (x, y, z, yaw, pitch) for the renderer's camera."""
        x,y,z = self.origin
        return x,y,z+self.antijerk_stairs,self.orient_z,self.orient_x
    def update(self, dt):
        """Apply input state, crouch transitions and physics for *dt* secs."""
        #print dt
        cam_rmatrix = self.get_cam_matrix_noxrot()
        self.cam_vx = 0.0
        self.cam_vy = 0.0
        # fix antijerk
        self.antijerk_stairs *= exp(-10.0*dt)
        # deal with key changes
        if (self.keys & KEY_JUMP_BIT) and self.grounded and not self.crouching:
            self.velocity[2] = PLAYER_JUMP_HEIGHT
            self.grounded = False
        # Opposite direction keys cancel each other out.
        if (self.keys & KEY_MOVE_LEFT_BIT):
            if not (self.keys & KEY_MOVE_RIGHT_BIT):
                self.cam_vx = -1.0
        elif (self.keys & KEY_MOVE_RIGHT_BIT):
            self.cam_vx = 1.0
        if (self.keys & KEY_MOVE_BACKWARD_BIT):
            if not (self.keys & KEY_MOVE_FORWARD_BIT):
                self.cam_vy = -1.0
        elif (self.keys & KEY_MOVE_FORWARD_BIT):
            self.cam_vy = 1.0
        bvx = self.cam_vx*PLAYER_SPEED
        bvy = -self.cam_vy*PLAYER_SPEED
        if bool(self.keys & KEY_CROUCH_BIT) != self.crouching:
            if self.crouching:
                # uncrouch check
                (x1,y1,z1),(x2,y2,z2) = self.BBOX_STAND
                x,y,z = self.origin
                if not self.game.world.solid_check_box(x1+x,y1+y,z1+z+2,x2+x,y2+y,z2+z+0.1+1):
                    self.origin[2] += 1.0
                    self.BBOX = self.BBOX_STAND
                    self.antijerk_stairs -= 1.0
                    self.crouching = False
                    self.walkable = True
            else:
                # crouch - no check needed
                self.origin[2] -= 1.0
                self.BBOX = self.BBOX_CROUCH
                self.antijerk_stairs += 1.0
                self.crouching = True
                self.walkable = False
        if (self.keys & KEY_CREEP_BIT) or self.crouching:
            # Creeping or crouching halves the movement speed.
            bvx *= 0.5
            bvy *= 0.5
        # Rotate the camera-space wish velocity into world space (yaw only).
        q = (np.asmatrix([bvx,bvy,0.0])*cam_rmatrix)
        #for i in xrange(3):
        #    self.velocity[i] *= (1.0-PLAYER_FRICTION*dt)
        self.target_velocity[0] = q[0,0]
        self.target_velocity[1] = q[0,1]
        self.target_velocity[2] = q[0,2]
        for i in [0,1]: # don't do this with Z.
        #for i in [0,1,2]: # ok, maybe as a temp measure
            # TODO: get the math behind this right
            self.velocity[i] += (self.target_velocity[i] - self.velocity[i])*(1.0 - exp(-dt*5.0))
        self.velocity[2] -= OBJECT_GRAVITY*dt
        PhysicsEntity.update(self, dt)
    def get_cam_matrix_noxrot(self):
        """Yaw-only rotation matrix (camera heading, no pitch)."""
        srz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)
        cam_rmatrix = np.asmatrix(np.identity(3))
        cam_rmatrix *= np.asmatrix([
            [crz,srz,0.0],
            [-srz,crz,0.0],
            [0.0,0.0,1.0],
        ])
        return cam_rmatrix
    def get_cam_matrix(self):
        """Full camera matrix: pitch about X, then yaw about Z.

        NOTE(review): the pitch matrix row ``[0.0, srx, -crx]`` makes it a
        reflection rather than a pure rotation -- presumably matching the
        renderer's handedness; confirm before reusing elsewhere.
        """
        srx,crx = sin(self.orient_x*pi/180.0),cos(self.orient_x*pi/180.0)
        srz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)
        cam_rmatrix = np.asmatrix(np.identity(3))
        cam_rmatrix *= np.asmatrix([
            [1.0,0.0,0.0],
            [0.0,crx,srx],
            [0.0,srx,-crx],
        ])
        cam_rmatrix *= np.asmatrix([
            [crz,srz,0.0],
            [-srz,crz,0.0],
            [0.0,0.0,1.0],
        ])
        return cam_rmatrix
    def on_mouse_motion(self, x, y, dx, dy):
        """Mouse look: yaw follows dx, pitch follows dy (inverted)."""
        self.orient_z += dx*MOUSE_SENS_X
        self.orient_x -= dy*MOUSE_SENS_Y
    def on_key_press(self, key, mod):
        """Set the input bit corresponding to a pressed key."""
        if key == pyglet.window.key.W:
            self.keys |= KEY_MOVE_FORWARD_BIT
        elif key == pyglet.window.key.S:
            self.keys |= KEY_MOVE_BACKWARD_BIT
        elif key == pyglet.window.key.A:
            self.keys |= KEY_MOVE_LEFT_BIT
        elif key == pyglet.window.key.D:
            self.keys |= KEY_MOVE_RIGHT_BIT
        elif key == pyglet.window.key.SPACE:
            self.keys |= KEY_JUMP_BIT
        elif key == pyglet.window.key.LCTRL:
            self.keys |= KEY_CROUCH_BIT
        elif key == pyglet.window.key.LSHIFT:
            self.keys |= KEY_CREEP_BIT
    def on_key_release(self, key, mod):
        """Clear the input bit corresponding to a released key."""
        if key == pyglet.window.key.W:
            self.keys &= ~KEY_MOVE_FORWARD_BIT
        elif key == pyglet.window.key.S:
            self.keys &= ~KEY_MOVE_BACKWARD_BIT
        elif key == pyglet.window.key.A:
            self.keys &= ~KEY_MOVE_LEFT_BIT
        elif key == pyglet.window.key.D:
            self.keys &= ~KEY_MOVE_RIGHT_BIT
        elif key == pyglet.window.key.SPACE:
            self.keys &= ~KEY_JUMP_BIT
        elif key == pyglet.window.key.LCTRL:
            self.keys &= ~KEY_CROUCH_BIT
        elif key == pyglet.window.key.LSHIFT:
            self.keys &= ~KEY_CREEP_BIT
| 26.616092 | 107 | 0.632234 |
from math import *
import numpy as np
import pyglet
import helpers
MOUSE_SENS_X = 0.3
MOUSE_SENS_Y = 0.3
PLAYER_SPEED = 3.0*2.0
OBJECT_GRAVITY = 9.8*2.0
PLAYER_FRICTION = 0.02
PLAYER_JUMP_HEIGHT = 10.0
COLLISION_TOLERANCE = 0.2
KEY_MOVE_FORWARD_BIT = 0x0001
KEY_MOVE_BACKWARD_BIT = 0x0002
KEY_MOVE_LEFT_BIT = 0x0004
KEY_MOVE_RIGHT_BIT = 0x0008
KEY_JUMP_BIT = 0x0010
KEY_CROUCH_BIT = 0x0020
KEY_CREEP_BIT = 0x0040
KEY_ZOOM_BIT = 0x0080
class AbstractEntity(helpers.ArgGenerator):
ARGS = []
def set_game(self, idx, game):
self.idx = idx
self.game = game
class PositionedEntity(AbstractEntity):
ARGS = AbstractEntity.ARGS + ["origin","velocity","orient_x","orient_z"]
class PhysicsEntity(PositionedEntity):
ARGS = PositionedEntity.ARGS + []
grounded = False
walkable = False
def trace_vector(self, ox,oy,oz, nx,ny,nz, walkable = False):
dx, dy, dz = (n-o for (o,n) in zip((ox,oy,oz),(nx,ny,nz)))
(x1,y1,z1), (x2,y2,z2) = self.BBOX
height = floor(abs(z2-z1)-0.001)+1
x3, y3, z3 = (v1 if d < 0.0 else v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))
x4, y4, z4 = (v2-v1 if d < 0.0 else v1-v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))
z5 = (0.0 if dz < 0.0 else z4)
ox += x3
oy += y3
oz += z3
nx += x3
ny += y3
nz += z3
sx, sy, sz = (v%1.0 if d < 0.0 else 1.0-(v%1.0) for v,d in zip((ox,oy,oz),(dx,dy,dz)))
gx, gy, gz = (-1 if d < 0.0 else 1 for d in (dx, dy, dz))
wx, wy, wz = (0.001 if d < 0.0 else 0.999 for d in (dx, dy, dz))
vx, vy, vz = (max(0.00001,abs(d)) for d in (dx, dy, dz))
cx, cy, cz = (int(floor(v)) for v in (ox, oy, oz))
dcx, dcy, dcz = (abs(int(floor(v))-c) for c,v in zip((cx,cy,cz),(nx,ny,nz)))
walkable = walkable and dz < 0.0
def sfix(sx,sy,sz):
return tuple(v if d < 0.0 else 1.0-v for (v,d) in zip((sx,sy,sz),(dx,dy,dz)))
keep_x = True
keep_y = True
keep_z = True
dc = dcx+dcy+dcz
for i in xrange(dc):
# get our lovely factoriffic stuff
calc_x = sx/vx
calc_y = sy/vy
calc_z = sz/vz
take_x = calc_x < calc_y and calc_x < calc_z
take_y = (not take_x) and calc_y < calc_z
take_z = (not take_x) and (not take_y)
if take_x:
# X trace
t = sx/vx
sy -= t*vy
sz -= t*vz
if keep_x:
cx += gx
sx = 1.0
elif take_y:
# Y trace
t = sy/vy
sx -= t*vx
sz -= t*vz
if keep_y:
cy += gy
sy = 1.0
else:
# Z trace
t = sz/vz
sx -= t*vx
sy -= t*vy
if keep_z:
cz += gz
sz = 1.0
# cell check!
ax,ay,az = sfix(sx,sy,sz) # add this to cx,cy,cz
ncx,ncy,ncz = cx+ax,cy+ay,cz+az
if not keep_x:
ncx = nx
if not keep_y:
ncy = ny
if not keep_z:
ncz = nz
if take_x:
floor_check = not self.game.world.solid_check_box(
cx+0.5-gx,ncy,ncz+1,
cx+0.5,ncy+y4,ncz+z4+1
)
checked_out_as_solid = self.game.world.solid_check_box(
cx+0.5-gx,ncy,ncz,
cx+0.5,ncy+y4,ncz+z4
)
elif take_y:
floor_check = not self.game.world.solid_check_box(
ncx,cy+0.5-gy,ncz+1,
ncx+x4,cy+0.5,ncz+z4+1
)
checked_out_as_solid = self.game.world.solid_check_box(
ncx,cy+0.5-gy,ncz,
ncx+x4,cy+0.5,ncz+z4
)
else:
checked_out_as_solid = self.game.world.solid_check_box(
ncx,ncy,cz+0.5-gz,
ncx+x4,ncy+y4,cz+0.5
)
#if self.game.world.test_if_solid(cx,cy,cz):
if checked_out_as_solid:
if take_x:
if walkable and keep_x and floor_check:
cz += 1
onz = nz
nz = cz+0.001
self.antijerk_stairs += onz-nz
keep_x = False
else:
cx -= gx
#sx = 0.1
if keep_x:
nx = cx+wx
self.velocity[0] *= -0.1
keep_x = False
elif take_y:
if walkable and keep_y and floor_check:
cz += 1
onz = nz
nz = cz+0.001
self.antijerk_stairs += onz-nz
keep_z = False
else:
cy -= gy
#sy = 0.1
if keep_y:
ny = cy+wy
self.velocity[1] *= -0.1
keep_y = False
elif take_z:
cz -= gz
#sz = 0.1
if keep_z:
nz = cz+wz
if gz < 0:
self.grounded = True
self.velocity[2] *= -0.1
keep_z = False
return nx-x3, ny-y3, nz-z3
def update(self, dt):
# get new position
nvec = tuple(self.origin[i] + self.velocity[i]*dt for i in xrange(3))
(x1, y1, z1), (x2, y2, z2) = self.BBOX
ox, oy, oz = self.origin
nx, ny, nz = nvec
# trace each corner
#for vbase in self.BVEC:
# vx, vy, vz, walkable = vbase
# tnx, tny, tnz = self.trace_vector(ox+vx, oy+vy, oz+vz, nx+vx, ny+vy, nz+vz, walkable)
# nx, ny, nz = (v-vo for (v,vo) in zip((tnx,tny,tnz),(vx,vy,vz)))
nx, ny, nz = self.trace_vector(ox, oy, oz, nx, ny, nz, self.walkable)
for i,vt in zip(xrange(3), (nx, ny, nz)):
self.origin[i] = vt
class PlayerEntity(PhysicsEntity):
ARGS = PhysicsEntity.ARGS + ["name","keys"]
BBOX_STAND = ((-0.4, -0.4, -2.4),(0.4, 0.4, 0.4))
BBOX_CROUCH = ((-0.4, -0.4, -1.4),(0.4, 0.4, 0.4))
BBOX = BBOX_STAND
def set_game(self, idx, game):
self.idx = idx
self.game = game
self.target_velocity = [0.0, 0.0, 0.0]
self.cam_vx = self.cam_vy = 0.0
self.antijerk_stairs = 0.0
self.crouching = False
self.walkable = True
if game != None:
# init
if self.origin == None:
x = self.game.world.lx//2 + 0.5
y = self.game.world.ly//2 + 0.5
z = self.game.world.lz + 0.5
self.origin = [x,y,z]
if self.orient_x == None:
self.orient_x = 0.0
if self.orient_z == None:
self.orient_z = 0.0
if self.velocity == None:
self.velocity = [0.0, 0.0, 0.0]
if self.keys == None:
self.keys = 0
if self.name == None:
self.name = "Griefer" + repr(self.idx)
else:
# destroy
pass
def set_camera(self):
x,y,z = self.origin
return x,y,z+self.antijerk_stairs,self.orient_z,self.orient_x
def update(self, dt):
#print dt
cam_rmatrix = self.get_cam_matrix_noxrot()
self.cam_vx = 0.0
self.cam_vy = 0.0
# fix antijerk
self.antijerk_stairs *= exp(-10.0*dt)
# deal with key changes
if (self.keys & KEY_JUMP_BIT) and self.grounded and not self.crouching:
self.velocity[2] = PLAYER_JUMP_HEIGHT
self.grounded = False
if (self.keys & KEY_MOVE_LEFT_BIT):
if not (self.keys & KEY_MOVE_RIGHT_BIT):
self.cam_vx = -1.0
elif (self.keys & KEY_MOVE_RIGHT_BIT):
self.cam_vx = 1.0
if (self.keys & KEY_MOVE_BACKWARD_BIT):
if not (self.keys & KEY_MOVE_FORWARD_BIT):
self.cam_vy = -1.0
elif (self.keys & KEY_MOVE_FORWARD_BIT):
self.cam_vy = 1.0
bvx = self.cam_vx*PLAYER_SPEED
bvy = -self.cam_vy*PLAYER_SPEED
if bool(self.keys & KEY_CROUCH_BIT) != self.crouching:
if self.crouching:
# uncrouch check
(x1,y1,z1),(x2,y2,z2) = self.BBOX_STAND
x,y,z = self.origin
if not self.game.world.solid_check_box(x1+x,y1+y,z1+z+2,x2+x,y2+y,z2+z+0.1+1):
self.origin[2] += 1.0
self.BBOX = self.BBOX_STAND
self.antijerk_stairs -= 1.0
self.crouching = False
self.walkable = True
else:
# crouch - no check needed
self.origin[2] -= 1.0
self.BBOX = self.BBOX_CROUCH
self.antijerk_stairs += 1.0
self.crouching = True
self.walkable = False
if (self.keys & KEY_CREEP_BIT) or self.crouching:
bvx *= 0.5
bvy *= 0.5
q = (np.asmatrix([bvx,bvy,0.0])*cam_rmatrix)
#for i in xrange(3):
# self.velocity[i] *= (1.0-PLAYER_FRICTION*dt)
self.target_velocity[0] = q[0,0]
self.target_velocity[1] = q[0,1]
self.target_velocity[2] = q[0,2]
for i in [0,1]: # don't do this with Z.
self.target_velocity[i] - self.velocity[i])*(1.0 - exp(-dt*5.0))
self.velocity[2] -= OBJECT_GRAVITY*dt
PhysicsEntity.update(self, dt)
def get_cam_matrix_noxrot(self):
srz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)
cam_rmatrix = np.asmatrix(np.identity(3))
cam_rmatrix *= np.asmatrix([
[crz,srz,0.0],
[-srz,crz,0.0],
[0.0,0.0,1.0],
])
return cam_rmatrix
def get_cam_matrix(self):
srx,crx = sin(self.orient_x*pi/180.0),cos(self.orient_x*pi/180.0)
srz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)
cam_rmatrix = np.asmatrix(np.identity(3))
cam_rmatrix *= np.asmatrix([
[1.0,0.0,0.0],
[0.0,crx,srx],
[0.0,srx,-crx],
])
cam_rmatrix *= np.asmatrix([
[crz,srz,0.0],
[-srz,crz,0.0],
[0.0,0.0,1.0],
])
return cam_rmatrix
def on_mouse_motion(self, x, y, dx, dy):
self.orient_z += dx*MOUSE_SENS_X
self.orient_x -= dy*MOUSE_SENS_Y
def on_key_press(self, key, mod):
if key == pyglet.window.key.W:
self.keys |= KEY_MOVE_FORWARD_BIT
elif key == pyglet.window.key.S:
self.keys |= KEY_MOVE_BACKWARD_BIT
elif key == pyglet.window.key.A:
self.keys |= KEY_MOVE_LEFT_BIT
elif key == pyglet.window.key.D:
self.keys |= KEY_MOVE_RIGHT_BIT
elif key == pyglet.window.key.SPACE:
self.keys |= KEY_JUMP_BIT
elif key == pyglet.window.key.LCTRL:
self.keys |= KEY_CROUCH_BIT
elif key == pyglet.window.key.LSHIFT:
self.keys |= KEY_CREEP_BIT
def on_key_release(self, key, mod):
if key == pyglet.window.key.W:
self.keys &= ~KEY_MOVE_FORWARD_BIT
elif key == pyglet.window.key.S:
self.keys &= ~KEY_MOVE_BACKWARD_BIT
elif key == pyglet.window.key.A:
self.keys &= ~KEY_MOVE_LEFT_BIT
elif key == pyglet.window.key.D:
self.keys &= ~KEY_MOVE_RIGHT_BIT
elif key == pyglet.window.key.SPACE:
self.keys &= ~KEY_JUMP_BIT
elif key == pyglet.window.key.LCTRL:
self.keys &= ~KEY_CROUCH_BIT
elif key == pyglet.window.key.LSHIFT:
self.keys &= ~KEY_CREEP_BIT
| true | true |
f72e7b2d77caf3124d0c041b67c7329e92fa362c | 4,961 | py | Python | Lib/hashlib.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2015-05-21T23:47:54.000Z | 2015-05-21T23:47:54.000Z | Lib/hashlib.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2015-10-29T20:51:31.000Z | 2015-10-29T20:51:31.000Z | Lib/hashlib.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | # $Id: hashlib.py 66094 2008-08-31 16:35:01Z gregory.p.smith $
#
# Copyright (C) 2005-2007 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, data=b'') - returns a new hash object implementing the
given hash function; initializing the hash
using the given binary data.
Named constructor functions are also available, these are faster
than using new(name):
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the bytes in arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the bytes passed to the update() method
so far.
- hexdigest(): Like digest() except the digest is returned as a unicode
object of double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
def __get_builtin_constructor(name):
    """Return the builtin (non-OpenSSL) constructor for hash algorithm *name*.

    Accepts both lower- and upper-case spellings of the guaranteed
    algorithms.  The _sha256/_sha512 extension modules each provide two
    digest sizes, selected from the numeric suffix of *name*.

    Raises ValueError for any unsupported algorithm name.
    """
    if name in ('SHA1', 'sha1'):
        import _sha1
        return _sha1.sha1
    elif name in ('MD5', 'md5'):
        import _md5
        return _md5.md5
    elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
        import _sha256
        bs = name[3:]
        if bs == '256':
            return _sha256.sha256
        elif bs == '224':
            return _sha256.sha224
    elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
        import _sha512
        bs = name[3:]
        if bs == '512':
            return _sha512.sha512
        elif bs == '384':
            return _sha512.sha384
    # FIX: include the offending name so callers can tell *which* algorithm
    # was rejected (the message previously gave no indication).
    raise ValueError('unsupported hash type ' + name)
def __py_new(name, data=b''):
    """new(name, data=b'') - Return a new hashing object using the named algorithm;
    optionally initialized with data (which must be bytes).

    Pure-builtin variant: never consults the OpenSSL wrappers.
    """
    constructor = __get_builtin_constructor(name)
    return constructor(data)
def __hash_new(name, data=b''):
    """new(name, data=b'') - Return a new hashing object using the named algorithm;
    optionally initialized with data (which must be bytes).

    Prefers the OpenSSL-backed implementation, falling back to the builtin
    modules for algorithms this OpenSSL build does not provide.
    """
    try:
        return _hashlib.new(name, data)
    except ValueError:
        # OpenSSL rejected the algorithm name (e.g. builds prior to 0.9.8
        # lack SHA224/256/384/512) -- fall back to the builtin modules.
        return __get_builtin_constructor(name)(data)
# Module initialization: prefer the OpenSSL-backed constructors from _hashlib
# when available, installing one module-level constructor per supported
# algorithm via exec(); otherwise fall back to the pure builtin wrappers.
try:
    import _hashlib
    # use the wrapper of the C implementation
    new = __hash_new
    for opensslFuncName in filter(lambda n: n.startswith('openssl_'), dir(_hashlib)):
        funcName = opensslFuncName[len('openssl_'):]
        try:
            # try them all, some may not work due to the OpenSSL
            # version not supporting that algorithm.
            f = getattr(_hashlib, opensslFuncName)
            f()
            # Use the C function directly (very fast)
            exec(funcName + ' = f')
        except ValueError:
            try:
                # Use the builtin implementation directly (fast)
                exec(funcName + ' = __get_builtin_constructor(funcName)')
            except ValueError:
                # this one has no builtin implementation, don't define it
                pass
    # clean up our locals
    # NOTE(review): if dir(_hashlib) contained no 'openssl_*' names, f and
    # funcName were never bound and these del statements would raise
    # NameError (not caught by the ImportError handler below).  Presumably
    # every _hashlib build exports at least one -- confirm.
    del f
    del opensslFuncName
    del funcName
except ImportError:
    # We don't have the _hashlib OpenSSL module?
    # use the built in legacy interfaces via a wrapper function
    new = __py_new
# lookup the C function to use directly for the named constructors
md5 = __get_builtin_constructor('md5')
sha1 = __get_builtin_constructor('sha1')
sha224 = __get_builtin_constructor('sha224')
sha256 = __get_builtin_constructor('sha256')
sha384 = __get_builtin_constructor('sha384')
sha512 = __get_builtin_constructor('sha512')
| 35.184397 | 85 | 0.648256 |
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, data=b'') - returns a new hash object implementing the
given hash function; initializing the hash
using the given binary data.
Named constructor functions are also available, these are faster
than using new(name):
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the bytes in arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the bytes passed to the update() method
so far.
- hexdigest(): Like digest() except the digest is returned as a unicode
object of double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
def __get_builtin_constructor(name):
if name in ('SHA1', 'sha1'):
import _sha1
return _sha1.sha1
elif name in ('MD5', 'md5'):
import _md5
return _md5.md5
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
raise ValueError("unsupported hash type")
def __py_new(name, data=b''):
return __get_builtin_constructor(name)(data)
def __hash_new(name, data=b''):
try:
return _hashlib.new(name, data)
except ValueError:
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(data)
try:
import _hashlib
new = __hash_new
for opensslFuncName in filter(lambda n: n.startswith('openssl_'), dir(_hashlib)):
funcName = opensslFuncName[len('openssl_'):]
try:
f = getattr(_hashlib, opensslFuncName)
f()
exec(funcName + ' = f')
except ValueError:
try:
exec(funcName + ' = __get_builtin_constructor(funcName)')
except ValueError:
pass
# clean up our locals
del f
del opensslFuncName
del funcName
except ImportError:
# We don't have the _hashlib OpenSSL module?
new = __py_new
md5 = __get_builtin_constructor('md5')
sha1 = __get_builtin_constructor('sha1')
sha224 = __get_builtin_constructor('sha224')
sha256 = __get_builtin_constructor('sha256')
sha384 = __get_builtin_constructor('sha384')
sha512 = __get_builtin_constructor('sha512')
| true | true |
f72e7cd332d811777563166041005fd0edb29089 | 17,395 | py | Python | python_modules/dagster/dagster/core/definitions/decorators/solid.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/definitions/decorators/solid.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/definitions/decorators/solid.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | null | null | null | from functools import lru_cache, update_wrapper
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Set, Union, cast
from dagster import check
from dagster.core.decorator_utils import format_docstring_for_description
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.dagster_type import DagsterTypeKind
from dagster.seven import funcsigs
from ...decorator_utils import (
get_function_params,
get_valid_name_permutations,
positional_arg_name_list,
)
from ..inference import infer_input_props, infer_output_props
from ..input import InputDefinition
from ..output import OutputDefinition
from ..policy import RetryPolicy
from ..solid_definition import SolidDefinition
class DecoratedSolidFunction(NamedTuple):
    """Wrapper around the decorated solid function providing commonly used
    utility methods.

    NOTE(review): ``lru_cache`` on instance methods keys the cache on
    ``self`` and keeps the instance alive for the cache's lifetime;
    NamedTuple instances are hashable so this works, but confirm the
    single-slot cache is the intended memoization strategy.
    """

    decorated_fn: Callable[..., Any]

    @lru_cache(maxsize=1)
    def has_context_arg(self) -> bool:
        """True when the wrapped function declares a leading context parameter."""
        return is_context_provided(get_function_params(self.decorated_fn))

    @lru_cache(maxsize=1)
    def positional_inputs(self) -> List[str]:
        """Names of the function's positional input parameters (context excluded)."""
        all_params = get_function_params(self.decorated_fn)
        if self.has_context_arg():
            all_params = all_params[1:]
        return positional_arg_name_list(all_params)
class NoContextDecoratedSolidFunction(DecoratedSolidFunction):
    """Wrapper for decorated solid functions whose decorator forbids a
    context parameter (such as lambda_solid).
    """

    @lru_cache(maxsize=1)
    def has_context_arg(self) -> bool:
        # By construction these decorators never pass a context.
        return False
class _Solid:
    """Builder used by the ``@solid`` decorator.

    Stores the decorator arguments; invoking the instance on the decorated
    function assembles and returns the final :class:`SolidDefinition`.
    """

    def __init__(
        self,
        name: Optional[str] = None,
        input_defs: Optional[Sequence[InputDefinition]] = None,
        output_defs: Optional[Sequence[OutputDefinition]] = None,
        description: Optional[str] = None,
        required_resource_keys: Optional[Set[str]] = None,
        config_schema: Optional[Union[Any, Dict[str, Any]]] = None,
        tags: Optional[Dict[str, Any]] = None,
        version: Optional[str] = None,
        decorator_takes_context: Optional[bool] = True,
        retry_policy: Optional[RetryPolicy] = None,
    ):
        self.name = check.opt_str_param(name, "name")
        self.input_defs = check.opt_list_param(input_defs, "input_defs", InputDefinition)
        # output_defs may legitimately be None (meaning: infer from the
        # function), hence opt_nullable_ rather than opt_list_param.
        self.output_defs = check.opt_nullable_list_param(
            output_defs, "output_defs", OutputDefinition
        )
        self.decorator_takes_context = check.bool_param(
            decorator_takes_context, "decorator_takes_context"
        )
        self.description = check.opt_str_param(description, "description")
        # these will be checked within SolidDefinition
        self.required_resource_keys = required_resource_keys
        self.tags = tags
        self.version = version
        self.retry_policy = retry_policy
        # config will be checked within SolidDefinition
        self.config_schema = config_schema

    def __call__(self, fn: Callable[..., Any]) -> SolidDefinition:
        """Build and return a SolidDefinition from the decorated function ``fn``."""
        check.callable_param(fn, "fn")
        if not self.name:
            self.name = fn.__name__
        # Resolve output definitions: infer everything, merge a single
        # explicit def with inference, or take multiple explicit defs verbatim.
        if self.output_defs is None:
            output_defs = [OutputDefinition.create_from_inferred(infer_output_props(fn))]
        elif len(self.output_defs) == 1:
            output_defs = [self.output_defs[0].combine_with_inferred(infer_output_props(fn))]
        else:
            output_defs = self.output_defs
        # Wrap the function; the no-context wrapper is used for decorators
        # (like lambda_solid) that forbid a context parameter.
        compute_fn = (
            DecoratedSolidFunction(decorated_fn=fn)
            if self.decorator_takes_context
            else NoContextDecoratedSolidFunction(decorated_fn=fn)
        )
        resolved_input_defs = resolve_checked_solid_fn_inputs(
            decorator_name="@solid",
            fn_name=self.name,
            compute_fn=compute_fn,
            explicit_input_defs=self.input_defs,
            exclude_nothing=True,
        )
        solid_def = SolidDefinition(
            name=self.name,
            input_defs=resolved_input_defs,
            output_defs=output_defs,
            compute_fn=compute_fn,
            config_schema=self.config_schema,
            description=self.description or format_docstring_for_description(fn),
            required_resource_keys=self.required_resource_keys,
            tags=self.tags,
            version=self.version,
            retry_policy=self.retry_policy,
        )
        # Make the definition look like the wrapped function (__name__, __doc__, ...).
        update_wrapper(solid_def, compute_fn.decorated_fn)
        return solid_def
def solid(
    name: Union[Callable[..., Any], Optional[str]] = None,
    description: Optional[str] = None,
    input_defs: Optional[Sequence[InputDefinition]] = None,
    output_defs: Optional[Sequence[OutputDefinition]] = None,
    config_schema: Optional[Union[Any, Dict[str, Any]]] = None,
    required_resource_keys: Optional[Set[str]] = None,
    tags: Optional[Dict[str, Any]] = None,
    version: Optional[str] = None,
    retry_policy: Optional[RetryPolicy] = None,
) -> Union[_Solid, SolidDefinition]:
    """Create a solid with the specified parameters from the decorated function.

    This shortcut simplifies the core :class:`SolidDefinition` API by exploding arguments into
    kwargs of the decorated compute function and omitting additional parameters when they are not
    needed.

    Input and output definitions will be inferred from the type signature of the decorated function
    if not explicitly provided.

    The decorated function will be used as the solid's compute function. The signature of the
    decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:

    1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.
    2. Return an :py:class:`Output`. This output will be yielded by the compute function.
    3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.

    Note that options 1) and 2) are incompatible with yielding other events -- if you would like
    to decorate a function that yields events, it must also wrap its eventual output in an
    :py:class:`Output` and yield it.

    @solid supports ``async def`` functions as well, including async generators when yielding multiple
    events or outputs. Note that async solids will generally be run on their own unless using a custom
    :py:class:`Executor` implementation that supports running them together.

    Args:
        name (Optional[str]): Name of solid. Must be unique within any :py:class:`PipelineDefinition`
            using the solid.
        description (Optional[str]): Human-readable description of this solid. If not provided, and
            the decorated function has docstring, that docstring will be used as the description.
        input_defs (Optional[List[InputDefinition]]):
            Information about the inputs to the solid. Information provided here will be combined
            with what can be inferred from the function signature, with these explicit InputDefinitions
            taking precedence.
        output_defs (Optional[List[OutputDefinition]]):
            Information about the solids outputs. Information provided here will be combined with
            what can be inferred from the return type signature if there is only one OutputDefinition
            and the function does not use yield.
        config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check
            that config provided for the solid matches this schema and fail if it does not. If not
            set, Dagster will accept any config provided for the solid.
        required_resource_keys (Optional[Set[str]]): Set of resource handles required by this solid.
        tags (Optional[Dict[str, Any]]): Arbitrary metadata for the solid. Frameworks may
            expect and require certain metadata to be attached to a solid. Values that are not strings
            will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.
        version (Optional[str]): (Experimental) The version of the solid's compute_fn. Two solids should have
            the same version if and only if they deterministically produce the same outputs when
            provided the same inputs.
        retry_policy (Optional[RetryPolicy]): The retry policy for this solid.

    Examples:

        .. code-block:: python

            @solid
            def hello_world():
                print('hello')

            @solid
            def hello_world():
                return {'foo': 'bar'}

            @solid
            def hello_world():
                return Output(value={'foo': 'bar'})

            @solid
            def hello_world():
                yield Output(value={'foo': 'bar'})

            @solid
            def hello_world(foo):
                return foo

            @solid(
                input_defs=[InputDefinition(name="foo", str)],
                output_defs=[OutputDefinition(str)]
            )
            def hello_world(foo):
                # explicitly type and name inputs and outputs
                return foo

            @solid
            def hello_world(foo: str) -> str:
                # same as above inferred from signature
                return foo

            @solid
            def hello_world(context, foo):
                context.log.info('log something')
                return foo

            @solid(
                config_schema={'str_value' : Field(str)}
            )
            def hello_world(context, foo):
                # context.solid_config is a dictionary with 'str_value' key
                return foo + context.solid_config['str_value']

    """
    # This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid()
    if callable(name):
        check.invariant(input_defs is None)
        check.invariant(output_defs is None)
        check.invariant(description is None)
        check.invariant(config_schema is None)
        check.invariant(required_resource_keys is None)
        check.invariant(tags is None)
        check.invariant(version is None)
        # FIX: retry_policy was the only decorator argument not guarded here;
        # a mistakenly-passed policy would have been silently dropped because
        # this bare-decorator path constructs _Solid() with defaults.
        check.invariant(retry_policy is None)
        return _Solid()(name)
    return _Solid(
        name=name,
        input_defs=input_defs,
        output_defs=output_defs,
        config_schema=config_schema,
        description=description,
        required_resource_keys=required_resource_keys,
        tags=tags,
        version=version,
        retry_policy=retry_policy,
    )
def resolve_checked_solid_fn_inputs(
    decorator_name: str,
    fn_name: str,
    compute_fn: DecoratedSolidFunction,
    explicit_input_defs: List[InputDefinition],
    exclude_nothing: bool,
) -> List[InputDefinition]:
    """
    Validate provided input definitions and infer the remaining from the type signature of the compute_fn.
    Returns the resolved set of InputDefinitions.

    Args:
        decorator_name (str): Name of the decorator that is wrapping the solid function.
        fn_name (str): Name of the decorated function.
        compute_fn (DecoratedSolidFunction): The decorated function, wrapped in the
            DecoratedSolidFunction wrapper.
        explicit_input_defs (List[InputDefinition]): The input definitions that were explicitly
            provided in the decorator.
        exclude_nothing (bool): True if Nothing type inputs should be excluded from compute_fn
            arguments.

    Raises:
        DagsterInvalidDefinitionError: if the function's parameters do not line up with the
            explicit input definitions (vararg parameters, Nothing-typed inputs appearing as
            parameters, or declared inputs missing from the signature).
    """
    # Partition the explicit defs: Nothing-typed inputs carry no data, so
    # (when exclude_nothing) they must NOT appear as function parameters.
    if exclude_nothing:
        explicit_names = set(
            inp.name
            for inp in explicit_input_defs
            if not inp.dagster_type.kind == DagsterTypeKind.NOTHING
        )
        nothing_names = set(
            inp.name
            for inp in explicit_input_defs
            if inp.dagster_type.kind == DagsterTypeKind.NOTHING
        )
    else:
        explicit_names = set(inp.name for inp in explicit_input_defs)
        nothing_names = set()
    params = get_function_params(compute_fn.decorated_fn)
    # Skip the leading context parameter when the function declares one.
    input_args = params[1:] if compute_fn.has_context_arg() else params
    # Validate input arguments
    used_inputs = set()
    inputs_to_infer = set()
    has_kwargs = False
    for param in cast(List[funcsigs.Parameter], input_args):
        if param.kind == funcsigs.Parameter.VAR_KEYWORD:
            has_kwargs = True
        elif param.kind == funcsigs.Parameter.VAR_POSITIONAL:
            raise DagsterInvalidDefinitionError(
                f"{decorator_name} '{fn_name}' decorated function has positional vararg parameter "
                f"'{param}'. {decorator_name} decorated functions should only have keyword "
                "arguments that match input names and, if system information is required, a first "
                "positional parameter named 'context'."
            )
        else:
            if param.name not in explicit_names:
                if param.name in nothing_names:
                    raise DagsterInvalidDefinitionError(
                        f"{decorator_name} '{fn_name}' decorated function has parameter '{param.name}' that is "
                        "one of the input_defs of type 'Nothing' which should not be included since "
                        "no data will be passed for it. "
                    )
                else:
                    # Parameter with no explicit def: infer its def from the signature.
                    inputs_to_infer.add(param.name)
            else:
                used_inputs.add(param.name)
    undeclared_inputs = explicit_names - used_inputs
    # A **kwargs catch-all absorbs declared-but-unlisted inputs; otherwise they are errors.
    if not has_kwargs and undeclared_inputs:
        undeclared_inputs_printed = ", '".join(undeclared_inputs)
        raise DagsterInvalidDefinitionError(
            f"{decorator_name} '{fn_name}' decorated function does not have parameter(s) "
            f"'{undeclared_inputs_printed}', which are in provided input_defs. {decorator_name} "
            "decorated functions should only have keyword arguments that match input names and, if "
            "system information is required, a first positional parameter named 'context'."
        )
    inferred_props = {
        inferred.name: inferred
        for inferred in infer_input_props(compute_fn.decorated_fn, compute_fn.has_context_arg())
    }
    input_defs = []
    for input_def in explicit_input_defs:
        if input_def.name in inferred_props:
            # combine any information missing on the explicit def that can be inferred
            input_defs.append(input_def.combine_with_inferred(inferred_props[input_def.name]))
        else:
            # pass through those that don't have any inference info, such as Nothing type inputs
            input_defs.append(input_def)
    # build defs from the inferred props for those without explicit entries
    input_defs.extend(
        InputDefinition.create_from_inferred(inferred)
        for inferred in inferred_props.values()
        if inferred.name in inputs_to_infer
    )
    return input_defs
def is_context_provided(params: List[funcsigs.Parameter]) -> bool:
    """Return True when the first parameter is named like a context argument."""
    if not params:
        return False
    first_param = params[0]
    return first_param.name in get_valid_name_permutations("context")
def lambda_solid(
    name: Union[Optional[str], Callable[..., Any]] = None,
    description: Optional[str] = None,
    input_defs: Optional[List[InputDefinition]] = None,
    output_def: Optional[OutputDefinition] = None,
) -> Union[_Solid, SolidDefinition]:
    """Create a simple solid from the decorated function.

    Lambda solids do not require configuration and their implementations do
    not receive a :py:class:`context <SolidExecutionContext>`.  They take any
    number of inputs and produce a single output.

    Inputs can be given via ``input_defs`` or inferred from the type
    signature of the decorated function.  The single output can be given via
    ``output_def`` (defaulting to :class:`OutputDefinition() <OutputDefinition>`)
    or inferred from the return annotation.  The body of the decorated
    function should return a single value, which is yielded as the solid's
    output.

    Args:
        name (str): Name of solid.
        description (str): Solid description.
        input_defs (List[InputDefinition]): List of input_defs.
        output_def (OutputDefinition): The output of the solid. Defaults to
            :class:`OutputDefinition() <OutputDefinition>`.

    Examples:

        .. code-block:: python

            @lambda_solid
            def hello_world():
                return 'hello'

            @lambda_solid(
                input_defs=[InputDefinition(name='foo', str)],
                output_def=OutputDefinition(str)
            )
            def hello_world(foo):
                # explicitly type and name inputs and outputs
                return foo

            @lambda_solid
            def hello_world(foo: str) -> str:
                # same as above inferred from signature
                return foo
    """
    wrapped_output_defs = [output_def] if output_def else None
    # Bare usage (@lambda_solid with no parentheses): `name` is the function.
    if callable(name):
        check.invariant(input_defs is None)
        check.invariant(description is None)
        decorator = _Solid(output_defs=wrapped_output_defs, decorator_takes_context=False)
        return decorator(name)
    return _Solid(
        name=name,
        input_defs=input_defs,
        output_defs=wrapped_output_defs,
        description=description,
        decorator_takes_context=False,
    )
| 39.714612 | 112 | 0.665306 | from functools import lru_cache, update_wrapper
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Set, Union, cast
from dagster import check
from dagster.core.decorator_utils import format_docstring_for_description
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.dagster_type import DagsterTypeKind
from dagster.seven import funcsigs
from ...decorator_utils import (
get_function_params,
get_valid_name_permutations,
positional_arg_name_list,
)
from ..inference import infer_input_props, infer_output_props
from ..input import InputDefinition
from ..output import OutputDefinition
from ..policy import RetryPolicy
from ..solid_definition import SolidDefinition
class DecoratedSolidFunction(NamedTuple):
decorated_fn: Callable[..., Any]
@lru_cache(maxsize=1)
def has_context_arg(self) -> bool:
return is_context_provided(get_function_params(self.decorated_fn))
@lru_cache(maxsize=1)
def positional_inputs(self) -> List[str]:
params = get_function_params(self.decorated_fn)
input_args = params[1:] if self.has_context_arg() else params
return positional_arg_name_list(input_args)
class NoContextDecoratedSolidFunction(DecoratedSolidFunction):
@lru_cache(maxsize=1)
def has_context_arg(self) -> bool:
return False
class _Solid:
def __init__(
self,
name: Optional[str] = None,
input_defs: Optional[Sequence[InputDefinition]] = None,
output_defs: Optional[Sequence[OutputDefinition]] = None,
description: Optional[str] = None,
required_resource_keys: Optional[Set[str]] = None,
config_schema: Optional[Union[Any, Dict[str, Any]]] = None,
tags: Optional[Dict[str, Any]] = None,
version: Optional[str] = None,
decorator_takes_context: Optional[bool] = True,
retry_policy: Optional[RetryPolicy] = None,
):
self.name = check.opt_str_param(name, "name")
self.input_defs = check.opt_list_param(input_defs, "input_defs", InputDefinition)
self.output_defs = check.opt_nullable_list_param(
output_defs, "output_defs", OutputDefinition
)
self.decorator_takes_context = check.bool_param(
decorator_takes_context, "decorator_takes_context"
)
self.description = check.opt_str_param(description, "description")
self.required_resource_keys = required_resource_keys
self.tags = tags
self.version = version
self.retry_policy = retry_policy
self.config_schema = config_schema
def __call__(self, fn: Callable[..., Any]) -> SolidDefinition:
check.callable_param(fn, "fn")
if not self.name:
self.name = fn.__name__
if self.output_defs is None:
output_defs = [OutputDefinition.create_from_inferred(infer_output_props(fn))]
elif len(self.output_defs) == 1:
output_defs = [self.output_defs[0].combine_with_inferred(infer_output_props(fn))]
else:
output_defs = self.output_defs
compute_fn = (
DecoratedSolidFunction(decorated_fn=fn)
if self.decorator_takes_context
else NoContextDecoratedSolidFunction(decorated_fn=fn)
)
resolved_input_defs = resolve_checked_solid_fn_inputs(
decorator_name="@solid",
fn_name=self.name,
compute_fn=compute_fn,
explicit_input_defs=self.input_defs,
exclude_nothing=True,
)
solid_def = SolidDefinition(
name=self.name,
input_defs=resolved_input_defs,
output_defs=output_defs,
compute_fn=compute_fn,
config_schema=self.config_schema,
description=self.description or format_docstring_for_description(fn),
required_resource_keys=self.required_resource_keys,
tags=self.tags,
version=self.version,
retry_policy=self.retry_policy,
)
update_wrapper(solid_def, compute_fn.decorated_fn)
return solid_def
def solid(
name: Union[Callable[..., Any], Optional[str]] = None,
description: Optional[str] = None,
input_defs: Optional[Sequence[InputDefinition]] = None,
output_defs: Optional[Sequence[OutputDefinition]] = None,
config_schema: Optional[Union[Any, Dict[str, Any]]] = None,
required_resource_keys: Optional[Set[str]] = None,
tags: Optional[Dict[str, Any]] = None,
version: Optional[str] = None,
retry_policy: Optional[RetryPolicy] = None,
) -> Union[_Solid, SolidDefinition]:
if callable(name):
check.invariant(input_defs is None)
check.invariant(output_defs is None)
check.invariant(description is None)
check.invariant(config_schema is None)
check.invariant(required_resource_keys is None)
check.invariant(tags is None)
check.invariant(version is None)
return _Solid()(name)
return _Solid(
name=name,
input_defs=input_defs,
output_defs=output_defs,
config_schema=config_schema,
description=description,
required_resource_keys=required_resource_keys,
tags=tags,
version=version,
retry_policy=retry_policy,
)
def resolve_checked_solid_fn_inputs(
decorator_name: str,
fn_name: str,
compute_fn: DecoratedSolidFunction,
explicit_input_defs: List[InputDefinition],
exclude_nothing: bool,
) -> List[InputDefinition]:
if exclude_nothing:
explicit_names = set(
inp.name
for inp in explicit_input_defs
if not inp.dagster_type.kind == DagsterTypeKind.NOTHING
)
nothing_names = set(
inp.name
for inp in explicit_input_defs
if inp.dagster_type.kind == DagsterTypeKind.NOTHING
)
else:
explicit_names = set(inp.name for inp in explicit_input_defs)
nothing_names = set()
params = get_function_params(compute_fn.decorated_fn)
input_args = params[1:] if compute_fn.has_context_arg() else params
used_inputs = set()
inputs_to_infer = set()
has_kwargs = False
for param in cast(List[funcsigs.Parameter], input_args):
if param.kind == funcsigs.Parameter.VAR_KEYWORD:
has_kwargs = True
elif param.kind == funcsigs.Parameter.VAR_POSITIONAL:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{fn_name}' decorated function has positional vararg parameter "
f"'{param}'. {decorator_name} decorated functions should only have keyword "
"arguments that match input names and, if system information is required, a first "
"positional parameter named 'context'."
)
else:
if param.name not in explicit_names:
if param.name in nothing_names:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{fn_name}' decorated function has parameter '{param.name}' that is "
"one of the input_defs of type 'Nothing' which should not be included since "
"no data will be passed for it. "
)
else:
inputs_to_infer.add(param.name)
else:
used_inputs.add(param.name)
undeclared_inputs = explicit_names - used_inputs
if not has_kwargs and undeclared_inputs:
undeclared_inputs_printed = ", '".join(undeclared_inputs)
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{fn_name}' decorated function does not have parameter(s) "
f"'{undeclared_inputs_printed}', which are in provided input_defs. {decorator_name} "
"decorated functions should only have keyword arguments that match input names and, if "
"system information is required, a first positional parameter named 'context'."
)
inferred_props = {
inferred.name: inferred
for inferred in infer_input_props(compute_fn.decorated_fn, compute_fn.has_context_arg())
}
input_defs = []
for input_def in explicit_input_defs:
if input_def.name in inferred_props:
# combine any information missing on the explicit def that can be inferred
input_defs.append(input_def.combine_with_inferred(inferred_props[input_def.name]))
else:
# pass through those that don't have any inference info, such as Nothing type inputs
input_defs.append(input_def)
input_defs.extend(
InputDefinition.create_from_inferred(inferred)
for inferred in inferred_props.values()
if inferred.name in inputs_to_infer
)
return input_defs
def is_context_provided(params: List[funcsigs.Parameter]) -> bool:
    """Return True when the function's leading parameter is a context argument.

    A parameter list is considered to take a context if its first entry's
    name is one of the accepted spellings of "context".
    """
    if not params:
        return False
    first_param = params[0]
    return first_param.name in get_valid_name_permutations("context")
def lambda_solid(
    name: Union[Optional[str], Callable[..., Any]] = None,
    description: Optional[str] = None,
    input_defs: Optional[List[InputDefinition]] = None,
    output_def: Optional[OutputDefinition] = None,
) -> Union[_Solid, SolidDefinition]:
    """Create a simple solid from the decorated compute function.

    Supports both bare usage (``@lambda_solid``) and parameterized usage
    (``@lambda_solid(name=..., description=..., ...)``). The resulting solid
    never receives a context argument.
    """
    output_defs = [output_def] if output_def else None
    # Bare decorator form: ``name`` is actually the decorated function itself.
    if callable(name):
        check.invariant(input_defs is None)
        check.invariant(description is None)
        return _Solid(output_defs=output_defs, decorator_takes_context=False)(name)
    # Parameterized form: return a decorator configured with the given options.
    return _Solid(
        name=name,
        input_defs=input_defs,
        output_defs=output_defs,
        description=description,
        decorator_takes_context=False,
    )
| true | true |
f72e7deedad7102a3c5f47b1158672e18ad0b3b5 | 96 | py | Python | mqttassistant/web/healthz.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | null | null | null | mqttassistant/web/healthz.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | null | null | null | mqttassistant/web/healthz.py | madron/mqttassistant | a6e40612b74e60585fd612785da1f2ba81f11881 | [
"MIT"
] | 2 | 2022-02-04T15:29:37.000Z | 2022-02-05T16:56:33.000Z | from fastapi import Request, Response
async def main(request: Request):
    """Health-check endpoint: acknowledge liveness with an empty response."""
    response = Response()
    return response
| 16 | 37 | 0.75 | from fastapi import Request, Response
async def main(request: Request):
return Response()
| true | true |
f72e7f367fcf301b211752dfc42c0fae3388424b | 18,108 | py | Python | tests/datasets/commands_tests.py | arpith-kp/superset | 42ff4fc19a34144b31cef82b341871dff34f37d2 | [
"Apache-2.0"
] | 44 | 2021-04-14T10:53:36.000Z | 2021-09-11T00:29:50.000Z | tests/datasets/commands_tests.py | arpith-kp/superset | 42ff4fc19a34144b31cef82b341871dff34f37d2 | [
"Apache-2.0"
] | 55 | 2021-04-02T16:03:57.000Z | 2022-03-24T02:12:33.000Z | tests/datasets/commands_tests.py | arpith-kp/superset | 42ff4fc19a34144b31cef82b341871dff34f37d2 | [
"Apache-2.0"
] | 11 | 2021-06-09T08:30:57.000Z | 2021-11-30T03:16:14.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-self-use, invalid-name, line-too-long
import copy
from operator import itemgetter
from typing import Any, List
from unittest.mock import patch

import pytest
import yaml

from superset import db, security_manager
from superset.commands.exceptions import CommandInvalidError
from superset.commands.importers.exceptions import IncorrectVersionError
from superset.connectors.sqla.models import SqlaTable
from superset.databases.commands.importers.v1 import ImportDatabasesCommand
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.datasets.commands.export import ExportDatasetsCommand
from superset.datasets.commands.importers import v0, v1
from superset.models.core import Database
from superset.utils.core import get_example_database

from tests.base_tests import SupersetTestCase
from tests.fixtures.energy_dashboard import load_energy_table_with_slice
from tests.fixtures.importexport import (
    database_config,
    database_metadata_config,
    dataset_cli_export,
    dataset_config,
    dataset_metadata_config,
    dataset_ui_export,
)
from tests.fixtures.world_bank_dashboard import load_world_bank_dashboard_with_slices
class TestExportDatasetsCommand(SupersetTestCase):
    """Integration tests for ``ExportDatasetsCommand`` (dataset YAML export)."""
    @patch("superset.security.manager.g")
    @pytest.mark.usefixtures("load_energy_table_with_slice")
    def test_export_dataset_command(self, mock_g):
        """Test that an admin can export a dataset and its database as YAML"""
        mock_g.user = security_manager.find_user("admin")
        example_db = get_example_database()
        example_dataset = _get_table_from_list_by_name(
            "energy_usage", example_db.tables
        )
        command = ExportDatasetsCommand([example_dataset.id])
        contents = dict(command.run())
        # the export bundle contains metadata, the dataset, and its database
        assert list(contents.keys()) == [
            "metadata.yaml",
            "datasets/examples/energy_usage.yaml",
            "databases/examples.yaml",
        ]
        metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
        # sort columns for deterministic comparison
        metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
        metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))
        # types are different depending on the backend
        type_map = {
            column.column_name: str(column.type) for column in example_dataset.columns
        }
        assert metadata == {
            "cache_timeout": None,
            "columns": [
                {
                    "column_name": "source",
                    "description": None,
                    "expression": "",
                    "filterable": True,
                    "groupby": True,
                    "is_active": True,
                    "is_dttm": False,
                    "python_date_format": None,
                    "type": type_map["source"],
                    "verbose_name": None,
                },
                {
                    "column_name": "target",
                    "description": None,
                    "expression": "",
                    "filterable": True,
                    "groupby": True,
                    "is_active": True,
                    "is_dttm": False,
                    "python_date_format": None,
                    "type": type_map["target"],
                    "verbose_name": None,
                },
                {
                    "column_name": "value",
                    "description": None,
                    "expression": "",
                    "filterable": True,
                    "groupby": True,
                    "is_active": True,
                    "is_dttm": False,
                    "python_date_format": None,
                    "type": type_map["value"],
                    "verbose_name": None,
                },
            ],
            "database_uuid": str(example_db.uuid),
            "default_endpoint": None,
            "description": "Energy consumption",
            "extra": None,
            "fetch_values_predicate": None,
            "filter_select_enabled": False,
            "main_dttm_col": None,
            "metrics": [
                {
                    "d3format": None,
                    "description": None,
                    "expression": "COUNT(*)",
                    "extra": None,
                    "metric_name": "count",
                    "metric_type": "count",
                    "verbose_name": "COUNT(*)",
                    "warning_text": None,
                },
                {
                    "d3format": None,
                    "description": None,
                    "expression": "SUM(value)",
                    "extra": None,
                    "metric_name": "sum__value",
                    "metric_type": None,
                    "verbose_name": None,
                    "warning_text": None,
                },
            ],
            "offset": 0,
            "params": None,
            "schema": None,
            "sql": None,
            "table_name": "energy_usage",
            "template_params": None,
            "uuid": str(example_dataset.uuid),
            "version": "1.0.0",
        }
    @patch("superset.security.manager.g")
    def test_export_dataset_command_no_access(self, mock_g):
        """Test that users can't export datasets they don't have access to"""
        mock_g.user = security_manager.find_user("gamma")
        example_db = get_example_database()
        example_dataset = example_db.tables[0]
        command = ExportDatasetsCommand([example_dataset.id])
        contents = command.run()
        # run() returns a generator; the access error surfaces on first iteration
        with self.assertRaises(DatasetNotFoundError):
            next(contents)
    @patch("superset.security.manager.g")
    def test_export_dataset_command_invalid_dataset(self, mock_g):
        """Test that an error is raised when exporting an invalid dataset"""
        mock_g.user = security_manager.find_user("admin")
        # -1 is never a valid dataset id
        command = ExportDatasetsCommand([-1])
        contents = command.run()
        with self.assertRaises(DatasetNotFoundError):
            next(contents)
    @patch("superset.security.manager.g")
    @pytest.mark.usefixtures("load_energy_table_with_slice")
    def test_export_dataset_command_key_order(self, mock_g):
        """Test that the keys in the YAML have the same order as export_fields"""
        mock_g.user = security_manager.find_user("admin")
        example_db = get_example_database()
        example_dataset = _get_table_from_list_by_name(
            "energy_usage", example_db.tables
        )
        command = ExportDatasetsCommand([example_dataset.id])
        contents = dict(command.run())
        metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
        assert list(metadata.keys()) == [
            "table_name",
            "main_dttm_col",
            "description",
            "default_endpoint",
            "offset",
            "cache_timeout",
            "schema",
            "sql",
            "params",
            "template_params",
            "filter_select_enabled",
            "fetch_values_predicate",
            "extra",
            "uuid",
            "metrics",
            "columns",
            "version",
            "database_uuid",
        ]
class TestImportDatasetsCommand(SupersetTestCase):
    """Integration tests for the v0 and v1 dataset import commands."""
    @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
    def test_import_v0_dataset_cli_export(self):
        """Test that a dataset can be imported from a v0 CLI export"""
        num_datasets = db.session.query(SqlaTable).count()
        contents = {
            "20201119_181105.yaml": yaml.safe_dump(dataset_cli_export),
        }
        command = v0.ImportDatasetsCommand(contents)
        command.run()
        new_num_datasets = db.session.query(SqlaTable).count()
        assert new_num_datasets == num_datasets + 1
        dataset = (
            db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
        )
        assert (
            dataset.params
            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
        )
        assert len(dataset.metrics) == 2
        assert dataset.main_dttm_col == "ds"
        assert dataset.filter_select_enabled
        # sort both sides so the comparison is independent of insertion order
        dataset.columns.sort(key=lambda obj: obj.column_name)
        expected_columns = [
            "num_california",
            "ds",
            "state",
            "gender",
            "name",
            "num_boys",
            "num_girls",
            "num",
        ]
        expected_columns.sort()
        assert [col.column_name for col in dataset.columns] == expected_columns
        # clean up so later tests start from a pristine state
        db.session.delete(dataset)
        db.session.commit()
    @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
    def test_import_v0_dataset_ui_export(self):
        """Test that a dataset can be imported from a v0 UI export"""
        num_datasets = db.session.query(SqlaTable).count()
        contents = {
            "20201119_181105.yaml": yaml.safe_dump(dataset_ui_export),
        }
        command = v0.ImportDatasetsCommand(contents)
        command.run()
        new_num_datasets = db.session.query(SqlaTable).count()
        assert new_num_datasets == num_datasets + 1
        dataset = (
            db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
        )
        assert (
            dataset.params
            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
        )
        assert len(dataset.metrics) == 2
        assert dataset.main_dttm_col == "ds"
        assert dataset.filter_select_enabled
        # compare as a set: column order is not part of the contract here
        assert {col.column_name for col in dataset.columns} == {
            "num_california",
            "ds",
            "state",
            "gender",
            "name",
            "num_boys",
            "num_girls",
            "num",
        }
        db.session.delete(dataset)
        db.session.commit()
    def test_import_v1_dataset(self):
        """Test that we can import a dataset"""
        contents = {
            "metadata.yaml": yaml.safe_dump(dataset_metadata_config),
            "databases/imported_database.yaml": yaml.safe_dump(database_config),
            "datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
        }
        command = v1.ImportDatasetsCommand(contents)
        command.run()
        dataset = (
            db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
        )
        assert dataset.table_name == "imported_dataset"
        assert dataset.main_dttm_col is None
        assert dataset.description == "This is a dataset that was exported"
        assert dataset.default_endpoint == ""
        assert dataset.offset == 66
        assert dataset.cache_timeout == 55
        assert dataset.schema == ""
        assert dataset.sql == ""
        assert dataset.params is None
        assert dataset.template_params is None
        assert dataset.filter_select_enabled
        assert dataset.fetch_values_predicate is None
        assert dataset.extra is None
        # database is also imported
        assert str(dataset.database.uuid) == "b8a1ccd3-779d-4ab7-8ad8-9ab119d7fe89"
        assert len(dataset.metrics) == 1
        metric = dataset.metrics[0]
        assert metric.metric_name == "count"
        assert metric.verbose_name == ""
        assert metric.metric_type is None
        assert metric.expression == "count(1)"
        assert metric.description is None
        assert metric.d3format is None
        assert metric.extra is None
        assert metric.warning_text is None
        assert len(dataset.columns) == 1
        column = dataset.columns[0]
        assert column.column_name == "cnt"
        assert column.verbose_name == "Count of something"
        assert not column.is_dttm
        assert column.is_active  # imported columns are set to active
        assert column.type == "NUMBER"
        assert not column.groupby
        assert column.filterable
        assert column.expression == ""
        assert column.description is None
        assert column.python_date_format is None
        db.session.delete(dataset)
        db.session.delete(dataset.database)
        db.session.commit()
    def test_import_v1_dataset_multiple(self):
        """Test that a dataset can be imported multiple times"""
        contents = {
            "metadata.yaml": yaml.safe_dump(dataset_metadata_config),
            "databases/imported_database.yaml": yaml.safe_dump(database_config),
            "datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
        }
        command = v1.ImportDatasetsCommand(contents, overwrite=True)
        command.run()
        command.run()
        dataset = (
            db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
        )
        assert dataset.table_name == "imported_dataset"
        # test that columns and metrics sync, ie, old ones not in the import
        # are removed; deep-copy the fixture first so mutating the nested
        # metric/column dicts cannot corrupt the shared ``dataset_config``
        # used by other tests in this module
        new_config = copy.deepcopy(dataset_config)
        new_config["metrics"][0]["metric_name"] = "count2"
        new_config["columns"][0]["column_name"] = "cnt2"
        contents = {
            "metadata.yaml": yaml.safe_dump(dataset_metadata_config),
            "databases/imported_database.yaml": yaml.safe_dump(database_config),
            "datasets/imported_dataset.yaml": yaml.safe_dump(new_config),
        }
        command = v1.ImportDatasetsCommand(contents, overwrite=True)
        command.run()
        dataset = (
            db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
        )
        assert len(dataset.metrics) == 1
        assert dataset.metrics[0].metric_name == "count2"
        assert len(dataset.columns) == 1
        assert dataset.columns[0].column_name == "cnt2"
        db.session.delete(dataset)
        db.session.delete(dataset.database)
        db.session.commit()
    def test_import_v1_dataset_validation(self):
        """Test different validations applied when importing a dataset"""
        # metadata.yaml must be present
        contents = {
            "datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
        }
        command = v1.ImportDatasetsCommand(contents)
        with pytest.raises(IncorrectVersionError) as excinfo:
            command.run()
        assert str(excinfo.value) == "Missing metadata.yaml"
        # version should be 1.0.0
        contents["metadata.yaml"] = yaml.safe_dump(
            {
                "version": "2.0.0",
                "type": "SqlaTable",
                "timestamp": "2020-11-04T21:27:44.423819+00:00",
            }
        )
        command = v1.ImportDatasetsCommand(contents)
        with pytest.raises(IncorrectVersionError) as excinfo:
            command.run()
        assert str(excinfo.value) == "Must be equal to 1.0.0."
        # type should be SqlaTable
        contents["metadata.yaml"] = yaml.safe_dump(database_metadata_config)
        command = v1.ImportDatasetsCommand(contents)
        with pytest.raises(CommandInvalidError) as excinfo:
            command.run()
        assert str(excinfo.value) == "Error importing dataset"
        assert excinfo.value.normalized_messages() == {
            "metadata.yaml": {"type": ["Must be equal to SqlaTable."]}
        }
        # must also validate databases; deep-copy so deleting/mutating keys
        # cannot affect the shared ``database_config`` fixture
        broken_config = copy.deepcopy(database_config)
        del broken_config["database_name"]
        contents["metadata.yaml"] = yaml.safe_dump(dataset_metadata_config)
        contents["databases/imported_database.yaml"] = yaml.safe_dump(broken_config)
        command = v1.ImportDatasetsCommand(contents)
        with pytest.raises(CommandInvalidError) as excinfo:
            command.run()
        assert str(excinfo.value) == "Error importing dataset"
        assert excinfo.value.normalized_messages() == {
            "databases/imported_database.yaml": {
                "database_name": ["Missing data for required field."],
            }
        }
    def test_import_v1_dataset_existing_database(self):
        """Test that a dataset can be imported when the database already exists"""
        # first import database...
        contents = {
            "metadata.yaml": yaml.safe_dump(database_metadata_config),
            "databases/imported_database.yaml": yaml.safe_dump(database_config),
        }
        command = ImportDatabasesCommand(contents)
        command.run()
        database = (
            db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
        )
        assert len(database.tables) == 0
        # ...then dataset
        contents = {
            "metadata.yaml": yaml.safe_dump(dataset_metadata_config),
            "datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
            "databases/imported_database.yaml": yaml.safe_dump(database_config),
        }
        command = v1.ImportDatasetsCommand(contents, overwrite=True)
        command.run()
        database = (
            db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
        )
        assert len(database.tables) == 1
        db.session.delete(database.tables[0])
        db.session.delete(database)
        db.session.commit()
def _get_table_from_list_by_name(name: str, tables: List[Any]):
for table in tables:
if table.table_name == name:
return table
raise ValueError(f"Table {name} does not exists in database")
| 37.882845 | 89 | 0.602386 |
from operator import itemgetter
from typing import Any, List
from unittest.mock import patch
import pytest
import yaml
from superset import db, security_manager
from superset.commands.exceptions import CommandInvalidError
from superset.commands.importers.exceptions import IncorrectVersionError
from superset.connectors.sqla.models import SqlaTable
from superset.databases.commands.importers.v1 import ImportDatabasesCommand
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.datasets.commands.export import ExportDatasetsCommand
from superset.datasets.commands.importers import v0, v1
from superset.models.core import Database
from superset.utils.core import get_example_database
from tests.base_tests import SupersetTestCase
from tests.fixtures.energy_dashboard import load_energy_table_with_slice
from tests.fixtures.importexport import (
database_config,
database_metadata_config,
dataset_cli_export,
dataset_config,
dataset_metadata_config,
dataset_ui_export,
)
from tests.fixtures.world_bank_dashboard import load_world_bank_dashboard_with_slices
class TestExportDatasetsCommand(SupersetTestCase):
@patch("superset.security.manager.g")
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_export_dataset_command(self, mock_g):
mock_g.user = security_manager.find_user("admin")
example_db = get_example_database()
example_dataset = _get_table_from_list_by_name(
"energy_usage", example_db.tables
)
command = ExportDatasetsCommand([example_dataset.id])
contents = dict(command.run())
assert list(contents.keys()) == [
"metadata.yaml",
"datasets/examples/energy_usage.yaml",
"databases/examples.yaml",
]
metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))
type_map = {
column.column_name: str(column.type) for column in example_dataset.columns
}
assert metadata == {
"cache_timeout": None,
"columns": [
{
"column_name": "source",
"description": None,
"expression": "",
"filterable": True,
"groupby": True,
"is_active": True,
"is_dttm": False,
"python_date_format": None,
"type": type_map["source"],
"verbose_name": None,
},
{
"column_name": "target",
"description": None,
"expression": "",
"filterable": True,
"groupby": True,
"is_active": True,
"is_dttm": False,
"python_date_format": None,
"type": type_map["target"],
"verbose_name": None,
},
{
"column_name": "value",
"description": None,
"expression": "",
"filterable": True,
"groupby": True,
"is_active": True,
"is_dttm": False,
"python_date_format": None,
"type": type_map["value"],
"verbose_name": None,
},
],
"database_uuid": str(example_db.uuid),
"default_endpoint": None,
"description": "Energy consumption",
"extra": None,
"fetch_values_predicate": None,
"filter_select_enabled": False,
"main_dttm_col": None,
"metrics": [
{
"d3format": None,
"description": None,
"expression": "COUNT(*)",
"extra": None,
"metric_name": "count",
"metric_type": "count",
"verbose_name": "COUNT(*)",
"warning_text": None,
},
{
"d3format": None,
"description": None,
"expression": "SUM(value)",
"extra": None,
"metric_name": "sum__value",
"metric_type": None,
"verbose_name": None,
"warning_text": None,
},
],
"offset": 0,
"params": None,
"schema": None,
"sql": None,
"table_name": "energy_usage",
"template_params": None,
"uuid": str(example_dataset.uuid),
"version": "1.0.0",
}
@patch("superset.security.manager.g")
def test_export_dataset_command_no_access(self, mock_g):
mock_g.user = security_manager.find_user("gamma")
example_db = get_example_database()
example_dataset = example_db.tables[0]
command = ExportDatasetsCommand([example_dataset.id])
contents = command.run()
with self.assertRaises(DatasetNotFoundError):
next(contents)
@patch("superset.security.manager.g")
def test_export_dataset_command_invalid_dataset(self, mock_g):
mock_g.user = security_manager.find_user("admin")
command = ExportDatasetsCommand([-1])
contents = command.run()
with self.assertRaises(DatasetNotFoundError):
next(contents)
@patch("superset.security.manager.g")
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_export_dataset_command_key_order(self, mock_g):
mock_g.user = security_manager.find_user("admin")
example_db = get_example_database()
example_dataset = _get_table_from_list_by_name(
"energy_usage", example_db.tables
)
command = ExportDatasetsCommand([example_dataset.id])
contents = dict(command.run())
metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
assert list(metadata.keys()) == [
"table_name",
"main_dttm_col",
"description",
"default_endpoint",
"offset",
"cache_timeout",
"schema",
"sql",
"params",
"template_params",
"filter_select_enabled",
"fetch_values_predicate",
"extra",
"uuid",
"metrics",
"columns",
"version",
"database_uuid",
]
class TestImportDatasetsCommand(SupersetTestCase):
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_import_v0_dataset_cli_export(self):
num_datasets = db.session.query(SqlaTable).count()
contents = {
"20201119_181105.yaml": yaml.safe_dump(dataset_cli_export),
}
command = v0.ImportDatasetsCommand(contents)
command.run()
new_num_datasets = db.session.query(SqlaTable).count()
assert new_num_datasets == num_datasets + 1
dataset = (
db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
)
assert (
dataset.params
== '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
)
assert len(dataset.metrics) == 2
assert dataset.main_dttm_col == "ds"
assert dataset.filter_select_enabled
dataset.columns.sort(key=lambda obj: obj.column_name)
expected_columns = [
"num_california",
"ds",
"state",
"gender",
"name",
"num_boys",
"num_girls",
"num",
]
expected_columns.sort()
assert [col.column_name for col in dataset.columns] == expected_columns
db.session.delete(dataset)
db.session.commit()
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_import_v0_dataset_ui_export(self):
num_datasets = db.session.query(SqlaTable).count()
contents = {
"20201119_181105.yaml": yaml.safe_dump(dataset_ui_export),
}
command = v0.ImportDatasetsCommand(contents)
command.run()
new_num_datasets = db.session.query(SqlaTable).count()
assert new_num_datasets == num_datasets + 1
dataset = (
db.session.query(SqlaTable).filter_by(table_name="birth_names_2").one()
)
assert (
dataset.params
== '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
)
assert len(dataset.metrics) == 2
assert dataset.main_dttm_col == "ds"
assert dataset.filter_select_enabled
assert set(col.column_name for col in dataset.columns) == {
"num_california",
"ds",
"state",
"gender",
"name",
"num_boys",
"num_girls",
"num",
}
db.session.delete(dataset)
db.session.commit()
def test_import_v1_dataset(self):
contents = {
"metadata.yaml": yaml.safe_dump(dataset_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
}
command = v1.ImportDatasetsCommand(contents)
command.run()
dataset = (
db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
)
assert dataset.table_name == "imported_dataset"
assert dataset.main_dttm_col is None
assert dataset.description == "This is a dataset that was exported"
assert dataset.default_endpoint == ""
assert dataset.offset == 66
assert dataset.cache_timeout == 55
assert dataset.schema == ""
assert dataset.sql == ""
assert dataset.params is None
assert dataset.template_params is None
assert dataset.filter_select_enabled
assert dataset.fetch_values_predicate is None
assert dataset.extra is None
assert str(dataset.database.uuid) == "b8a1ccd3-779d-4ab7-8ad8-9ab119d7fe89"
assert len(dataset.metrics) == 1
metric = dataset.metrics[0]
assert metric.metric_name == "count"
assert metric.verbose_name == ""
assert metric.metric_type is None
assert metric.expression == "count(1)"
assert metric.description is None
assert metric.d3format is None
assert metric.extra is None
assert metric.warning_text is None
assert len(dataset.columns) == 1
column = dataset.columns[0]
assert column.column_name == "cnt"
assert column.verbose_name == "Count of something"
assert not column.is_dttm
assert column.is_active
assert column.type == "NUMBER"
assert not column.groupby
assert column.filterable
assert column.expression == ""
assert column.description is None
assert column.python_date_format is None
db.session.delete(dataset)
db.session.delete(dataset.database)
db.session.commit()
def test_import_v1_dataset_multiple(self):
contents = {
"metadata.yaml": yaml.safe_dump(dataset_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
}
command = v1.ImportDatasetsCommand(contents, overwrite=True)
command.run()
command.run()
dataset = (
db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
)
assert dataset.table_name == "imported_dataset"
new_config = dataset_config.copy()
new_config["metrics"][0]["metric_name"] = "count2"
new_config["columns"][0]["column_name"] = "cnt2"
contents = {
"metadata.yaml": yaml.safe_dump(dataset_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(new_config),
}
command = v1.ImportDatasetsCommand(contents, overwrite=True)
command.run()
dataset = (
db.session.query(SqlaTable).filter_by(uuid=dataset_config["uuid"]).one()
)
assert len(dataset.metrics) == 1
assert dataset.metrics[0].metric_name == "count2"
assert len(dataset.columns) == 1
assert dataset.columns[0].column_name == "cnt2"
db.session.delete(dataset)
db.session.delete(dataset.database)
db.session.commit()
def test_import_v1_dataset_validation(self):
contents = {
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
}
command = v1.ImportDatasetsCommand(contents)
with pytest.raises(IncorrectVersionError) as excinfo:
command.run()
assert str(excinfo.value) == "Missing metadata.yaml"
contents["metadata.yaml"] = yaml.safe_dump(
{
"version": "2.0.0",
"type": "SqlaTable",
"timestamp": "2020-11-04T21:27:44.423819+00:00",
}
)
command = v1.ImportDatasetsCommand(contents)
with pytest.raises(IncorrectVersionError) as excinfo:
command.run()
assert str(excinfo.value) == "Must be equal to 1.0.0."
contents["metadata.yaml"] = yaml.safe_dump(database_metadata_config)
command = v1.ImportDatasetsCommand(contents)
with pytest.raises(CommandInvalidError) as excinfo:
command.run()
assert str(excinfo.value) == "Error importing dataset"
assert excinfo.value.normalized_messages() == {
"metadata.yaml": {"type": ["Must be equal to SqlaTable."]}
}
broken_config = database_config.copy()
del broken_config["database_name"]
contents["metadata.yaml"] = yaml.safe_dump(dataset_metadata_config)
contents["databases/imported_database.yaml"] = yaml.safe_dump(broken_config)
command = v1.ImportDatasetsCommand(contents)
with pytest.raises(CommandInvalidError) as excinfo:
command.run()
assert str(excinfo.value) == "Error importing dataset"
assert excinfo.value.normalized_messages() == {
"databases/imported_database.yaml": {
"database_name": ["Missing data for required field."],
}
}
def test_import_v1_dataset_existing_database(self):
contents = {
"metadata.yaml": yaml.safe_dump(database_metadata_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
}
command = ImportDatabasesCommand(contents)
command.run()
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
assert len(database.tables) == 0
contents = {
"metadata.yaml": yaml.safe_dump(dataset_metadata_config),
"datasets/imported_dataset.yaml": yaml.safe_dump(dataset_config),
"databases/imported_database.yaml": yaml.safe_dump(database_config),
}
command = v1.ImportDatasetsCommand(contents, overwrite=True)
command.run()
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
assert len(database.tables) == 1
db.session.delete(database.tables[0])
db.session.delete(database)
db.session.commit()
def _get_table_from_list_by_name(name: str, tables: List[Any]):
for table in tables:
if table.table_name == name:
return table
raise ValueError(f"Table {name} does not exists in database")
| true | true |
f72e7f84b4f61db9dcf7fd72e77f2264339e6bce | 1,378 | py | Python | hooks/charmhelpers/__init__.py | plumgrid/upstream-charm-plumgrid-gateway | 7448c43f9735203a052d1366aa96a091f6544446 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | hooks/charmhelpers/__init__.py | plumgrid/upstream-charm-plumgrid-gateway | 7448c43f9735203a052d1366aa96a091f6544446 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | hooks/charmhelpers/__init__.py | plumgrid/upstream-charm-plumgrid-gateway | 7448c43f9735203a052d1366aa96a091f6544446 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
# Ensure ``six`` is importable, installing the distro package on demand.
# NOTE(review): installation shells out to apt-get, so this assumes a
# Debian/Ubuntu host with sufficient privileges (the normal environment
# for a Juju charm) -- confirm before reusing elsewhere.
try:
    import six  # flake8: noqa
except ImportError:
    # Pick the package matching the running interpreter's major version.
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    # Retry now that the dependency should be installed.
    import six  # flake8: noqa
# Same bootstrap for ``yaml`` (PyYAML).
try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa
| 35.333333 | 75 | 0.708999 |
import subprocess
import sys
try:
import six
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six
try:
import yaml
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml
| true | true |
f72e7ff4bdcea5b48d5876c34c9a5f7cc754350a | 161 | py | Python | myproject/studentapp/urls.py | EvgenDEP1/my-project-django | 89955f814a5bd54da9fc37855ec2db2538384c4f | [
"Apache-2.0"
] | null | null | null | myproject/studentapp/urls.py | EvgenDEP1/my-project-django | 89955f814a5bd54da9fc37855ec2db2538384c4f | [
"Apache-2.0"
] | null | null | null | myproject/studentapp/urls.py | EvgenDEP1/my-project-django | 89955f814a5bd54da9fc37855ec2db2538384c4f | [
"Apache-2.0"
] | null | null | null | import studentapp.views as studentapp
from django.urls import path
app_name = 'studentapp'
urlpatterns = [
path('', studentapp.student, name='student'),
]
| 17.888889 | 49 | 0.732919 | import studentapp.views as studentapp
from django.urls import path
app_name = 'studentapp'
urlpatterns = [
path('', studentapp.student, name='student'),
]
| true | true |
f72e81657fea988ec3832105863d1b4015271e88 | 1,335 | py | Python | setup.py | ken0-1n/annot_gnomAD | 32c9284398ea85b8b18a0b8115a2393cae54cd7c | [
"MIT"
] | null | null | null | setup.py | ken0-1n/annot_gnomAD | 32c9284398ea85b8b18a0b8115a2393cae54cd7c | [
"MIT"
] | null | null | null | setup.py | ken0-1n/annot_gnomAD | 32c9284398ea85b8b18a0b8115a2393cae54cd7c | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
def get_version():
with open(path.join(here, "annot_gnomad/version.py")) as hin:
for line in hin:
if line.startswith("__version__"):
version = line.partition('=')[2]
return version.strip().strip('\'"')
raise ValueError('Could not find version.')
setup(
name='annot_gnomad',
version=get_version(),
description="annot_gnomad is annotation structural variants in gnomAD.",
long_description="""""",
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
keywords='Bio-informatics',
author='Ken-ichi Chiba',
author_email='kchiba@hgc.jp',
url='https://github.com/ken0-1n/annot_gnomAD.git',
license='MIT',
packages = find_packages(exclude = ['tests']),
install_requires=[
'cyvcf2'
],
entry_points = {'console_scripts': ['annot_gnomad = annot_gnomad:main']},
test_suite = 'unit_tests.suite'
)
| 31.785714 | 79 | 0.595506 | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
def get_version():
with open(path.join(here, "annot_gnomad/version.py")) as hin:
for line in hin:
if line.startswith("__version__"):
version = line.partition('=')[2]
return version.strip().strip('\'"')
raise ValueError('Could not find version.')
setup(
name='annot_gnomad',
version=get_version(),
description="annot_gnomad is annotation structural variants in gnomAD.",
long_description="""""",
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
keywords='Bio-informatics',
author='Ken-ichi Chiba',
author_email='kchiba@hgc.jp',
url='https://github.com/ken0-1n/annot_gnomAD.git',
license='MIT',
packages = find_packages(exclude = ['tests']),
install_requires=[
'cyvcf2'
],
entry_points = {'console_scripts': ['annot_gnomad = annot_gnomad:main']},
test_suite = 'unit_tests.suite'
)
| true | true |
f72e81800474c7076717f7149cc037c45a9fa546 | 9,652 | py | Python | test/test_ops.py | baheytharwat/tinygrad | acf652c3c524ee3214e9ce58d41113738cb833ae | [
"MIT"
] | null | null | null | test/test_ops.py | baheytharwat/tinygrad | acf652c3c524ee3214e9ce58d41113738cb833ae | [
"MIT"
] | null | null | null | test/test_ops.py | baheytharwat/tinygrad | acf652c3c524ee3214e9ce58d41113738cb833ae | [
"MIT"
] | null | null | null | import os
import torch
import numpy as np
import unittest
import timeit
import functools
from tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device
def helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):
torch.manual_seed(0)
if shps is None:
ts = [torch.tensor(x, requires_grad=True) for x in vals]
else:
ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]
tst = [Tensor(x.detach().numpy()) for x in ts]
out = torch_fxn(*ts)
ret = tinygrad_fxn(*tst)
np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)
if not forward_only:
out.mean().backward()
ret.mean().backward()
for t, tt in zip(ts, tst):
np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)
# speed
torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5
tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5
if not forward_only:
torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5
tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5
else:
torch_fbp, tinygrad_fbp = np.nan, np.nan
print("testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))
class TestOps(unittest.TestCase):
def test_add(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)
def test_sub(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)
def test_mul(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)
def test_div(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)
def test_pow(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)
def test_sqrt(self):
helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)
def test_relu(self):
helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)
def test_leakyrelu(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)
def test_abs(self):
helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)
def test_log(self):
helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)
def test_exp(self):
helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)
def test_sign(self):
helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)
def test_sigmoid(self):
helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)
def test_softplus(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)
def test_relu6(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)
def test_hardswish(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)
def test_mish(self):
def _mish_pytorch(x):
return x*torch.tanh(torch.nn.functional.softplus(x))
helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)
def test_dot(self):
helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)
def test_multidot(self):
helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
def test_sum(self):
helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))
def test_max(self):
helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)
helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))
helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),
vals=[
[[1.0,1.0,0.0,1.0]],
])
helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))
def test_mean_axis(self):
helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))
def test_logsoftmax(self):
helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)
def test_tanh(self):
helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)
def test_topo_sort(self):
helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)
def test_scalar_mul(self):
helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)
def test_scalar_rmul(self):
helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)
def test_scalar_sub(self):
helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)
def test_scalar_rsub(self):
helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)
def test_broadcast_full(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_broadcast_partial(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),
((4,1), (4,5)), ((1,4), (5,4))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
# NOTE: ANE backwards?
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_slice(self):
helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])
def test_pad2d(self):
helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))
def test_transpose(self):
helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))
# This is failing on GPU because the dim is too large
#helper_test_op([(21,22,23,24)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.transpose(order=(3,0,2,1)))
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
def test_detach(self):
helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)
def test_conv2d(self):
for bs in [1,8]:
for cin in [1,3]:
for groups in [1,3] if cin == 3 else [1]:
for H in [1,2,5]:
for W in [1,2,3,5]:
with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):
helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),
lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)
def test_strided_conv2d(self):
bs = 4
cin = 3
H,W = 3,3
with self.subTest(stride := 2):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)
with self.subTest(stride := (2,1)):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)
def test_maxpool2d(self):
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:
with self.subTest(kernel_size=ksz):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),
# TODO: why is this tolerance so high?
lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)
def test_avgpool2d(self):
shape = (32,2,111,28)
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:
with self.subTest(kernel_size=ksz):
helper_test_op([shape],
lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),
lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)
def test_upsample2d_nearest(self):
for sf in [1, 2, 3, 4, 5]:
with self.subTest(scale_factor=sf):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),
lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 47.546798 | 157 | 0.631372 | import os
import torch
import numpy as np
import unittest
import timeit
import functools
from tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device
def helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):
torch.manual_seed(0)
if shps is None:
ts = [torch.tensor(x, requires_grad=True) for x in vals]
else:
ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]
tst = [Tensor(x.detach().numpy()) for x in ts]
out = torch_fxn(*ts)
ret = tinygrad_fxn(*tst)
np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)
if not forward_only:
out.mean().backward()
ret.mean().backward()
for t, tt in zip(ts, tst):
np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)
torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5
tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5
if not forward_only:
torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5
tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5
else:
torch_fbp, tinygrad_fbp = np.nan, np.nan
print("testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))
class TestOps(unittest.TestCase):
def test_add(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)
def test_sub(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)
def test_mul(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)
def test_div(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)
def test_pow(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)
def test_sqrt(self):
helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)
def test_relu(self):
helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)
def test_leakyrelu(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)
def test_abs(self):
helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)
def test_log(self):
helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)
def test_exp(self):
helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)
def test_sign(self):
helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)
def test_sigmoid(self):
helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)
def test_softplus(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)
def test_relu6(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)
def test_hardswish(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)
def test_mish(self):
def _mish_pytorch(x):
return x*torch.tanh(torch.nn.functional.softplus(x))
helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)
def test_dot(self):
helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)
def test_multidot(self):
helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
def test_sum(self):
helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))
def test_max(self):
helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)
helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))
helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),
vals=[
[[1.0,1.0,0.0,1.0]],
])
helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))
def test_mean_axis(self):
helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))
def test_logsoftmax(self):
helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)
def test_tanh(self):
helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)
def test_topo_sort(self):
helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)
def test_scalar_mul(self):
helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)
def test_scalar_rmul(self):
helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)
def test_scalar_sub(self):
helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)
def test_scalar_rsub(self):
helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)
def test_broadcast_full(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_broadcast_partial(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),
((4,1), (4,5)), ((1,4), (5,4))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_slice(self):
helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])
def test_pad2d(self):
helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))
def test_transpose(self):
helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
def test_detach(self):
helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)
def test_conv2d(self):
for bs in [1,8]:
for cin in [1,3]:
for groups in [1,3] if cin == 3 else [1]:
for H in [1,2,5]:
for W in [1,2,3,5]:
with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):
helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),
lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)
def test_strided_conv2d(self):
bs = 4
cin = 3
H,W = 3,3
with self.subTest(stride := 2):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)
with self.subTest(stride := (2,1)):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)
def test_maxpool2d(self):
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:
with self.subTest(kernel_size=ksz):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),
lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)
def test_avgpool2d(self):
shape = (32,2,111,28)
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:
with self.subTest(kernel_size=ksz):
helper_test_op([shape],
lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),
lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)
def test_upsample2d_nearest(self):
for sf in [1, 2, 3, 4, 5]:
with self.subTest(scale_factor=sf):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),
lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)
if __name__ == '__main__':
unittest.main(verbosity=2)
| true | true |
f72e819cdc32ca8ddd85aa0af7d020cbe93c59c3 | 58,151 | py | Python | kubernetes/client/apis/storage_v1_api.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/storage_v1_api.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | 1 | 2021-04-30T20:41:19.000Z | 2021-04-30T20:41:19.000Z | venv/lib/python2.7/site-packages/kubernetes/client/apis/storage_v1_api.py | 784134748/kubernetes-install | 5df59632c2619632e422948b667fb68eab9ff5be | [
"MIT"
] | 1 | 2020-05-09T07:16:55.000Z | 2020-05-09T07:16:55.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class StorageV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_class(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1StorageClass body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_storage_class_with_http_info(body, **kwargs)
else:
(data) = self.create_storage_class_with_http_info(body, **kwargs)
return data
def create_storage_class_with_http_info(self, body, **kwargs):
"""
create a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_class_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1StorageClass body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_storage_class`")
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_storage_class(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_storage_class_with_http_info(**kwargs)
else:
(data) = self.delete_collection_storage_class_with_http_info(**kwargs)
return data
def delete_collection_storage_class_with_http_info(self, **kwargs):
"""
delete collection of StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_storage_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_storage_class(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_storage_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.delete_storage_class_with_http_info(name, body, **kwargs)
return data
def delete_storage_class_with_http_info(self, name, body, **kwargs):
"""
delete a StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_storage_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data
def list_storage_class_with_http_info(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_storage_class_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClassList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_storage_class(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_storage_class_with_http_info(name, body, **kwargs)
return data
def patch_storage_class_with_http_info(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_storage_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_class(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_class(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
def read_storage_class_with_http_info(self, name, **kwargs):
"""
read the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_storage_class_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_class(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_storage_class_with_http_info(name, body, **kwargs)
return data
def replace_storage_class_with_http_info(self, name, body, **kwargs):
"""
replace the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_storage_class_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StorageClass (required)
:param V1StorageClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 62.127137 | 1,390 | 0.646025 |
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..api_client import ApiClient
class StorageV1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_storage_class(self, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_storage_class_with_http_info(body, **kwargs)
else:
(data) = self.create_storage_class_with_http_info(body, **kwargs)
return data
def create_storage_class_with_http_info(self, body, **kwargs):
all_params = ['body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_class" % key
)
params[key] = val
del params['kwargs']
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_storage_class`")
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_storage_class(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_storage_class_with_http_info(**kwargs)
else:
(data) = self.delete_collection_storage_class_with_http_info(**kwargs)
return data
def delete_collection_storage_class_with_http_info(self, **kwargs):
all_params = ['include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_storage_class(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.delete_storage_class_with_http_info(name, body, **kwargs)
return data
def delete_storage_class_with_http_info(self, name, body, **kwargs):
all_params = ['name', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_storage_class" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_storage_class`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_storage_class(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_class_with_http_info(**kwargs)
return data
def list_storage_class_with_http_info(self, **kwargs):
all_params = ['include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClassList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_storage_class(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_storage_class_with_http_info(name, body, **kwargs)
return data
def patch_storage_class_with_http_info(self, name, body, **kwargs):
all_params = ['name', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_class" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_storage_class`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_storage_class(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_storage_class_with_http_info(name, **kwargs)
else:
(data) = self.read_storage_class_with_http_info(name, **kwargs)
return data
def read_storage_class_with_http_info(self, name, **kwargs):
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_storage_class" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_storage_class(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_storage_class_with_http_info(name, body, **kwargs)
return data
def replace_storage_class_with_http_info(self, name, body, **kwargs):
all_params = ['name', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_storage_class" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_storage_class`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_storage_class`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1/storageclasses/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StorageClass',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
f72e81a4f6bf6b24baaad1bd0696c84db8d72574 | 900 | py | Python | tests/test_todo.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | [
"MIT"
] | 386 | 2017-03-31T15:44:40.000Z | 2022-03-31T12:28:41.000Z | tests/test_todo.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | [
"MIT"
] | 502 | 2017-04-03T19:06:06.000Z | 2022-03-10T21:59:23.000Z | tests/test_todo.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | [
"MIT"
] | 148 | 2017-04-03T14:26:38.000Z | 2022-03-14T15:07:00.000Z | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.todo import Todo
from tests import settings
@requests_mock.Mocker()
class TestTodo(unittest.TestCase):
    """Unit tests for the :class:`canvasapi.todo.Todo` model."""

    def setUp(self):
        # A Canvas instance is created only so we can borrow its private
        # requester for constructing the model object.
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        # Minimal attribute payload a grading todo item would carry.
        attributes = {
            "type": "grading",
            "assignment": {},
            "ignore": ".. url ..",
            "ignore_permanently": ".. url ..",
            "html_url": ".. url ..",
            "needs_grading_count": 3,
            "context_type": "course",
            "course_id": 1,
            "group_id": None,
        }
        self.todo = Todo(self.canvas._Canvas__requester, attributes)

    def test_str(self, m):
        # The string form should be a str and mention the item's type.
        rendered = str(self.todo)
        self.assertIsInstance(rendered, str)
        self.assertEqual(rendered, "Todo Item (grading)")
| 26.470588 | 65 | 0.534444 | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.todo import Todo
from tests import settings
@requests_mock.Mocker()
class TestTodo(unittest.TestCase):
    """Unit tests for the Todo model."""

    def setUp(self):
        """Build a Todo backed by a real requester and a canned payload."""
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
        # The Todo model only needs the canvas requester plus a raw
        # attribute dict; no HTTP traffic happens during construction.
        self.todo = Todo(
            self.canvas._Canvas__requester,
            {
                "type": "grading",
                "assignment": {},
                "ignore": ".. url ..",
                "ignore_permanently": ".. url ..",
                "html_url": ".. url ..",
                "needs_grading_count": 3,
                "context_type": "course",
                "course_id": 1,
                "group_id": None,
            },
        )
    def test_str(self, m):
        # NOTE(review): `m` is injected by the class-level requests_mock
        # decorator; it is unused here but required in the signature.
        test_str = str(self.todo)
        self.assertIsInstance(test_str, str)
        self.assertEqual(test_str, "Todo Item (grading)")
| true | true |
f72e81aed967860ab086d392a5b77ebbbf88814a | 347,727 | py | Python | sympy/solvers/ode.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/ode.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | 2 | 2019-08-04T13:10:46.000Z | 2020-11-06T19:59:25.000Z | sympy/solvers/ode.py | CameronKing/sympy | 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | [
"BSD-3-Clause"
] | null | null | null | r"""
This module contains :py:meth:`~sympy.solvers.ode.dsolve` and different helper
functions that it uses.
:py:meth:`~sympy.solvers.ode.dsolve` solves ordinary differential equations.
See the docstring on the various functions for their uses. Note that partial
differential equations support is in ``pde.py``. Note that hint functions
have docstrings describing their various methods, but they are intended for
internal use. Use ``dsolve(ode, func, hint=hint)`` to solve an ODE using a
specific hint. See also the docstring on
:py:meth:`~sympy.solvers.ode.dsolve`.
**Functions in this module**
These are the user functions in this module:
- :py:meth:`~sympy.solvers.ode.dsolve` - Solves ODEs.
- :py:meth:`~sympy.solvers.ode.classify_ode` - Classifies ODEs into
possible hints for :py:meth:`~sympy.solvers.ode.dsolve`.
- :py:meth:`~sympy.solvers.ode.checkodesol` - Checks if an equation is the
solution to an ODE.
- :py:meth:`~sympy.solvers.ode.homogeneous_order` - Returns the
homogeneous order of an expression.
- :py:meth:`~sympy.solvers.ode.infinitesimals` - Returns the infinitesimals
of the Lie group of point transformations of an ODE, such that it is
invariant.
  - :py:meth:`~sympy.solvers.ode.checkinfsol` - Checks if the given infinitesimals
    are the actual infinitesimals of a first order ODE.
These are the non-solver helper functions that are for internal use. The
user should use the various options to
:py:meth:`~sympy.solvers.ode.dsolve` to obtain the functionality provided
by these functions:
- :py:meth:`~sympy.solvers.ode.odesimp` - Does all forms of ODE
simplification.
- :py:meth:`~sympy.solvers.ode.ode_sol_simplicity` - A key function for
comparing solutions by simplicity.
- :py:meth:`~sympy.solvers.ode.constantsimp` - Simplifies arbitrary
constants.
- :py:meth:`~sympy.solvers.ode.constant_renumber` - Renumber arbitrary
constants.
- :py:meth:`~sympy.solvers.ode._handle_Integral` - Evaluate unevaluated
Integrals.
See also the docstrings of these functions.
**Currently implemented solver methods**
The following methods are implemented for solving ordinary differential
equations. See the docstrings of the various hint functions for more
information on each (run ``help(ode)``):
- 1st order separable differential equations.
- 1st order differential equations whose coefficients or `dx` and `dy` are
functions homogeneous of the same order.
- 1st order exact differential equations.
- 1st order linear differential equations.
- 1st order Bernoulli differential equations.
- Power series solutions for first order differential equations.
- Lie Group method of solving first order differential equations.
- 2nd order Liouville differential equations.
- Power series solutions for second order differential equations
at ordinary and regular singular points.
- `n`\th order differential equation that can be solved with algebraic
rearrangement and integration.
- `n`\th order linear homogeneous differential equation with constant
coefficients.
- `n`\th order linear inhomogeneous differential equation with constant
coefficients using the method of undetermined coefficients.
- `n`\th order linear inhomogeneous differential equation with constant
coefficients using the method of variation of parameters.
**Philosophy behind this module**
This module is designed to make it easy to add new ODE solving methods without
having to mess with the solving code for other methods. The idea is that
there is a :py:meth:`~sympy.solvers.ode.classify_ode` function, which takes in
an ODE and tells you what hints, if any, will solve the ODE. It does this
without attempting to solve the ODE, so it is fast. Each solving method is a
hint, and it has its own function, named ``ode_<hint>``. That function takes
in the ODE and any match expression gathered by
:py:meth:`~sympy.solvers.ode.classify_ode` and returns a solved result. If
this result has any integrals in it, the hint function will return an
unevaluated :py:class:`~sympy.integrals.Integral` class.
:py:meth:`~sympy.solvers.ode.dsolve`, which is the user wrapper function
around all of this, will then call :py:meth:`~sympy.solvers.ode.odesimp` on
the result, which, among other things, will attempt to solve the equation for
the dependent variable (the function we are solving for), simplify the
arbitrary constants in the expression, and evaluate any integrals, if the hint
allows it.
**How to add new solution methods**
If you have an ODE that you want :py:meth:`~sympy.solvers.ode.dsolve` to be
able to solve, try to avoid adding special case code here. Instead, try
finding a general method that will solve your ODE, as well as others. This
way, the :py:mod:`~sympy.solvers.ode` module will become more robust, and
unhindered by special case hacks. WolframAlpha and Maple's
DETools[odeadvisor] function are two resources you can use to classify a
specific ODE. It is also better for a method to work with an `n`\th order ODE
instead of only with specific orders, if possible.
To add a new method, there are a few things that you need to do. First, you
need a hint name for your method. Try to name your hint so that it is
unambiguous with all other methods, including ones that may not be implemented
yet. If your method uses integrals, also include a ``hint_Integral`` hint.
If there is more than one way to solve ODEs with your method, include a hint
for each one, as well as a ``<hint>_best`` hint. Your ``ode_<hint>_best()``
function should choose the best using min with ``ode_sol_simplicity`` as the
key argument. See
:py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best`, for example.
The function that uses your method will be called ``ode_<hint>()``, so the
hint must only use characters that are allowed in a Python function name
(alphanumeric characters and the underscore '``_``' character). Include a
function for every hint, except for ``_Integral`` hints
(:py:meth:`~sympy.solvers.ode.dsolve` takes care of those automatically).
Hint names should be all lowercase, unless a word is commonly capitalized
(such as Integral or Bernoulli). If you have a hint that you do not want to
run with ``all_Integral`` that doesn't have an ``_Integral`` counterpart (such
as a best hint that would defeat the purpose of ``all_Integral``), you will
need to remove it manually in the :py:meth:`~sympy.solvers.ode.dsolve` code.
See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for
guidelines on writing a hint name.
Determine *in general* how the solutions returned by your method compare with
other methods that can potentially solve the same ODEs. Then, put your hints
in the :py:data:`~sympy.solvers.ode.allhints` tuple in the order that they
should be called. The ordering of this tuple determines which hints are
default. Note that exceptions are ok, because it is easy for the user to
choose individual hints with :py:meth:`~sympy.solvers.ode.dsolve`. In
general, ``_Integral`` variants should go at the end of the list, and
``_best`` variants should go before the various hints they apply to. For
example, the ``undetermined_coefficients`` hint comes before the
``variation_of_parameters`` hint because, even though variation of parameters
is more general than undetermined coefficients, undetermined coefficients
generally returns cleaner results for the ODEs that it can solve than
variation of parameters does, and it does not require integration, so it is
much faster.
Next, you need to have a match expression or a function that matches the type
of the ODE, which you should put in :py:meth:`~sympy.solvers.ode.classify_ode`
(if the match function is more than just a few lines, like
:py:meth:`~sympy.solvers.ode._undetermined_coefficients_match`, it should go
outside of :py:meth:`~sympy.solvers.ode.classify_ode`). It should match the
ODE without solving for it as much as possible, so that
:py:meth:`~sympy.solvers.ode.classify_ode` remains fast and is not hindered by
bugs in solving code. Be sure to consider corner cases. For example, if your
solution method involves dividing by something, make sure you exclude the case
where that division will be 0.
In most cases, the matching of the ODE will also give you the various parts
that you need to solve it. You should put that in a dictionary (``.match()``
will do this for you), and add that as ``matching_hints['hint'] = matchdict``
in the relevant part of :py:meth:`~sympy.solvers.ode.classify_ode`.
:py:meth:`~sympy.solvers.ode.classify_ode` will then send this to
:py:meth:`~sympy.solvers.ode.dsolve`, which will send it to your function as
the ``match`` argument. Your function should be named ``ode_<hint>(eq, func,
order, match)``. If you need to send more information, put it in the ``match``
dictionary. For example, if you had to substitute in a dummy variable in
:py:meth:`~sympy.solvers.ode.classify_ode` to match the ODE, you will need to
pass it to your function using the `match` dict to access it. You can access
the independent variable using ``func.args[0]``, and the dependent variable
(the function you are trying to solve for) as ``func.func``. If, while trying
to solve the ODE, you find that you cannot, raise ``NotImplementedError``.
:py:meth:`~sympy.solvers.ode.dsolve` will catch this error with the ``all``
meta-hint, rather than causing the whole routine to fail.
Add a docstring to your function that describes the method employed. Like
with anything else in SymPy, you will need to add a doctest to the docstring,
in addition to real tests in ``test_ode.py``. Try to maintain consistency
with the other hint functions' docstrings. Add your method to the list at the
top of this docstring. Also, add your method to ``ode.rst`` in the
``docs/src`` directory, so that the Sphinx docs will pull its docstring into
the main SymPy documentation. Be sure to make the Sphinx documentation by
running ``make html`` from within the doc directory to verify that the
docstring formats correctly.
If your solution method involves integrating, use :py:meth:`Integral()
<sympy.integrals.integrals.Integral>` instead of
:py:meth:`~sympy.core.expr.Expr.integrate`. This allows the user to bypass
hard/slow integration by using the ``_Integral`` variant of your hint. In
most cases, calling :py:meth:`sympy.core.basic.Basic.doit` will integrate your
solution. If this is not the case, you will need to write special code in
:py:meth:`~sympy.solvers.ode._handle_Integral`. Arbitrary constants should be
symbols named ``C1``, ``C2``, and so on. All solution methods should return
an equality instance. If you need an arbitrary number of arbitrary constants,
you can use ``constants = numbered_symbols(prefix='C', cls=Symbol, start=1)``.
If it is possible to solve for the dependent function in a general way, do so.
Otherwise, do as best as you can, but do not call solve in your
``ode_<hint>()`` function. :py:meth:`~sympy.solvers.ode.odesimp` will attempt
to solve the solution for you, so you do not need to do that. Lastly, if your
ODE has a common simplification that can be applied to your solutions, you can
add a special case in :py:meth:`~sympy.solvers.ode.odesimp` for it. For
example, solutions returned from the ``1st_homogeneous_coeff`` hints often
have many :py:meth:`~sympy.functions.log` terms, so
:py:meth:`~sympy.solvers.ode.odesimp` calls
:py:meth:`~sympy.simplify.simplify.logcombine` on them (it also helps to write
the arbitrary constant as ``log(C1)`` instead of ``C1`` in this case). Also
consider common ways that you can rearrange your solution to have
:py:meth:`~sympy.solvers.ode.constantsimp` take better advantage of it. It is
better to put simplification in :py:meth:`~sympy.solvers.ode.odesimp` than in
your method, because it can then be turned off with the simplify flag in
:py:meth:`~sympy.solvers.ode.dsolve`. If you have any extraneous
simplification in your function, be sure to only run it using ``if
match.get('simplify', True):``, especially if it can be slow or if it can
reduce the domain of the solution.
Finally, as with every contribution to SymPy, your method will need to be
tested. Add a test for each method in ``test_ode.py``. Follow the
conventions there, i.e., test the solver using ``dsolve(eq, f(x),
hint=your_hint)``, and also test the solution using
:py:meth:`~sympy.solvers.ode.checkodesol` (you can put these in a separate
tests and skip/XFAIL if it runs too slow/doesn't work). Be sure to call your
hint specifically in :py:meth:`~sympy.solvers.ode.dsolve`, that way the test
won't be broken simply by the introduction of another matching hint. If your
method works for higher order (>1) ODEs, you will need to run ``sol =
constant_renumber(sol, 'C', 1, order)`` for each solution, where ``order`` is
the order of the ODE. This is because ``constant_renumber`` renumbers the
arbitrary constants by printing order, which is platform dependent. Try to
test every corner case of your solver, including a range of orders if it is a
`n`\th order solver, but if your solver is slow, such as if it involves hard
integration, try to keep the test run time down.
Feel free to refactor existing hints to avoid duplicating code or creating
inconsistencies. If you can show that your method exactly duplicates an
existing method, including in the simplicity and speed of obtaining the
solutions, then you can remove the old, less general method. The existing
code is tested extensively in ``test_ode.py``, so if anything is broken, one
of those tests will surely fail.
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import islice
from sympy.core import Add, S, Mul, Pow, oo
from sympy.core.compatibility import ordered, iterable, is_sequence, range, string_types
from sympy.core.containers import Tuple
from sympy.core.exprtools import factor_terms
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.function import (Function, Derivative, AppliedUndef, diff,
expand, expand_mul, Subs, _mexpand)
from sympy.core.multidimensional import vectorize
from sympy.core.numbers import NaN, zoo, I, Number
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, Dummy, symbols
from sympy.core.sympify import sympify
from sympy.logic.boolalg import (BooleanAtom, And, Not, BooleanTrue,
BooleanFalse)
from sympy.functions import cos, exp, im, log, re, sin, tan, sqrt, \
atan2, conjugate, Piecewise
from sympy.functions.combinatorial.factorials import factorial
from sympy.integrals.integrals import Integral, integrate
from sympy.matrices import wronskian, Matrix, eye, zeros
from sympy.polys import (Poly, RootOf, rootof, terms_gcd,
PolynomialError, lcm, roots)
from sympy.polys.polyroots import roots_quartic
from sympy.polys.polytools import cancel, degree, div
from sympy.series import Order
from sympy.series.series import series
from sympy.simplify import collect, logcombine, powsimp, separatevars, \
simplify, trigsimp, posify, cse
from sympy.simplify.powsimp import powdenest
from sympy.simplify.radsimp import collect_const
from sympy.solvers import solve
from sympy.solvers.pde import pdsolve
from sympy.utilities import numbered_symbols, default_sort_key, sift
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
#: This is a list of hints in the order that they should be preferred by
#: :py:meth:`~sympy.solvers.ode.classify_ode`. In general, hints earlier in the
#: list should produce simpler solutions than those later in the list (for
#: ODEs that fit both). For now, the order of this list is based on empirical
#: observations by the developers of SymPy.
#:
#: The hint used by :py:meth:`~sympy.solvers.ode.dsolve` for a specific ODE
#: can be overridden (see the docstring).
#:
#: In general, ``_Integral`` hints are grouped at the end of the list, unless
#: there is a method that returns an unevaluable integral most of the time
#: (which go near the end of the list anyway). ``default``, ``all``,
#: ``best``, and ``all_Integral`` meta-hints should not be included in this
#: list, but ``_best`` and ``_Integral`` hints should be included.
allhints = (
    "nth_algebraic",
    "separable",
    "1st_exact",
    "1st_linear",
    "Bernoulli",
    "Riccati_special_minus2",
    "1st_homogeneous_coeff_best",
    "1st_homogeneous_coeff_subs_indep_div_dep",
    "1st_homogeneous_coeff_subs_dep_div_indep",
    "almost_linear",
    "linear_coefficients",
    "separable_reduced",
    "1st_power_series",
    "lie_group",
    "nth_linear_constant_coeff_homogeneous",
    "nth_linear_euler_eq_homogeneous",
    "nth_linear_constant_coeff_undetermined_coefficients",
    "nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients",
    "nth_linear_constant_coeff_variation_of_parameters",
    "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters",
    "Liouville",
    "nth_order_reducible",
    "2nd_power_series_ordinary",
    "2nd_power_series_regular",
    # From here on, the ``_Integral`` variants, grouped last (see the note
    # above about ordering).
    "nth_algebraic_Integral",
    "separable_Integral",
    "1st_exact_Integral",
    "1st_linear_Integral",
    "Bernoulli_Integral",
    "1st_homogeneous_coeff_subs_indep_div_dep_Integral",
    "1st_homogeneous_coeff_subs_dep_div_indep_Integral",
    "almost_linear_Integral",
    "linear_coefficients_Integral",
    "separable_reduced_Integral",
    "nth_linear_constant_coeff_variation_of_parameters_Integral",
    "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral",
    "Liouville_Integral",
    )
#: Names of the heuristics used to search for the infinitesimals ``xi`` and
#: ``eta`` of a Lie group of point transformations (see
#: :py:meth:`~sympy.solvers.ode.infinitesimals` and the ``lie_group`` hint).
lie_heuristics = (
    "abaco1_simple",
    "abaco1_product",
    "abaco2_similar",
    "abaco2_unique_unknown",
    "abaco2_unique_general",
    "linear",
    "function_sum",
    "bivariate",
    "chi"
    )
def sub_func_doit(eq, func, new):
    r"""
    Substitute ``new`` for ``func`` in ``eq``, evaluating the derivatives of
    ``func`` that the substitution makes concrete.

    A plain substitution would leave unevaluated ``Derivative`` objects
    behind; this helper replaces each derivative of ``func`` appearing in
    ``eq`` by the corresponding evaluated derivative of ``new``.

    Examples
    ========

    >>> from sympy import Derivative, symbols, Function
    >>> from sympy.solvers.ode import sub_func_doit
    >>> x, z = symbols('x, z')
    >>> y = Function('y')
    >>> sub_func_doit(3*Derivative(y(x), x) - 1, y(x), x)
    2
    >>> sub_func_doit(x*Derivative(y(x), x) - y(x)**2 + y(x), y(x),
    ... 1/(x*(z + 1/x)))
    x*(-1/(x**2*(z + 1/x)) + 1/(x**3*(z + 1/x)**2)) + 1/(x*(z + 1/x))
    ...- 1/(x**2*(z + 1/x)**2)
    """
    replacements = {func: new}
    for deriv in eq.atoms(Derivative):
        if deriv.expr == func:
            # A derivative taken directly of func: differentiate the
            # replacement expression the same number of times.
            replacements[deriv] = new.diff(*deriv.variable_count)
        else:
            # A derivative of some larger expression containing func:
            # substitute inside it and evaluate shallowly.
            replacements[deriv] = deriv.xreplace({func: new}).doit(deep=False)
    return eq.xreplace(replacements)
def get_numbered_constants(eq, num=1, start=1, prefix='C'):
    """
    Return ``num`` constant symbols that do not occur in ``eq`` already.

    A single symbol is returned when ``num == 1``; otherwise a tuple of
    ``num`` symbols is returned.
    """
    gen = iter_numbered_constants(eq, start, prefix)
    constants = list(islice(gen, num))
    return constants[0] if num == 1 else tuple(constants)
def iter_numbered_constants(eq, start=1, prefix='C'):
    """
    Return an iterator of constant symbols (``C1``, ``C2``, ...) that do
    not occur in ``eq`` already.
    """
    if isinstance(eq, Expr):
        eqs = [eq]
    elif iterable(eq):
        eqs = eq
    else:
        raise ValueError("Expected Expr or iterable but got %s" % eq)
    # Exclude every free symbol already present in the equation(s).
    exclude = set()
    for expr in eqs:
        exclude.update(expr.free_symbols)
    # Also exclude symbols that share a name with any applied function, so
    # a constant is never confused with e.g. the ``f`` of ``f(x)``.
    applied = set()
    for expr in eqs:
        applied.update(expr.atoms(Function))
    if applied:
        exclude |= {Symbol(str(f.func)) for f in applied}
    return numbered_symbols(start=start, prefix=prefix, exclude=exclude)
def dsolve(eq, func=None, hint="default", simplify=True,
    ics= None, xi=None, eta=None, x0=0, n=6, **kwargs):
    r"""
    Solves any (supported) kind of ordinary differential equation and
    system of ordinary differential equations.

    For single ordinary differential equation
    =========================================

    It is classified under this when number of equation in ``eq`` is one.

    **Usage**

        ``dsolve(eq, f(x), hint)`` -> Solve ordinary differential equation
        ``eq`` for function ``f(x)``, using method ``hint``.

    **Details**

        ``eq`` can be any supported ordinary differential equation (see the
            :py:mod:`~sympy.solvers.ode` docstring for supported methods).
            This can either be an :py:class:`~sympy.core.relational.Equality`,
            or an expression, which is assumed to be equal to ``0``.

        ``f(x)`` is a function of one variable whose derivatives in that
            variable make up the ordinary differential equation ``eq``.  In
            many cases it is not necessary to provide this; it will be
            autodetected (and an error raised if it couldn't be detected).

        ``hint`` is the solving method that you want dsolve to use.  Use
            ``classify_ode(eq, f(x))`` to get all of the possible hints for an
            ODE.  The default hint, ``default``, will use whatever hint is
            returned first by :py:meth:`~sympy.solvers.ode.classify_ode`.  See
            Hints below for more options that you can use for hint.

        ``simplify`` enables simplification by
            :py:meth:`~sympy.solvers.ode.odesimp`.  See its docstring for more
            information.  Turn this off, for example, to disable solving of
            solutions for ``func`` or simplification of arbitrary constants.
            It will still integrate with this hint.  Note that the solution may
            contain more arbitrary constants than the order of the ODE with
            this option enabled.

        ``xi`` and ``eta`` are the infinitesimal functions of an ordinary
            differential equation.  They are the infinitesimals of the Lie group
            of point transformations for which the differential equation is
            invariant.  The user can specify values for the infinitesimals.  If
            nothing is specified, ``xi`` and ``eta`` are calculated using
            :py:meth:`~sympy.solvers.ode.infinitesimals` with the help of various
            heuristics.

        ``ics`` is the set of initial/boundary conditions for the differential equation.
          It should be given in the form of ``{f(x0): x1, f(x).diff(x).subs(x, x2):
          x3}`` and so on.  For power series solutions, if no initial
          conditions are specified ``f(0)`` is assumed to be ``C0`` and the power
          series solution is calculated about 0.

        ``x0`` is the point about which the power series solution of a differential
          equation is to be evaluated.

        ``n`` gives the exponent of the dependent variable up to which the power series
          solution of a differential equation is to be evaluated.

    **Hints**

        Aside from the various solving methods, there are also some meta-hints
        that you can pass to :py:meth:`~sympy.solvers.ode.dsolve`:

        ``default``:
                This uses whatever hint is returned first by
                :py:meth:`~sympy.solvers.ode.classify_ode`.  This is the
                default argument to :py:meth:`~sympy.solvers.ode.dsolve`.

        ``all``:
                To make :py:meth:`~sympy.solvers.ode.dsolve` apply all
                relevant classification hints, use ``dsolve(ODE, func,
                hint="all")``.  This will return a dictionary of
                ``hint:solution`` terms.  If a hint causes dsolve to raise the
                ``NotImplementedError``, value of that hint's key will be the
                exception object raised.  The dictionary will also include
                some special keys:

                - ``order``: The order of the ODE.  See also
                  :py:meth:`~sympy.solvers.deutils.ode_order` in
                  ``deutils.py``.
                - ``best``: The simplest hint; what would be returned by
                  ``best`` below.
                - ``best_hint``: The hint that would produce the solution
                  given by ``best``.  If more than one hint produces the best
                  solution, the first one in the tuple returned by
                  :py:meth:`~sympy.solvers.ode.classify_ode` is chosen.
                - ``default``: The solution that would be returned by default.
                  This is the one produced by the hint that appears first in
                  the tuple returned by
                  :py:meth:`~sympy.solvers.ode.classify_ode`.

        ``all_Integral``:
                This is the same as ``all``, except if a hint also has a
                corresponding ``_Integral`` hint, it only returns the
                ``_Integral`` hint.  This is useful if ``all`` causes
                :py:meth:`~sympy.solvers.ode.dsolve` to hang because of a
                difficult or impossible integral.  This meta-hint will also be
                much faster than ``all``, because
                :py:meth:`~sympy.core.expr.Expr.integrate` is an expensive
                routine.

        ``best``:
                To have :py:meth:`~sympy.solvers.ode.dsolve` try all methods
                and return the simplest one.  This takes into account whether
                the solution is solvable in the function, whether it contains
                any Integral classes (i.e.  unevaluatable integrals), and
                which one is the shortest in size.

        See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for
        more info on hints, and the :py:mod:`~sympy.solvers.ode` docstring for
        a list of all supported hints.

    **Tips**

        - You can declare the derivative of an unknown function this way:

            >>> from sympy import Function, Derivative
            >>> from sympy.abc import x # x is the independent variable
            >>> f = Function("f")(x) # f is a function of x
            >>> # f_ will be the derivative of f with respect to x
            >>> f_ = Derivative(f, x)

        - See ``test_ode.py`` for many tests, which serves also as a set of
          examples for how to use :py:meth:`~sympy.solvers.ode.dsolve`.
        - :py:meth:`~sympy.solvers.ode.dsolve` always returns an
          :py:class:`~sympy.core.relational.Equality` class (except for the
          case when the hint is ``all`` or ``all_Integral``).  If possible, it
          solves the solution explicitly for the function being solved for.
          Otherwise, it returns an implicit solution.
        - Arbitrary constants are symbols named ``C1``, ``C2``, and so on.
        - Because all solutions should be mathematically equivalent, some
          hints may return the exact same result for an ODE. Often, though,
          two different hints will return the same solution formatted
          differently.  The two should be equivalent. Also note that sometimes
          the values of the arbitrary constants in two different solutions may
          not be the same, because one constant may have "absorbed" other
          constants into it.
        - Do ``help(ode.ode_<hintname>)`` to get help more information on a
          specific hint, where ``<hintname>`` is the name of a hint without
          ``_Integral``.

    For system of ordinary differential equations
    =============================================

    **Usage**

        ``dsolve(eq, func)`` -> Solve a system of ordinary differential
        equations ``eq`` for ``func`` being list of functions including
        `x(t)`, `y(t)`, `z(t)` where number of functions in the list depends
        upon the number of equations provided in ``eq``.

    **Details**

        ``eq`` can be any supported system of ordinary differential equations
        This can either be an :py:class:`~sympy.core.relational.Equality`,
        or an expression, which is assumed to be equal to ``0``.

        ``func`` holds ``x(t)`` and ``y(t)`` being functions of one variable which
        together with some of their derivatives make up the system of ordinary
        differential equation ``eq``.  It is not necessary to provide this; it
        will be autodetected (and an error raised if it couldn't be detected).

    **Hints**

        The hints are formed by parameters returned by classify_sysode, combining
        them give hints name used later for forming method name.

    Examples
    ========

    >>> from sympy import Function, dsolve, Eq, Derivative, sin, cos, symbols
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(Derivative(f(x), x, x) + 9*f(x), f(x))
    Eq(f(x), C1*sin(3*x) + C2*cos(3*x))

    >>> eq = sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x)
    >>> dsolve(eq, hint='1st_exact')
    [Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))]
    >>> dsolve(eq, hint='almost_linear')
    [Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))]
    >>> t = symbols('t')
    >>> x, y = symbols('x, y', cls=Function)
    >>> eq = (Eq(Derivative(x(t),t), 12*t*x(t) + 8*y(t)), Eq(Derivative(y(t),t), 21*x(t) + 7*t*y(t)))
    >>> dsolve(eq)
    [Eq(x(t), C1*x0(t) + C2*x0(t)*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)**2, t)),
    Eq(y(t), C1*y0(t) + C2*(y0(t)*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)**2, t) +
    exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)))]
    >>> eq = (Eq(Derivative(x(t),t),x(t)*y(t)*sin(t)), Eq(Derivative(y(t),t),y(t)**2*sin(t)))
    >>> dsolve(eq)
    {Eq(x(t), -exp(C1)/(C2*exp(C1) - cos(t))), Eq(y(t), -1/(C1 - cos(t)))}
    """
    if iterable(eq):
        # ``eq`` is a list/tuple: treat it as a system of ODEs.
        match = classify_sysode(eq, func)
        eq = match['eq']
        order = match['order']
        func = match['func']
        # The independent variable, taken from the first Derivative found in
        # the first equation.
        t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]

        # keep highest order term coefficient positive
        for i in range(len(eq)):
            for func_ in func:
                if isinstance(func_, list):
                    pass
                else:
                    if eq[i].coeff(diff(func[i],t,ode_order(eq[i], func[i]))).is_negative:
                        eq[i] = -eq[i]
        match['eq'] = eq
        if len(set(order.values()))!=1:
            raise ValueError("It solves only those systems of equations whose orders are equal")
        match['order'] = list(order.values())[0]
        def recur_len(l):
            # Count functions, including those nested inside sub-lists.
            return sum(recur_len(item) if isinstance(item,list) else 1 for item in l)
        if recur_len(func) != len(eq):
            raise ValueError("dsolve() and classify_sysode() work with "
            "number of functions being equal to number of equations")
        if match['type_of_equation'] is None:
            raise NotImplementedError
        else:
            # Dispatch to the ``sysode_*`` solver whose name is built from
            # the classification (linearity, number of equations, order).
            if match['is_linear'] == True:
                if match['no_of_equation'] > 3:
                    solvefunc = globals()['sysode_linear_neq_order%(order)s' % match]
                else:
                    solvefunc = globals()['sysode_linear_%(no_of_equation)seq_order%(order)s' % match]
            else:
                solvefunc = globals()['sysode_nonlinear_%(no_of_equation)seq_order%(order)s' % match]
            sols = solvefunc(match)
            if ics:
                # The arbitrary constants are the symbols in the solutions
                # that do not appear in the equations themselves.
                constants = Tuple(*sols).free_symbols - Tuple(*eq).free_symbols
                solved_constants = solve_ics(sols, func, constants, ics)
                return [sol.subs(solved_constants) for sol in sols]
            return sols
    else:
        given_hint = hint  # hint given by the user

        # See the docstring of _desolve for more details.
        hints = _desolve(eq, func=func,
            hint=hint, simplify=True, xi=xi, eta=eta, type='ode', ics=ics,
            x0=x0, n=n, **kwargs)

        eq = hints.pop('eq', eq)
        all_ = hints.pop('all', False)
        if all_:
            retdict = {}
            failed_hints = {}
            gethints = classify_ode(eq, dict=True)
            orderedhints = gethints['ordered_hints']
            for hint in hints:
                try:
                    rv = _helper_simplify(eq, hint, hints[hint], simplify)
                except NotImplementedError as detail:
                    # Failures are reported as values in the returned dict
                    # rather than aborting the whole run.
                    failed_hints[hint] = detail
                else:
                    retdict[hint] = rv
            func = hints[hint]['func']

            retdict['best'] = min(list(retdict.values()), key=lambda x:
                ode_sol_simplicity(x, func, trysolving=not simplify))
            if given_hint == 'best':
                return retdict['best']
            for i in orderedhints:
                if retdict['best'] == retdict.get(i, None):
                    retdict['best_hint'] = i
                    break
            retdict['default'] = gethints['default']
            retdict['order'] = gethints['order']
            retdict.update(failed_hints)
            return retdict
        else:
            # The key 'hint' stores the hint needed to be solved for.
            hint = hints['hint']
            return _helper_simplify(eq, hint, hints, simplify, ics=ics)
def _helper_simplify(eq, hint, match, simplify=True, ics=None, **kwargs):
    r"""
    Helper function of dsolve that calls the respective
    :py:mod:`~sympy.solvers.ode` functions to solve for the ordinary
    differential equations. This minimizes the computation in calling
    :py:meth:`~sympy.solvers.deutils._desolve` multiple times.
    """
    info = match
    # An ``_Integral`` hint shares its solver with the base hint; only the
    # post-processing differs.
    if hint.endswith('_Integral'):
        base_hint = hint[:-len('_Integral')]
    else:
        base_hint = hint
    solvefunc = globals()['ode_' + base_hint]
    func = info['func']
    order = info['order']
    hintmatch = info[hint]
    free = eq.free_symbols

    def _new_constants(sol):
        # Symbols in the solution but not in the equation are the arbitrary
        # constants introduced by the solver.
        return sol.free_symbols.difference(free)

    if simplify:
        # odesimp() will attempt to integrate, if necessary, apply
        # constantsimp(), attempt to solve for func, and apply any other
        # hint specific simplifications
        sols = solvefunc(eq, func, order, hintmatch)
        if isinstance(sols, Expr):
            rv = odesimp(eq, sols, func, hint)
        else:
            rv = [odesimp(eq, s, func, hint) for s in sols]
    else:
        # We still want to integrate (you can disable it separately with
        # the hint); some hints can take advantage of this option.
        hintmatch['simplify'] = False
        rv = _handle_Integral(solvefunc(eq, func, order, hintmatch), func, hint)
    if ics and 'power_series' not in hint:
        if isinstance(rv, Expr):
            solved_constants = solve_ics([rv], [info['func']], _new_constants(rv), ics)
            rv = rv.subs(solved_constants)
        else:
            # Keep only those solutions whose constants can be fitted to
            # the initial conditions.
            fitted = []
            for sol in rv:
                try:
                    solved_constants = solve_ics([sol], [info['func']], _new_constants(sol), ics)
                except ValueError:
                    continue
                fitted.append(sol.subs(solved_constants))
            if len(fitted) == 1:
                return fitted[0]
            rv = fitted
    return rv
def solve_ics(sols, funcs, constants, ics):
    """
    Solve for the constants given initial conditions

    ``sols`` is a list of solutions.

    ``funcs`` is a list of functions.

    ``constants`` is a list of constants.

    ``ics`` is the set of initial/boundary conditions for the differential
    equation. It should be given in the form of ``{f(x0): x1,
    f(x).diff(x).subs(x, x2): x3}`` and so on.

    Returns a dictionary mapping constants to values.
    ``solution.subs(constants)`` will replace the constants in ``solution``.

    Example
    =======
    >>> # From dsolve(f(x).diff(x) - f(x), f(x))
    >>> from sympy import symbols, Eq, exp, Function
    >>> from sympy.solvers.ode import solve_ics
    >>> f = Function('f')
    >>> x, C1 = symbols('x C1')
    >>> sols = [Eq(f(x), C1*exp(x))]
    >>> funcs = [f(x)]
    >>> constants = [C1]
    >>> ics = {f(0): 2}
    >>> solved_constants = solve_ics(sols, funcs, constants, ics)
    >>> solved_constants
    {C1: 2}
    >>> sols[0].subs(solved_constants)
    Eq(f(x), 2*exp(x))

    """
    # Assume ics are of the form f(x0): value or Subs(diff(f(x), x, n), (x,
    # x0)): value (currently checked by classify_ode). To solve, replace x
    # with x0, f(x0) with value, then solve for constants. For f^(n)(x0),
    # differentiate the solution n times, so that f^(n)(x) appears.
    x = funcs[0].args[0]
    diff_sols = []      # solutions differentiated to match derivative ics
    subs_sols = []      # equations obtained by substituting each ic
    diff_variables = set()  # differentiation signatures already processed
    for funcarg, value in ics.items():
        if isinstance(funcarg, AppliedUndef):
            # Plain condition f(x0) = value: substitute into the solution
            # for the matching function directly.
            x0 = funcarg.args[0]
            matching_func = [f for f in funcs if f.func == funcarg.func][0]
            S = sols
        elif isinstance(funcarg, (Subs, Derivative)):
            if isinstance(funcarg, Subs):
                # Make sure it stays a subs. Otherwise subs below will produce
                # a different looking term.
                funcarg = funcarg.doit()
            if isinstance(funcarg, Subs):
                deriv = funcarg.expr
                x0 = funcarg.point[0]
                variables = funcarg.expr.variables
                matching_func = deriv
            elif isinstance(funcarg, Derivative):
                deriv = funcarg
                x0 = funcarg.variables[0]
                variables = (x,)*len(funcarg.variables)
                matching_func = deriv.subs(x0, x)
            # Differentiate each relevant solution once per distinct
            # differentiation signature, caching via diff_variables.
            if variables not in diff_variables:
                for sol in sols:
                    if sol.has(deriv.expr.func):
                        diff_sols.append(Eq(sol.lhs.diff(*variables), sol.rhs.diff(*variables)))
                diff_variables.add(variables)
            S = diff_sols
        else:
            raise NotImplementedError("Unrecognized initial condition")

        for sol in S:
            if sol.has(matching_func):
                sol2 = sol
                sol2 = sol2.subs(x, x0)
                sol2 = sol2.subs(funcarg, value)
                # This check is necessary because of issue #15724
                if not isinstance(sol2, BooleanAtom) or not subs_sols:
                    subs_sols = [s for s in subs_sols if not isinstance(s, BooleanAtom)]
                    subs_sols.append(sol2)

    # TODO: Use solveset here
    try:
        solved_constants = solve(subs_sols, constants, dict=True)
    except NotImplementedError:
        solved_constants = []

    # XXX: We can't differentiate between the solution not existing because of
    # invalid initial conditions, and not existing because solve is not smart
    # enough. If we could use solveset, this might be improvable, but for now,
    # we use NotImplementedError in this case.
    if not solved_constants:
        raise ValueError("Couldn't solve for initial conditions")

    if solved_constants == True:
        raise ValueError("Initial conditions did not produce any solutions for constants. Perhaps they are degenerate.")

    if len(solved_constants) > 1:
        raise NotImplementedError("Initial conditions produced too many solutions for constants")

    return solved_constants[0]
def classify_ode(eq, func=None, dict=False, ics=None, **kwargs):
r"""
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications near the beginning of the list will
produce better solutions faster than those near the end, though there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~sympy.integrals.Integral`
class in it. Note that a hint may do this anyway if
:py:meth:`~sympy.core.expr.Expr.integrate` cannot do the integral,
though just using an ``_Integral`` will do so much faster. Indeed, an
``_Integral`` hint will always be faster than its corresponding hint
without ``_Integral`` because
:py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine.
If :py:meth:`~sympy.solvers.ode.dsolve` hangs, it is probably because
:py:meth:`~sympy.core.expr.Expr.integrate` is hanging on a tough or
impossible integral. Try using an ``_Integral`` hint or
``all_Integral`` to get it return something.
Note that some hints do not have ``_Integral`` counterparts. This is
because :py:meth:`~sympy.solvers.ode.integrate` is not used in solving
the ODE for those method. For example, `n`\th order linear homogeneous
ODEs with constant coefficients do not require integration to solve,
so there is no ``nth_linear_homogeneous_constant_coeff_Integrate``
hint. You can easily evaluate any unevaluated
:py:class:`~sympy.integrals.Integral`\s in an expression by doing
``expr.doit()``.
Ordinals
Some hints contain an ordinal such as ``1st_linear``. This is to help
differentiate them from other hints, as well as from other methods
that may not be implemented yet. If a hint has ``nth`` in it, such as
the ``nth_linear`` hints, this means that the method used to applies
to ODEs of any order.
``indep`` and ``dep``
Some hints contain the words ``indep`` or ``dep``. These reference
the independent variable and the dependent function, respectively. For
example, if an ODE is in terms of `f(x)`, then ``indep`` will refer to
`x` and ``dep`` will refer to `f`.
``subs``
If a hints has the word ``subs`` in it, it means the the ODE is solved
by substituting the expression given after the word ``subs`` for a
single dummy variable. This is usually in terms of ``indep`` and
``dep`` as above. The substituted expression will be written only in
characters allowed for names of Python objects, meaning operators will
be spelled out. For example, ``indep``/``dep`` will be written as
``indep_div_dep``.
``coeff``
The word ``coeff`` in a hint refers to the coefficients of something
in the ODE, usually of the derivative terms. See the docstring for
the individual methods for more info (``help(ode)``). This is
contrast to ``coefficients``, as in ``undetermined_coefficients``,
which refers to the common name of a method.
``_best``
Methods that have more than one fundamental way to solve will have a
hint for each sub-method and a ``_best`` meta-classification. This
will evaluate all hints and return the best, using the same
considerations as the normal ``best`` meta-hint.
Examples
========
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('nth_algebraic', 'separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous', 'nth_algebraic_Integral',
'separable_Integral', '1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4)
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
ics = sympify(ics)
prep = kwargs.pop('prep', True)
if func and len(func.args) != 1:
raise ValueError("dsolve() and classify_ode() only "
"work with functions of one variable, not %s" % func)
if prep or func is None:
eq, func_ = _preprocess(eq, func)
if func is None:
func = func_
x = func.args[0]
f = func.func
y = Dummy('y')
xi = kwargs.get('xi')
eta = kwargs.get('eta')
terms = kwargs.get('n')
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(eq.lhs - eq.rhs, func, dict=dict, ics=ics, xi=xi,
n=terms, eta=eta, prep=False)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
df = f(x).diff(x)
a = Wild('a', exclude=[f(x)])
b = Wild('b', exclude=[f(x)])
c = Wild('c', exclude=[f(x)])
d = Wild('d', exclude=[df, f(x).diff(x, 2)])
e = Wild('e', exclude=[df])
k = Wild('k', exclude=[df])
n = Wild('n', exclude=[x, f(x), df])
c1 = Wild('c1', exclude=[x])
a2 = Wild('a2', exclude=[x, f(x), df])
b2 = Wild('b2', exclude=[x, f(x), df])
c2 = Wild('c2', exclude=[x, f(x), df])
d2 = Wild('d2', exclude=[x, f(x), df])
a3 = Wild('a3', exclude=[f(x), df, f(x).diff(x, 2)])
b3 = Wild('b3', exclude=[f(x), df, f(x).diff(x, 2)])
c3 = Wild('c3', exclude=[f(x), df, f(x).diff(x, 2)])
r3 = {'xi': xi, 'eta': eta} # Used for the lie_group hint
boundary = {} # Used to extract initial conditions
C1 = Symbol("C1")
eq = expand(eq)
# Preprocessing to get the initial conditions out
if ics is not None:
for funcarg in ics:
# Separating derivatives
if isinstance(funcarg, (Subs, Derivative)):
# f(x).diff(x).subs(x, 0) is a Subs, but f(x).diff(x).subs(x,
# y) is a Derivative
if isinstance(funcarg, Subs):
deriv = funcarg.expr
old = funcarg.variables[0]
new = funcarg.point[0]
elif isinstance(funcarg, Derivative):
deriv = funcarg
# No information on this. Just assume it was x
old = x
new = funcarg.variables[0]
if (isinstance(deriv, Derivative) and isinstance(deriv.args[0],
AppliedUndef) and deriv.args[0].func == f and
len(deriv.args[0].args) == 1 and old == x and not
new.has(x) and all(i == deriv.variables[0] for i in
deriv.variables) and not ics[funcarg].has(f)):
dorder = ode_order(deriv, x)
temp = 'f' + str(dorder)
boundary.update({temp: new, temp + 'val': ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Derivatives")
# Separating functions
elif isinstance(funcarg, AppliedUndef):
if (funcarg.func == f and len(funcarg.args) == 1 and
not funcarg.args[0].has(x) and not ics[funcarg].has(f)):
boundary.update({'f0': funcarg.args[0], 'f0val': ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Function")
else:
raise ValueError("Enter boundary conditions of the form ics={f(point}: value, f(x).diff(x, order).subs(x, point): value}")
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef not in (1, 0):
r = deriv_coef.match(a*f(x)**c1)
if r and r[c1]:
den = f(x)**r[c1]
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
## Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Dummy('u')
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df),
b: dep.coeff(f(x)),
c: ind}
# double check f[a] since the preconditioning may have failed
if not r[a].has(f) and not r[b].has(f) and (
r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0:
r['a'] = a
r['b'] = b
r['c'] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
## Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(
reduced_eq, f(x), exact=True).match(a*df + b*f(x) + c*f(x)**n)
if r and r[c] != 0 and r[n] != 1: # See issue 4676
r['a'] = a
r['b'] = b
r['c'] = c
r['n'] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
## Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0
r = collect(reduced_eq,
f(x), exact=True).match(a2*df + b2*f(x)**2 + c2*f(x)/x + d2/x**2)
if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0):
r['a2'] = a2
r['b2'] = b2
r['c2'] = c2
r['d2'] = d2
matching_hints["Riccati_special_minus2"] = r
# NON-REDUCED FORM OF EQUATION matches
r = collect(eq, df, exact=True).match(d + e * df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x), y)
r[e] = r[e].subs(f(x), y)
# FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS
# TODO: Hint first order series should match only if d/e is analytic.
# For now, only d/e and (d/e).diff(arg) is checked for existence at
# at a given point.
# This is currently done internally in ode_1st_power_series.
point = boundary.get('f0', 0)
value = boundary.get('f0val', C1)
check = cancel(r[d]/r[e])
check1 = check.subs({x: point, y: value})
if not check1.has(oo) and not check1.has(zoo) and \
not check1.has(NaN) and not check1.has(-oo):
check2 = (check1.diff(x)).subs({x: point, y: value})
if not check2.has(oo) and not check2.has(zoo) and \
not check2.has(NaN) and not check2.has(-oo):
rseries = r.copy()
rseries.update({'terms': terms, 'f0': point, 'f0val': value})
matching_hints["1st_power_series"] = rseries
r3.update(r)
## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where
# dP/dy == dQ/dx
try:
if r[d] != 0:
numerator = simplify(r[d].diff(y) - r[e].diff(x))
# The following few conditions try to convert a non-exact
# differential equation into an exact one.
# References : Differential equations with applications
# and historical notes - George E. Simmons
if numerator:
# If (dP/dy - dQ/dx) / Q = f(x)
# then exp(integral(f(x))*equation becomes exact
factor = simplify(numerator/r[e])
variables = factor.free_symbols
if len(variables) == 1 and x == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
# If (dP/dy - dQ/dx) / -P = f(y)
# then exp(integral(f(y))*equation becomes exact
factor = simplify(-numerator/r[d])
variables = factor.free_symbols
if len(variables) == 1 and y == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
except NotImplementedError:
# Differentiating the coefficients might fail because of things
# like f(2*x).diff(x). See issue 4624 and issue 4719.
pass
# Any first order ODE can be ideally solved by the Lie Group
# method
matching_hints["lie_group"] = r3
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact=True).match(d + e*df)
if r:
# Using r[d] and r[e] without any modification for hints
# linear-coefficients and separable-reduced.
num, den = r[d], r[e] # ODE = d/e + df
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = num.subs(f(x), y)
r[e] = den.subs(f(x), y)
## Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {'m1': m1, 'm2': m2, 'y': y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
## First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
if ordera is not None:
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb:
# u1=y/x and u2=x/y
u1 = Dummy('u1')
u2 = Dummy('u2')
s = "1st_homogeneous_coeff_subs"
s1 = s + "_dep_div_indep"
s2 = s + "_indep_div_dep"
if simplify((r[d] + u1*r[e]).subs({x: 1, y: u1})) != 0:
matching_hints[s1] = r
matching_hints[s1 + "_Integral"] = r
if simplify((r[e] + u2*r[d]).subs({x: u2, y: 1})) != 0:
matching_hints[s2] = r
matching_hints[s2 + "_Integral"] = r
if s1 in matching_hints and s2 in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
## Linear coefficients of the form
# y'+ F((a*x + b*y + c)/(a'*x + b'y + c')) = 0
# that can be reduced to homogeneous form.
F = num/den
params = _linear_coeff_match(F, func)
if params:
xarg, yarg = params
u = Dummy('u')
t = Dummy('t')
# Dummy substitution for df and f(x).
dummy_eq = reduced_eq.subs(((df, t), (f(x), u)))
reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x)))
dummy_eq = simplify(dummy_eq.subs(reps))
# get the re-cast values for e and d
r2 = collect(expand(dummy_eq), [df, f(x)]).match(e*df + d)
if r2:
orderd = homogeneous_order(r2[d], x, f(x))
if orderd is not None:
ordere = homogeneous_order(r2[e], x, f(x))
if orderd == ordere:
# Match arguments are passed in such a way that it
# is coherent with the already existing homogeneous
# functions.
r2[d] = r2[d].subs(f(x), y)
r2[e] = r2[e].subs(f(x), y)
r2.update({'xarg': xarg, 'yarg': yarg,
'd': d, 'e': e, 'y': y})
matching_hints["linear_coefficients"] = r2
matching_hints["linear_coefficients_Integral"] = r2
## Equation of the form y' + (y/x)*H(x^n*y) = 0
# that can be reduced to separable form
factor = simplify(x/f(x)*num/den)
# Try representing factor in terms of x^n*y
# where n is lowest power of x in factor;
# first remove terms like sqrt(2)*3 from factor.atoms(Mul)
u = None
for mul in ordered(factor.atoms(Mul)):
if mul.has(x):
_, u = mul.as_independent(x, f(x))
break
if u and u.has(f(x)):
h = x**(degree(Poly(u.subs(f(x), y), gen=x)))*f(x)
p = Wild('p')
if (u/h == 1) or ((u/h).simplify().match(x**p)):
t = Dummy('t')
r2 = {'t': t}
xpart, ypart = u.as_independent(f(x))
test = factor.subs(((u, t), (1/u, 1/t)))
free = test.free_symbols
if len(free) == 1 and free.pop() == t:
r2.update({'power': xpart.as_base_exp()[1], 'u': test})
matching_hints["separable_reduced"] = r2
matching_hints["separable_reduced_Integral"] = r2
## Almost-linear equation of the form f(x)*g(y)*y' + k(x)*l(y) + m(x) = 0
r = collect(eq, [df, f(x)]).match(e*df + d)
if r:
r2 = r.copy()
r2[c] = S.Zero
if r2[d].is_Add:
# Separate the terms having f(x) to r[d] and
# remaining to r[c]
no_f, r2[d] = r2[d].as_independent(f(x))
r2[c] += no_f
factor = simplify(r2[d].diff(f(x))/r[e])
if factor and not factor.has(f(x)):
r2[d] = factor_terms(r2[d])
u = r2[d].as_independent(f(x), as_Add=False)[1]
r2.update({'a': e, 'b': d, 'c': c, 'u': u})
r2[d] /= u
r2[e] /= u.diff(f(x))
matching_hints["almost_linear"] = r2
matching_hints["almost_linear_Integral"] = r2
elif order == 2:
# Liouville ODE in the form
# f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d*f(x).diff(x, 2) + e*df**2 + k*df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Dummy('y')
g = simplify(r[e]/r[d]).subs(f(x), y)
h = simplify(r[k]/r[d]).subs(f(x), y)
if y in h.free_symbols or x in g.free_symbols:
pass
else:
r = {'g': g, 'h': h, 'y': y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
# Homogeneous second order differential equation of the form
# a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3, where
# for simplicity, a3, b3 and c3 are assumed to be polynomials.
# It has a definite power series solution at point x0 if, b3/a3 and c3/a3
# are analytic at x0.
deq = a3*(f(x).diff(x, 2)) + b3*df + c3*f(x)
r = collect(reduced_eq,
[f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
ordinary = False
if r and r[a3] != 0:
if all([r[key].is_polynomial() for key in r]):
p = cancel(r[b3]/r[a3]) # Used below
q = cancel(r[c3]/r[a3]) # Used below
point = kwargs.get('x0', 0)
check = p.subs(x, point)
if not check.has(oo) and not check.has(NaN) and \
not check.has(zoo) and not check.has(-oo):
check = q.subs(x, point)
if not check.has(oo) and not check.has(NaN) and \
not check.has(zoo) and not check.has(-oo):
ordinary = True
r.update({'a3': a3, 'b3': b3, 'c3': c3, 'x0': point, 'terms': terms})
matching_hints["2nd_power_series_ordinary"] = r
# Checking if the differential equation has a regular singular point
# at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0)
# and (c3/a3)*((x - x0)**2) are analytic at x0.
if not ordinary:
p = cancel((x - point)*p)
check = p.subs(x, point)
if not check.has(oo) and not check.has(NaN) and \
not check.has(zoo) and not check.has(-oo):
q = cancel(((x - point)**2)*q)
check = q.subs(x, point)
if not check.has(oo) and not check.has(NaN) and \
not check.has(zoo) and not check.has(-oo):
coeff_dict = {'p': p, 'q': q, 'x0': point, 'terms': terms}
matching_hints["2nd_power_series_regular"] = coeff_dict
if order > 0:
# Any ODE that can be solved with a substitution and
# repeated integration e.g.:
# `d^2/dx^2(y) + x*d/dx(y) = constant
#f'(x) must be finite for this to work
r = _nth_order_reducible_match(reduced_eq, func)
if r:
matching_hints['nth_order_reducible'] = r
# Any ODE that can be solved with a combination of algebra and
# integrals e.g.:
# d^3/dx^3(x y) = F(x)
r = _nth_algebraic_match(reduced_eq, func)
if r['solutions']:
matching_hints['nth_algebraic'] = r
matching_hints['nth_algebraic_Integral'] = r
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
undetcoeff = _undetermined_coefficients_match(r[-1], x)
s = "nth_linear_constant_coeff_variation_of_parameters"
matching_hints[s] = r
matching_hints[s + "_Integral"] = r
if undetcoeff['test']:
r['trialset'] = undetcoeff['trialset']
matching_hints[
"nth_linear_constant_coeff_undetermined_coefficients"
] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x)
#In case of Homogeneous euler equation F(x) = 0
def _test_term(coeff, order):
r"""
Linear Euler ODEs have the form K*x**order*diff(y(x),x,order) = F(x),
where K is independent of x and y(x), order>= 0.
So we need to check that for each term, coeff == K*x**order from
some K. We have a few cases, since coeff may have several
different types.
"""
if order < 0:
raise ValueError("order should be greater than 0")
if coeff == 0:
return True
if order == 0:
if x in coeff.free_symbols:
return False
return True
if coeff.is_Mul:
if coeff.has(f(x)):
return False
return x**order in coeff.args
elif coeff.is_Pow:
return coeff.as_base_exp() == (x, order)
elif order == 1:
return x == coeff
return False
# Find coefficient for highest derivative, multiply coefficients to
# bring the equation into Euler form if possible
r_rescaled = None
if r is not None:
coeff = r[order]
factor = x**order / coeff
r_rescaled = {i: factor*r[i] for i in r}
if r_rescaled and not any(not _test_term(r_rescaled[i], i) for i in
r_rescaled if i != 'trialset' and i >= 0):
if not r_rescaled[-1]:
matching_hints["nth_linear_euler_eq_homogeneous"] = r_rescaled
else:
matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"] = r_rescaled
matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"] = r_rescaled
e, re = posify(r_rescaled[-1].subs(x, exp(x)))
undetcoeff = _undetermined_coefficients_match(e.subs(re), x)
if undetcoeff['test']:
r_rescaled['trialset'] = undetcoeff['trialset']
matching_hints["nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"] = r_rescaled
# Order keys based on allhints.
retlist = [i for i in allhints if i in matching_hints]
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for dsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = retlist[0] if retlist else None
matching_hints["ordered_hints"] = tuple(retlist)
return matching_hints
else:
return tuple(retlist)
def classify_sysode(eq, funcs=None, **kwargs):
    r"""
    Returns a dictionary of parameter names and values that define the system
    of ordinary differential equations in ``eq``.
    The parameters are further used in
    :py:meth:`~sympy.solvers.ode.dsolve` for solving that system.
    The parameter names and values are:
    'is_linear' (boolean), which tells whether the given system is linear.
    Note that "linear" here refers to the operator: terms such as ``x*diff(x,t)`` are
    nonlinear, whereas terms like ``sin(t)*diff(x,t)`` are still linear operators.
    'func' (list) contains the :py:class:`~sympy.core.function.Function`s that
    appear with a derivative in the ODE, i.e. those that we are trying to solve
    the ODE for.
    'order' (dict) with the maximum derivative for each element of the 'func'
    parameter.
    'func_coeff' (dict) with the coefficient for each triple ``(equation number,
    function, order)```. The coefficients are those subexpressions that do not
    appear in 'func', and hence can be considered constant for purposes of ODE
    solving.
    'eq' (list) with the equations from ``eq``, sympified and transformed into
    expressions (we are solving for these expressions to be zero).
    'no_of_equations' (int) is the number of equations (same as ``len(eq)``).
    'type_of_equation' (string) is an internal classification of the type of
    ODE.
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode-toc1.htm
    -A. D. Polyanin and A. V. Manzhirov, Handbook of Mathematics for Engineers and Scientists
    Examples
    ========
    >>> from sympy import Function, Eq, symbols, diff
    >>> from sympy.solvers.ode import classify_sysode
    >>> from sympy.abc import t
    >>> f, x, y = symbols('f, x, y', cls=Function)
    >>> k, l, m, n = symbols('k, l, m, n', Integer=True)
    >>> x1 = diff(x(t), t) ; y1 = diff(y(t), t)
    >>> x2 = diff(x(t), t, t) ; y2 = diff(y(t), t, t)
    >>> eq = (Eq(5*x1, 12*x(t) - 6*y(t)), Eq(2*y1, 11*x(t) + 3*y(t)))
    >>> classify_sysode(eq)
    {'eq': [-12*x(t) + 6*y(t) + 5*Derivative(x(t), t), -11*x(t) - 3*y(t) + 2*Derivative(y(t), t)],
    'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -12, (0, x(t), 1): 5, (0, y(t), 0): 6,
    (0, y(t), 1): 0, (1, x(t), 0): -11, (1, x(t), 1): 0, (1, y(t), 0): -3, (1, y(t), 1): 2},
    'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': 'type1'}
    >>> eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))
    >>> classify_sysode(eq)
    {'eq': [-t**2*y(t) - 5*t*x(t) + Derivative(x(t), t), t**2*x(t) - 5*t*y(t) + Derivative(y(t), t)],
    'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -5*t, (0, x(t), 1): 1, (0, y(t), 0): -t**2,
    (0, y(t), 1): 0, (1, x(t), 0): t**2, (1, x(t), 1): 0, (1, y(t), 0): -5*t, (1, y(t), 1): 1},
    'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': 'type4'}
    """
    # Sympify equations and convert iterables of equations into
    # a list of equations
    def _sympify(eq):
        return list(map(sympify, eq if iterable(eq) else [eq]))
    eq, funcs = (_sympify(w) for w in [eq, funcs])
    # Normalize each Eq(lhs, rhs) to the expression lhs - rhs (== 0).
    for i, fi in enumerate(eq):
        if isinstance(fi, Equality):
            eq[i] = fi.lhs - fi.rhs
    # NOTE: relies on `i` surviving the loop above; a single equation
    # (i == 0) is rejected since this routine handles systems only.
    matching_hints = {"no_of_equation":i+1}
    matching_hints['eq'] = eq
    if i==0:
        raise ValueError("classify_sysode() works for systems of ODEs. "
        "For scalar ODEs, classify_ode should be used")
    # Independent variable: extract a Symbol from any Derivative
    # in the first equation.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    # find all the functions if not given
    order = dict()
    if funcs==[None]:
        # Autodetect: every AppliedUndef appearing under a Derivative.
        funcs = []
        for eqs in eq:
            derivs = eqs.atoms(Derivative)
            func = set().union(*[d.atoms(AppliedUndef) for d in derivs])
            for func_ in  func:
                funcs.append(func_)
    funcs = list(set(funcs))
    if len(funcs) != len(eq):
        raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
    # Map each equation index to the function(s) whose highest-order
    # derivative it contains (a list when several functions peak there).
    func_dict = dict()
    for func in funcs:
        if not order.get(func, False):
            max_order = 0
            for i, eqs_ in enumerate(eq):
                order_ = ode_order(eqs_,func)
                if max_order < order_:
                    max_order = order_
                    eq_no = i
            if eq_no in func_dict:
                list_func = []
                list_func.append(func_dict[eq_no])
                list_func.append(func)
                func_dict[eq_no] = list_func
            else:
                func_dict[eq_no] = func
            order[func] = max_order
    funcs = [func_dict[i] for i in range(len(func_dict))]
    matching_hints['func'] = funcs
    for func in funcs:
        if isinstance(func, list):
            for func_elem in func:
                if len(func_elem.args) != 1:
                    raise ValueError("dsolve() and classify_sysode() work with "
                    "functions of one variable only, not %s" % func)
        else:
            if func and len(func.args) != 1:
                raise ValueError("dsolve() and classify_sysode() work with "
                "functions of one variable only, not %s" % func)
    # find the order of all equation in system of odes
    matching_hints["order"] = order
    # find coefficients of terms f(t), diff(f(t),t) and higher derivatives
    # and similarly for other functions g(t), diff(g(t),t) in all equations.
    # Here j denotes the equation number, funcs[l] denotes the function about
    # which we are talking about and k denotes the order of function funcs[l]
    # whose coefficient we are calculating.
    def linearity_check(eqs, j, func, is_linear_):
        # Record the coefficient of each derivative order of `func` in
        # equation j, and downgrade is_linear_ once any coefficient or
        # leftover term depends on one of the unknown functions.
        for k in range(order[func] + 1):
            func_coef[j, func, k] = collect(eqs.expand(), [diff(func, t, k)]).coeff(diff(func, t, k))
            if is_linear_ == True:
                if func_coef[j, func, k] == 0:
                    if k == 0:
                        # Zero coefficient for func itself: make sure no
                        # nonlinear residue of func is hiding elsewhere.
                        coef = eqs.as_independent(func, as_Add=True)[1]
                        for xr in range(1, ode_order(eqs,func) + 1):
                            coef -= eqs.as_independent(diff(func, t, xr), as_Add=True)[1]
                        if coef != 0:
                            is_linear_ = False
                    else:
                        if eqs.as_independent(diff(func, t, k), as_Add=True)[1]:
                            is_linear_ = False
                else:
                    # Nonzero coefficient: linear only if it does not
                    # involve any of the unknown functions.
                    for func_ in funcs:
                        if isinstance(func_, list):
                            for elem_func_ in func_:
                                dep = func_coef[j, func, k].as_independent(elem_func_, as_Add=True)[1]
                                if dep != 0:
                                    is_linear_ = False
                        else:
                            dep = func_coef[j, func, k].as_independent(func_, as_Add=True)[1]
                            if dep != 0:
                                is_linear_ = False
        return is_linear_
    func_coef = {}
    is_linear = True
    for j, eqs in enumerate(eq):
        for func in funcs:
            if isinstance(func, list):
                for func_elem in func:
                    is_linear = linearity_check(eqs, j, func_elem, is_linear)
            else:
                is_linear = linearity_check(eqs, j, func, is_linear)
    matching_hints['func_coeff'] = func_coef
    matching_hints['is_linear'] = is_linear
    # Dispatch to the specialized type-checkers only when every function
    # has the same order (mixed-order systems are unclassified).
    if len(set(order.values())) == 1:
        order_eq = list(matching_hints['order'].values())[0]
        if matching_hints['is_linear'] == True:
            if matching_hints['no_of_equation'] == 2:
                if order_eq == 1:
                    type_of_equation = check_linear_2eq_order1(eq, funcs, func_coef)
                elif order_eq == 2:
                    type_of_equation = check_linear_2eq_order2(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            elif matching_hints['no_of_equation'] == 3:
                if order_eq == 1:
                    type_of_equation = check_linear_3eq_order1(eq, funcs, func_coef)
                    if type_of_equation is None:
                        type_of_equation = check_linear_neq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            else:
                if order_eq == 1:
                    type_of_equation = check_linear_neq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
        else:
            if matching_hints['no_of_equation'] == 2:
                if order_eq == 1:
                    type_of_equation = check_nonlinear_2eq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            elif matching_hints['no_of_equation'] == 3:
                if order_eq == 1:
                    type_of_equation = check_nonlinear_3eq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            else:
                type_of_equation = None
        matching_hints['type_of_equation'] = type_of_equation
    return matching_hints
def check_linear_2eq_order1(eq, func, func_coef):
    """Classify a linear system of two first-order ODEs.

    Returns one of the strings ``'type1'``..``'type7'`` naming the template
    pair of equations (following the EqWorld/Polyanin classification), or
    ``None`` if the system has a time-dependent forcing term, which is not
    supported (issue 9244).
    """
    x = func[0].func
    y = func[1].func
    fc = func_coef
    # Independent variable: any Symbol found inside a Derivative.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    # for equations Eq(a1*diff(x(t),t), b1*x(t) + c1*y(t) + d1)
    # and Eq(a2*diff(y(t),t), b2*x(t) + c2*y(t) + d2)
    r['a1'] = fc[0,x(t),1] ; r['a2'] = fc[1,y(t),1]
    r['b1'] = -fc[0,x(t),0]/fc[0,x(t),1] ; r['b2'] = -fc[1,x(t),0]/fc[1,y(t),1]
    r['c1'] = -fc[0,y(t),0]/fc[0,x(t),1] ; r['c2'] = -fc[1,y(t),0]/fc[1,y(t),1]
    # Collect forcing terms: additive parts free of both unknowns.
    forcing = [S(0),S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t)):
                forcing[i] += j
    if not (forcing[0].has(t) or forcing[1].has(t)):
        # We can handle homogeneous case and simple constant forcings
        r['d1'] = forcing[0]
        r['d2'] = forcing[1]
    else:
        # Issue #9244: nonhomogeneous linear systems are not supported
        return None
    # Conditions to check for type 6 whose equations are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and
    # Eq(diff(y(t),t), a*[f(t) + a*h(t)]x(t) + a*[g(t) - h(t)]*y(t))
    p = 0
    q = 0
    # p1/p2 strip the constant ratio so a constant multiplier `a` relating
    # the two rows can be detected below.
    p1 = cancel(r['b2']/(cancel(r['b2']/r['c2']).as_numer_denom()[0]))
    p2 = cancel(r['b1']/(cancel(r['b1']/r['c1']).as_numer_denom()[0]))
    for n, i in enumerate([p1, p2]):
        for j in Mul.make_args(collect_const(i)):
            if not j.has(t):
                q = j
        if q and n==0:
            if ((r['b2']/j - r['b1'])/(r['c1'] - r['c2']/j)) == j:
                p = 1
        elif q and n==1:
            if ((r['b1']/j - r['b2'])/(r['c2'] - r['c1']/j)) == j:
                p = 2
    # End of condition for type 6
    if r['d1']!=0 or r['d2']!=0:
        # Constant nonhomogeneous case.
        if not r['d1'].has(t) and not r['d2'].has(t):
            if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()):
                # Equations for type 2 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)+d1) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t)+d2)
                return "type2"
        else:
            return None
    else:
        if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()):
            # Equations for type 1 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t))
            return "type1"
        else:
            # Normalize to diff(x,t) = ... form before the variable-
            # coefficient template tests.
            r['b1'] = r['b1']/r['a1'] ; r['b2'] = r['b2']/r['a2']
            r['c1'] = r['c1']/r['a1'] ; r['c2'] = r['c2']/r['a2']
            if (r['b1'] == r['c2']) and (r['c1'] == r['b2']):
                # Equation for type 3 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), g(t)*x(t) + f(t)*y(t))
                return "type3"
            elif (r['b1'] == r['c2']) and (r['c1'] == -r['b2']) or (r['b1'] == -r['c2']) and (r['c1'] == r['b2']):
                # Equation for type 4 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), -g(t)*x(t) + f(t)*y(t))
                return "type4"
            elif (not cancel(r['b2']/r['c1']).has(t) and not cancel((r['c2']-r['b1'])/r['c1']).has(t)) \
            or (not cancel(r['b1']/r['c2']).has(t) and not cancel((r['c1']-r['b2'])/r['c2']).has(t)):
                # Equations for type 5 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), a*g(t)*x(t) + [f(t) + b*g(t)]*y(t)
                return "type5"
            elif p:
                return "type6"
            else:
                # Equations for type 7 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), h(t)*x(t) + p(t)*y(t))
                return "type7"
def check_linear_2eq_order2(eq, func, func_coef):
    """Classify a linear system of two second-order ODEs.

    Returns one of the strings ``'type1'``..``'type11'`` naming the template
    pair of equations (following the EqWorld/Polyanin classification), or
    ``None`` if the system matches no implemented template.
    """
    x = func[0].func
    y = func[1].func
    fc = func_coef
    # Independent variable: any Symbol found inside a Derivative.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    a = Wild('a', exclude=[1/t])
    b = Wild('b', exclude=[1/t**2])
    u = Wild('u', exclude=[t, t**2])
    v = Wild('v', exclude=[t, t**2])
    w = Wild('w', exclude=[t, t**2])
    p = Wild('p', exclude=[t, t**2])
    # Coefficients: a* multiply the second derivatives, b*/c* the first
    # derivatives of x/y, d*/e* the undifferentiated x(t)/y(t) terms.
    r['a1'] = fc[0,x(t),2] ; r['a2'] = fc[1,y(t),2]
    r['b1'] = fc[0,x(t),1] ; r['b2'] = fc[1,x(t),1]
    r['c1'] = fc[0,y(t),1] ; r['c2'] = fc[1,y(t),1]
    r['d1'] = fc[0,x(t),0] ; r['d2'] = fc[1,x(t),0]
    r['e1'] = fc[0,y(t),0] ; r['e2'] = fc[1,y(t),0]
    # Forcing terms: additive parts free of both unknowns.
    const = [S(0), S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not (j.has(x(t)) or j.has(y(t))):
                const[i] += j
    r['f1'] = const[0]
    r['f2'] = const[1]
    if r['f1']!=0 or r['f2']!=0:
        # Nonhomogeneous templates.
        if all(not r[k].has(t) for k in 'a1 a2 d1 d2 e1 e2 f1 f2'.split()) \
        and r['b1']==r['c1']==r['b2']==r['c2']==0:
            return "type2"
        # BUGFIX: key list previously read 'e1 e1', duplicating e1 and never
        # verifying that the e2 coefficient is constant in t.
        elif all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2 d1 d2 e1 e2'.split()):
            # type4 requires every forcing term to be a pure complex
            # exponential in t (co has I); any other t-dependence sets q.
            p = [S(0), S(0)] ; q = [S(0), S(0)]
            for n, e in enumerate([r['f1'], r['f2']]):
                if e.has(t):
                    tpart = e.as_independent(t, Mul)[1]
                    for i in Mul.make_args(tpart):
                        if i.has(exp):
                            b, e = i.as_base_exp()
                            co = e.coeff(t)
                            if co and not co.has(t) and co.has(I):
                                p[n] = 1
                            else:
                                q[n] = 1
                        else:
                            q[n] = 1
                else:
                    q[n] = 1
            if p[0]==1 and p[1]==1 and q[0]==0 and q[1]==0:
                return "type4"
            else:
                return None
        else:
            return None
    else:
        # Homogeneous templates.
        if r['b1']==r['b2']==r['c1']==r['c2']==0 and all(not r[k].has(t) \
        for k in 'a1 a2 d1 d2 e1 e2'.split()):
            return "type1"
        elif r['b1']==r['e1']==r['c2']==r['d2']==0 and all(not r[k].has(t) \
        for k in 'a1 a2 b2 c1 d1 e2'.split()) and r['c1'] == -r['b2'] and \
        r['d1'] == r['e2']:
            return "type3"
        elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \
        (r['d2']/r['a2']).has(t) and not (r['e1']/r['a1']).has(t) and \
        r['b1']==r['d1']==r['c2']==r['e2']==0:
            return "type5"
        elif ((r['a1']/r['d1']).expand()).match((p*(u*t**2+v*t+w)**2).expand()) and not \
        (cancel(r['a1']*r['d2']/(r['a2']*r['d1']))).has(t) and not (r['d1']/r['e1']).has(t) and not \
        (r['d2']/r['e2']).has(t) and r['b1'] == r['b2'] == r['c1'] == r['c2'] == 0:
            return "type10"
        elif not cancel(r['d1']/r['e1']).has(t) and not cancel(r['d2']/r['e2']).has(t) and not \
        cancel(r['d1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['b1']==r['b2']==r['c1']==r['c2']==0:
            return "type6"
        elif not cancel(r['b1']/r['c1']).has(t) and not cancel(r['b2']/r['c2']).has(t) and not \
        cancel(r['b1']*r['a2']/(r['b2']*r['a1'])).has(t) and r['d1']==r['d2']==r['e1']==r['e2']==0:
            return "type7"
        elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \
        cancel(r['e1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['e1'].has(t) \
        and r['b1']==r['d1']==r['c2']==r['e2']==0:
            return "type8"
        elif (r['b1']/r['a1']).match(a/t) and (r['b2']/r['a2']).match(a/t) and not \
        (r['b1']/r['c1']).has(t) and not (r['b2']/r['c2']).has(t) and \
        (r['d1']/r['a1']).match(b/t**2) and (r['d2']/r['a2']).match(b/t**2) \
        and not (r['d1']/r['e1']).has(t) and not (r['d2']/r['e2']).has(t):
            return "type9"
        elif -r['b1']/r['d1']==-r['c1']/r['e1']==-r['b2']/r['d2']==-r['c2']/r['e2']==t:
            return "type11"
        else:
            return None
def check_linear_3eq_order1(eq, func, func_coef):
    # Classify a system of three coupled first-order *linear* ODEs in
    # x(t), y(t), z(t), returning the solver hint 'type1'..'type4' or
    # None when no implemented pattern applies.
    x = func[0].func
    y = func[1].func
    z = func[2].func
    fc = func_coef
    # Independent variable, recovered from the first Derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    # a*: coefficients of the derivative terms; b*/c*/d*: coefficients of
    # x(t), y(t), z(t) respectively in each of the three equations.
    r['a1'] = fc[0,x(t),1]; r['a2'] = fc[1,y(t),1]; r['a3'] = fc[2,z(t),1]
    r['b1'] = fc[0,x(t),0]; r['b2'] = fc[1,x(t),0]; r['b3'] = fc[2,x(t),0]
    r['c1'] = fc[0,y(t),0]; r['c2'] = fc[1,y(t),0]; r['c3'] = fc[2,y(t),0]
    r['d1'] = fc[0,z(t),0]; r['d2'] = fc[1,z(t),0]; r['d3'] = fc[2,z(t),0]
    # Collect forcing (nonhomogeneous) terms: anything free of x, y, z.
    forcing = [S(0), S(0), S(0)]
    for i in range(3):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t), z(t)):
                forcing[i] += j
    if forcing[0].has(t) or forcing[1].has(t) or forcing[2].has(t):
        # We can handle homogeneous case and simple constant forcings.
        # Issue #9244: nonhomogeneous linear systems are not supported
        return None
    if all(not r[k].has(t) for k in 'a1 a2 a3 b1 b2 b3 c1 c2 c3 d1 d2 d3'.split()):
        # Constant-coefficient system.
        if r['c1']==r['d1']==r['d2']==0:
            # Triangular coupling: equations solvable one after another.
            return 'type1'
        elif r['c1'] == -r['b2'] and r['d1'] == -r['b3'] and r['d2'] == -r['c3'] \
                and r['b1'] == r['c2'] == r['d3'] == 0:
            # Antisymmetric coefficient matrix with zero diagonal.
            return 'type2'
        elif r['b1'] == r['c2'] == r['d3'] == 0 and r['c1']/r['a1'] == -r['d1']/r['a1'] \
                and r['d2']/r['a2'] == -r['b2']/r['a2'] and r['b3']/r['a3'] == -r['c3']/r['a3']:
            return 'type3'
        else:
            return None
    else:
        # Variable coefficients: 'type4' requires every nonzero coefficient
        # ratio (and ratio against the shifted diagonal) to be t-free.
        for k1 in 'c1 d1 b2 d2 b3 c3'.split():
            if r[k1] == 0:
                continue
            else:
                if all(not cancel(r[k1]/r[k]).has(t) for k in 'd1 b2 d2 b3 c3'.split() if r[k]!=0) \
                        and all(not cancel(r[k1]/(r['b1'] - r[k])).has(t) for k in 'b1 c2 d3'.split() if r['b1']!=r[k]):
                    return 'type4'
                else:
                    break
    return None
def check_linear_neq_order1(eq, func, func_coef):
    """Classify an n-dimensional first-order linear system.

    Returns ``'type1'`` (or ``'type6'`` for the three-equation case) when
    every ratio of a function coefficient to the corresponding derivative
    coefficient is free of the independent variable; otherwise ``None``.
    """
    fc = func_coef
    # Independent variable, recovered from the first Derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    n = len(eq)
    # Every normalized coefficient must be constant in t.
    pairs = ((i, j) for i in range(n) for j in range(n))
    if any((fc[i, func[j], 0]/fc[i, func[i], 1]).has(t) for i, j in pairs):
        return None
    return 'type6' if n == 3 else 'type1'
def check_nonlinear_2eq_order1(eq, func, func_coef):
    """Classify a system of two first-order nonlinear ODEs.

    Returns one of the hint strings ``'type1'``..``'type5'`` naming the
    solution pattern that ``eq`` matches, or ``None`` if no implemented
    pattern applies.  Note that ``eq`` is normalized in place: each
    equation is divided by the coefficient of its derivative term.
    """
    # Independent variable, recovered from the first Derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    f = Wild('f')
    g = Wild('g')
    u, v = symbols('u, v', cls=Dummy)
    def check_type(x, y):
        # 'type5': t*x' = x - F(x', y'); try the divided form and both
        # overall signs of each equation.
        r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
        r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
            r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
        if not (r1 and r2):
            r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
            r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
            r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
        # The matched residuals may depend on the derivatives but not on t.
        if r1 and r2 and not (r1[f].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t) \
                or r2[g].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t)):
            return 'type5'
        else:
            return None
    for func_ in func:
        if isinstance(func_, list):
            # Grouped functions: only 'type5' is attempted, in both orders.
            x = func[0][0].func
            y = func[0][1].func
            eq_type = check_type(x, y)
            if not eq_type:
                eq_type = check_type(y, x)
            return eq_type
    x = func[0].func
    y = func[1].func
    fc = func_coef
    n = Wild('n', exclude=[x(t),y(t)])
    f1 = Wild('f1', exclude=[v,t])
    f2 = Wild('f2', exclude=[v,t])
    g1 = Wild('g1', exclude=[u,t])
    g2 = Wild('g2', exclude=[u,t])
    # Normalize each equation by the coefficient of its derivative term.
    for i in range(2):
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # 'type1': x' = x**n * f(t, y) with the remaining equation free of x.
    r = eq[0].match(diff(x(t),t) - x(t)**n*f)
    if r:
        g = (diff(y(t),t) - eq[1])/r[f]
        if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
            return 'type1'
    # 'type2': x' = exp(n*x) * f(t, y) with the remaining equation free of x.
    r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
    if r:
        g = (diff(y(t),t) - eq[1])/r[f]
        if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
            return 'type2'
    g = Wild('g')
    # 'type3': autonomous system x' = f(x, y), y' = g(x, y) (t-free).
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    if r1 and r2 and not (r1[f].subs(x(t),u).subs(y(t),v).has(t) or \
            r2[g].subs(x(t),u).subs(y(t),v).has(t)):
        return 'type3'
    # 'type4': f/g separates as f1(x)*g1(y) / (f2(x)*g2(y)).
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    # Bug fix: these matches can fail (return None) when the equations are
    # not of the form x' - f = 0; previously ``r1[f]`` then raised a
    # TypeError.  Guard the 'type4' test so we fall through to None instead.
    if r1 and r2:
        num, den = (
            (r1[f].subs(x(t),u).subs(y(t),v))/
            (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
        R1 = num.match(f1*g1)
        R2 = den.match(f2*g2)
        # phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
        if R1 and R2:
            return 'type4'
    return None
def check_nonlinear_2eq_order2(eq, func, func_coef):
    # Placeholder: no classification patterns are implemented for systems
    # of two second-order nonlinear ODEs, so no hint ever matches.
    return None
def check_nonlinear_3eq_order1(eq, func, func_coef):
    # Classify a system of three coupled first-order nonlinear ODEs in
    # x(t), y(t), z(t), returning the solver hint 'type1'..'type5' or
    # None when no implemented pattern matches.  ``eq`` is normalized in
    # place (each equation divided by the coefficient of its derivative).
    x = func[0].func
    y = func[1].func
    z = func[2].func
    fc = func_coef
    # Independent variable, recovered from the first Derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    u, v, w = symbols('u, v, w', cls=Dummy)
    # a, b, c match constant (x/y/z/t-free) coefficients; f and F1..F3
    # match arbitrary subexpressions.
    a = Wild('a', exclude=[x(t), y(t), z(t), t])
    b = Wild('b', exclude=[x(t), y(t), z(t), t])
    c = Wild('c', exclude=[x(t), y(t), z(t), t])
    f = Wild('f')
    F1 = Wild('F1')
    F2 = Wild('F2')
    F3 = Wild('F3')
    # Normalize each equation by the coefficient of its derivative term.
    for i in range(3):
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # 'type1': x' = a*y*z, y' = b*z*x, z' = c*x*y (constant coefficients
    # admitting a consistent scaling, checked via solve()).
    r1 = eq[0].match(diff(x(t),t) - a*y(t)*z(t))
    r2 = eq[1].match(diff(y(t),t) - b*z(t)*x(t))
    r3 = eq[2].match(diff(z(t),t) - c*x(t)*y(t))
    if r1 and r2 and r3:
        num1, den1 = r1[a].as_numer_denom()
        num2, den2 = r2[b].as_numer_denom()
        num3, den3 = r3[c].as_numer_denom()
        if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
            return 'type1'
    # 'type2': same structure as 'type1' but with a common factor f pulled
    # out of every equation.
    r = eq[0].match(diff(x(t),t) - y(t)*z(t)*f)
    if r:
        # NOTE(review): collect_const(...).match(...) can return None, in
        # which case the subscripting below would raise; presumably the
        # successful match of ``r`` guarantees success here — confirm.
        r1 = collect_const(r[f]).match(a*f)
        r2 = ((diff(y(t),t) - eq[1])/r1[f]).match(b*z(t)*x(t))
        r3 = ((diff(z(t),t) - eq[2])/r1[f]).match(c*x(t)*y(t))
        if r1 and r2 and r3:
            num1, den1 = r1[a].as_numer_denom()
            num2, den2 = r2[b].as_numer_denom()
            num3, den3 = r3[c].as_numer_denom()
            if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
                return 'type2'
    # 'type3': x' = c*F2 - b*F3 and cyclic permutations in the other two.
    r = eq[0].match(diff(x(t),t) - (F2-F3))
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            # If the second equation uses F2/F3 in swapped roles, swap them
            # (and negate the coefficients) before matching.
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = eq[1].match(diff(y(t),t) - a*r1[F3] + r1[c]*F1)
            if r2:
                # Structural equality check (a plain boolean, not a match).
                r3 = (eq[2] == diff(z(t),t) - r1[b]*r2[F1] + r2[a]*r1[F2])
                if r1 and r2 and r3:
                    return 'type3'
    # 'type4': x' = z*F2 - y*F3 and cyclic permutations.
    r = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = (diff(y(t),t) - eq[1]).match(a*x(t)*r1[F3] - r1[c]*z(t)*F1)
            if r2:
                r3 = (diff(z(t),t) - eq[2] == r1[b]*y(t)*r2[F1] - r2[a]*x(t)*r1[F2])
                if r1 and r2 and r3:
                    return 'type4'
    # 'type5': x' = x*(c*F2 - b*F3) and cyclic permutations.
    r = (diff(x(t),t) - eq[0]).match(x(t)*(F2 - F3))
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = (diff(y(t),t) - eq[1]).match(y(t)*(a*r1[F3] - r1[c]*F1))
            if r2:
                r3 = (diff(z(t),t) - eq[2] == z(t)*(r1[b]*r2[F1] - r2[a]*r1[F2]))
                if r1 and r2 and r3:
                    return 'type5'
    return None
def check_nonlinear_3eq_order2(eq, func, func_coef):
    # Placeholder: no classification patterns are implemented for systems
    # of three second-order nonlinear ODEs, so no hint ever matches.
    return None
def checksysodesol(eqs, sols, func=None):
    r"""
    Substitutes corresponding ``sols`` for each functions into each ``eqs`` and
    checks that the result of substitutions for each equation is ``0``. The
    equations and solutions passed can be any iterable.

    This only works when each ``sols`` have one function only, like `x(t)` or `y(t)`.
    For each function, ``sols`` can have a single solution or a list of solutions.
    In most cases it will not be necessary to explicitly identify the function,
    but if the function cannot be inferred from the original equation it
    can be supplied through the ``func`` argument.

    When a sequence of equations is passed, the same sequence is used to return
    the result for each equation with each function substituted with corresponding
    solutions.

    It tries the following method to find zero equivalence for each equation:

    Substitute the solutions for functions, like `x(t)` and `y(t)` into the
    original equations containing those functions.

    This function returns a tuple.  The first item in the tuple is ``True`` if
    the substitution results for each equation is ``0``, and ``False`` otherwise.
    The second item in the tuple is what the substitution results in.  Each element
    of the ``list`` should always be ``0`` corresponding to each equation if the
    first item is ``True``. Note that sometimes this function may return ``False``,
    but with an expression that is identically equal to ``0``, instead of returning
    ``True``.  This is because :py:meth:`~sympy.simplify.simplify.simplify` cannot
    reduce the expression to ``0``.  If an expression returned by each function
    vanishes identically, then ``sols`` really is a solution to ``eqs``.

    If this function seems to hang, it is probably because of a difficult simplification.

    Examples
    ========

    >>> from sympy import Eq, diff, symbols, sin, cos, exp, sqrt, S, Function
    >>> from sympy.solvers.ode import checksysodesol
    >>> C1, C2 = symbols('C1:3')
    >>> t = symbols('t')
    >>> x, y = symbols('x, y', cls=Function)
    >>> eq = (Eq(diff(x(t),t), x(t) + y(t) + 17), Eq(diff(y(t),t), -2*x(t) + y(t) + 12))
    >>> sol = [Eq(x(t), (C1*sin(sqrt(2)*t) + C2*cos(sqrt(2)*t))*exp(t) - S(5)/3),
    ... Eq(y(t), (sqrt(2)*C1*cos(sqrt(2)*t) - sqrt(2)*C2*sin(sqrt(2)*t))*exp(t) - S(46)/3)]
    >>> checksysodesol(eq, sol)
    (True, [0, 0])
    >>> eq = (Eq(diff(x(t),t),x(t)*y(t)**4), Eq(diff(y(t),t),y(t)**3))
    >>> sol = [Eq(x(t), C1*exp(-1/(4*(C2 + t)))), Eq(y(t), -sqrt(2)*sqrt(-1/(C2 + t))/2),
    ... Eq(x(t), C1*exp(-1/(4*(C2 + t)))), Eq(y(t), sqrt(2)*sqrt(-1/(C2 + t))/2)]
    >>> checksysodesol(eq, sol)
    (True, [0, 0])

    """
    def _sympify(eq):
        # Coerce a single expression or any iterable of expressions into a
        # list of sympified expressions.
        return list(map(sympify, eq if iterable(eq) else [eq]))
    eqs = _sympify(eqs)
    # Normalize each equation to expression form (lhs - rhs == 0).
    for i in range(len(eqs)):
        if isinstance(eqs[i], Equality):
            eqs[i] = eqs[i].lhs - eqs[i].rhs
    if func is None:
        # Infer the unknown functions from the derivatives in the equations.
        funcs = []
        for eq in eqs:
            derivs = eq.atoms(Derivative)
            func = set().union(*[d.atoms(AppliedUndef) for d in derivs])
            for func_ in func:
                funcs.append(func_)
        funcs = list(set(funcs))
    # NOTE(review): when ``func`` is supplied by the caller, ``funcs`` is
    # never bound and the checks below would raise NameError — presumably
    # all current callers pass func=None; confirm before relying on the
    # ``func`` argument.
    if not all(isinstance(func, AppliedUndef) and len(func.args) == 1 for func in funcs)\
            and len({func.args for func in funcs})!=1:
        raise ValueError("func must be a function of one variable, not %s" % func)
    for sol in sols:
        if len(sol.atoms(AppliedUndef)) != 1:
            raise ValueError("solutions should have one function only")
    if len(funcs) != len({sol.lhs for sol in sols}):
        raise ValueError("number of solutions provided does not match the number of equations")
    # Build a map {function: solved rhs}, solving for the function when the
    # solution is not already in explicit form.
    dictsol = dict()
    for sol in sols:
        func = list(sol.atoms(AppliedUndef))[0]
        if sol.rhs == func:
            sol = sol.reversed
        solved = sol.lhs == func and not sol.rhs.has(func)
        if not solved:
            rhs = solve(sol, func)
            if not rhs:
                raise NotImplementedError
        else:
            rhs = sol.rhs
        dictsol[func] = rhs
    # Substitute every solution into every equation and simplify; each
    # residual should reduce to 0.
    checkeq = []
    for eq in eqs:
        for func in funcs:
            eq = sub_func_doit(eq, func, dictsol[func])
        ss = simplify(eq)
        if ss != 0:
            eq = ss.expand(force=True)
        else:
            eq = 0
        checkeq.append(eq)
    if len(set(checkeq)) == 1 and list(set(checkeq))[0] == 0:
        return (True, checkeq)
    else:
        return (False, checkeq)
@vectorize(0)
def odesimp(ode, eq, func, hint):
    r"""
    Simplifies solutions of ODEs, including trying to solve for ``func`` and
    running :py:meth:`~sympy.solvers.ode.constantsimp`.

    It may use knowledge of the type of solution that the hint returns to
    apply additional simplifications.

    It also attempts to integrate any :py:class:`~sympy.integrals.Integral`\s
    in the expression, if the hint is not an ``_Integral`` hint.

    This function should have no effect on expressions returned by
    :py:meth:`~sympy.solvers.ode.dsolve`, as
    :py:meth:`~sympy.solvers.ode.dsolve` already calls
    :py:meth:`~sympy.solvers.ode.odesimp`, but the individual hint functions
    do not call :py:meth:`~sympy.solvers.ode.odesimp` (because the
    :py:meth:`~sympy.solvers.ode.dsolve` wrapper does).  Therefore, this
    function is designed for mainly internal use.

    Examples
    ========

    >>> from sympy import sin, symbols, dsolve, pprint, Function
    >>> from sympy.solvers.ode import odesimp
    >>> x , u2, C1= symbols('x,u2,C1')
    >>> f = Function('f')

    >>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x),
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral',
    ... simplify=False)
    >>> pprint(eq, wrap_line=False)
                            x
                           ----
                           f(x)
                             /
                            |
                            |   /        1   \
                            |  -|u2 + -------|
                            |   |        /1 \|
                            |   |     sin|--||
                            |   \        \u2//
    log(f(x)) = log(C1) +   |  ----------------  d(u2)
                            |          2
                            |        u2
                            |
                           /

    >>> pprint(odesimp(eq, f(x), 1, {C1},
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep'
    ... )) #doctest: +SKIP
        x
    --------- = C1
       /f(x)\
    tan|----|
       \2*x /

    """
    x = func.args[0]
    f = func.func
    C1 = get_numbered_constants(eq, num=1)
    # Symbols appearing in the solution but not in the ODE are the
    # arbitrary constants of integration.
    constants = eq.free_symbols - ode.free_symbols

    # First, integrate if the hint allows it.
    eq = _handle_Integral(eq, func, hint)
    if hint.startswith("nth_linear_euler_eq_nonhomogeneous"):
        eq = simplify(eq)
    if not isinstance(eq, Equality):
        raise TypeError("eq should be an instance of Equality")

    # Second, clean up the arbitrary constants.
    # Right now, nth linear hints can put as many as 2*order constants in an
    # expression.  If that number grows with another hint, the third argument
    # here should be raised accordingly, or constantsimp() rewritten to handle
    # an arbitrary number of constants.
    eq = constantsimp(eq, constants)

    # Lastly, now that we have cleaned up the expression, try solving for func.
    # When CRootOf is implemented in solve(), we will want to return a CRootOf
    # every time instead of an Equality.

    # Get the f(x) on the left if possible.
    if eq.rhs == func and not eq.lhs.has(func):
        # NOTE(review): this rebinds ``eq`` to a list, so the attribute
        # access in the next ``if`` would fail if this branch fired;
        # presumably unreachable for the hints that reach here — confirm.
        eq = [Eq(eq.rhs, eq.lhs)]

    # make sure we are working with lists of solutions in simplified form.
    if eq.lhs == func and not eq.rhs.has(func):
        # The solution is already solved
        eq = [eq]

        # special simplification of the rhs
        if hint.startswith("nth_linear_constant_coeff"):
            # Collect terms to make the solution look nice.
            # This is also necessary for constantsimp to remove unnecessary
            # terms from the particular solution from variation of parameters
            #
            # Collect is not behaving reliably here.  The results for
            # some linear constant-coefficient equations with repeated
            # roots do not properly simplify all constants sometimes.
            # 'collectterms' gives different orders sometimes, and results
            # differ in collect based on that order.  The
            # sort-reverse trick fixes things, but may fail in the
            # future.  In addition, collect is splitting exponentials with
            # rational powers for no reason.  We have to do a match
            # to fix this using Wilds.
            # 'collectterms' is global state set by the nth-linear hint
            # solvers: a list of (power, re(root), im(root)) triples.
            global collectterms
            try:
                collectterms.sort(key=default_sort_key)
                collectterms.reverse()
            except Exception:
                pass
            assert len(eq) == 1 and eq[0].lhs == f(x)
            sol = eq[0].rhs
            sol = expand_mul(sol)
            # Group by x**i * exp(re*x) * sin/cos(im*x) first, then by the
            # purely real part x**i * exp(re*x).
            for i, reroot, imroot in collectterms:
                sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
                sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
            for i, reroot, imroot in collectterms:
                sol = collect(sol, x**i*exp(reroot*x))
            del collectterms

            # Collect is splitting exponentials with rational powers for
            # no reason.  We call powsimp to fix.
            sol = powsimp(sol)

            eq[0] = Eq(f(x), sol)

    else:
        # The solution is not solved, so try to solve it
        try:
            floats = any(i.is_Float for i in eq.atoms(Number))
            eqsol = solve(eq, func, force=True, rational=False if floats else None)
            if not eqsol:
                raise NotImplementedError
        except (NotImplementedError, PolynomialError):
            eq = [eq]
        else:
            def _expand(expr):
                numer, denom = expr.as_numer_denom()

                if denom.is_Add:
                    return expr
                else:
                    return powsimp(expr.expand(), combine='exp', deep=True)

            # XXX: the rest of odesimp() expects each ``t`` to be in a
            # specific normal form: rational expression with numerator
            # expanded, but with combined exponential functions (at
            # least in this setup all tests pass).
            eq = [Eq(f(x), _expand(t)) for t in eqsol]

        # special simplification of the lhs.
        if hint.startswith("1st_homogeneous_coeff"):
            for j, eqi in enumerate(eq):
                newi = logcombine(eqi, force=True)
                if isinstance(newi.lhs, log) and newi.rhs == 0:
                    newi = Eq(newi.lhs.args[0]/C1, C1)
                eq[j] = newi

    # We cleaned up the constants before solving to help the solve engine with
    # a simpler expression, but the solved expression could have introduced
    # things like -C1, so rerun constantsimp() one last time before returning.
    for i, eqi in enumerate(eq):
        eq[i] = constantsimp(eqi, constants)
        eq[i] = constant_renumber(eq[i], ode.free_symbols)

    # If there is only 1 solution, return it;
    # otherwise return the list of solutions.
    if len(eq) == 1:
        eq = eq[0]
    return eq
def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True):
    r"""
    Substitutes ``sol`` into ``ode`` and checks that the result is ``0``.

    This only works when ``func`` is one function, like `f(x)`.  ``sol`` can
    be a single solution or a list of solutions.  Each solution may be an
    :py:class:`~sympy.core.relational.Equality` that the solution satisfies,
    e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an
    :py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it
    will not be necessary to explicitly identify the function, but if the
    function cannot be inferred from the original equation it can be supplied
    through the ``func`` argument.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    It tries the following methods, in order, until it finds zero equivalence:

    1. Substitute the solution for `f` in the original equation.  This only
       works if ``ode`` is solved for `f`.  It will attempt to solve it first
       unless ``solve_for_func == False``.
    2. Take `n` derivatives of the solution, where `n` is the order of
       ``ode``, and check to see if that is equal to the solution.  This only
       works on exact ODEs.
    3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time
       solving for the derivative of `f` of that order (this will always be
       possible because `f` is a linear operator).  Then back substitute each
       derivative into ``ode`` in reverse order.

    This function returns a tuple.  The first item in the tuple is ``True`` if
    the substitution results in ``0``, and ``False`` otherwise. The second
    item in the tuple is what the substitution results in.  It should always
    be ``0`` if the first item is ``True``. Sometimes this function will
    return ``False`` even when an expression is identically equal to ``0``.
    This happens when :py:meth:`~sympy.simplify.simplify.simplify` does not
    reduce the expression to ``0``.  If an expression returned by this
    function vanishes identically, then ``sol`` really is a solution to
    the ``ode``.

    If this function seems to hang, it is probably because of a hard
    simplification.

    To use this function to test, test the first item of the tuple.

    Examples
    ========

    >>> from sympy import Eq, Function, checkodesol, symbols
    >>> x, C1 = symbols('x,C1')
    >>> f = Function('f')
    >>> checkodesol(f(x).diff(x), Eq(f(x), C1))
    (True, 0)
    >>> assert checkodesol(f(x).diff(x), C1)[0]
    >>> assert not checkodesol(f(x).diff(x), x)[0]
    >>> checkodesol(f(x).diff(x, 2), x**2)
    (False, 2)

    """
    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if func is None:
        try:
            _, func = _preprocess(ode.lhs)
        except ValueError:
            # The ODE alone does not determine the function; fall back to
            # whatever AppliedUndef appears in the solution(s).
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkodesol for this case.')
            func = funcs.pop()
    if not isinstance(func, AppliedUndef) or len(func.args) != 1:
        raise ValueError(
            "func must be a function of one variable, not %s" % func)
    if is_sequence(sol, set):
        # Check each solution independently, preserving the container type.
        return type(sol)([checkodesol(ode, i, order=order, solve_for_func=solve_for_func) for i in sol])
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed
    if order == 'auto':
        order = ode_order(ode, func)
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        # Try once to put the solution in explicit f(x) = ... form, then
        # re-check without attempting to solve again.
        rhs = solve(sol, func)
        if rhs:
            eqs = [Eq(func, t) for t in rhs]
            if len(rhs) == 1:
                eqs = eqs[0]
            return checkodesol(ode, eqs, order=order,
                solve_for_func=False)
    # ``s`` doubles as the residual and the loop condition: it starts as
    # True and each pass either produces a (possibly zero) residual or
    # advances ``testnum`` to the next method.
    s = True
    testnum = 0
    x = func.args[0]
    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ODE. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs
            if sol.lhs == func:
                s = sub_func_doit(ode_diff, func, sol.rhs)
            else:
                testnum += 1
                continue
            ss = simplify(s)
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand(force=True)
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(
                trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) -
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            # s2 = simplify(
            #     diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
            #     ode.lhs + ode.rhs)
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ODE. Thanks to Chris Smith for suggesting this method.  Many of
            # the comments below are his, too.
            # The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ODE in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0: sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0: sol.lhs}
            else:
                diffsols = {}
                sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once.  After that, we automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds, func.diff(x, i))
                        if not sdf:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)
            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                # Substitute it into ODE to check for self consistency.
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
                    rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs, rhs)
                    ode_or_bool = simplify(ode_or_bool)
                    if isinstance(ode_or_bool, (bool, BooleanAtom)):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify -- just prove that the
                # numerator goes to zero
                num = trigsimp((lhs - rhs).as_numer_denom()[0])
                # since solutions are obtained using force=True we test
                # using the same level of assumptions
                ## replace function with dummy so assumptions will work
                _func = Dummy('func')
                num = num.subs(func, _func)
                ## posify the expression
                num, reps = posify(num)
                s = simplify(num).xreplace(reps).xreplace({_func: func})
                testnum += 1
        else:
            break
    if not s:
        return (True, s)
    elif s is True:  # The code above never was able to change s
        raise NotImplementedError("Unable to test if " + str(sol) +
            " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
def ode_sol_simplicity(sol, func, trysolving=True):
    r"""
    Return an extended integer measuring how simple an ODE solution is.

    The ranking, from simplest to least simple:

    +----------------------------------------------+-------------------+
    | Simplicity                                   | Return            |
    +==============================================+===================+
    | ``sol`` solved for ``func``                  | ``-2``            |
    +----------------------------------------------+-------------------+
    | ``sol`` not solved for ``func`` but can be   | ``-1``            |
    +----------------------------------------------+-------------------+
    | ``sol`` is not solved nor solvable for       | ``len(str(sol))`` |
    | ``func``                                     |                   |
    +----------------------------------------------+-------------------+
    | ``sol`` contains an                          | ``oo``            |
    | :py:class:`~sympy.integrals.Integral`        |                   |
    +----------------------------------------------+-------------------+

    ``oo`` is SymPy infinity, which compares greater than any integer, so
    solutions with leftover integrals always rank worst.  Only the relative
    ordering of the returned values is guaranteed; the exact numbers may
    change if the heuristic is improved.

    If you already know that :py:meth:`~sympy.solvers.solvers.solve` cannot
    solve ``sol``, pass ``trysolving=False`` to skip that (potentially
    slow) step — :py:meth:`~sympy.solvers.ode.dsolve` with
    ``simplify=False`` should do this.

    For a list of solutions the result is ``oo`` if any member ranks
    ``oo``, and otherwise the length of the string form of the whole list.

    Examples
    ========

    This function is designed to be passed to ``min`` as the key argument,
    such as ``min(listofsolutions, key=lambda i: ode_sol_simplicity(i,
    f(x)))``.

    >>> from sympy import symbols, Function, Eq, tan, cos, sqrt, Integral
    >>> from sympy.solvers.ode import ode_sol_simplicity
    >>> x, C1, C2 = symbols('x, C1, C2')
    >>> f = Function('f')

    >>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x))
    -2
    >>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x))
    -1
    >>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x))
    oo

    >>> eq1 = Eq(f(x)/tan(f(x)/(2*x)), C1)
    >>> eq2 = Eq(f(x)/tan(f(x)/(2*x) + f(x)), C2)
    >>> [ode_sol_simplicity(eq, f(x)) for eq in [eq1, eq2]]
    [28, 35]
    >>> min([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x)))
    Eq(f(x)/tan(f(x)/(2*x)), C1)

    """
    # TODO: if two solutions are solved for f(x), we still want to be
    # able to get the simpler of the two

    # Lists: the worst member dominates; otherwise rank by printed length.
    if iterable(sol):
        if any(ode_sol_simplicity(s, func, trysolving=trysolving) is oo
                for s in sol):
            return oo
        return len(str(sol))

    # Any unevaluated Integral immediately ranks worst.
    if sol.has(Integral):
        return oo

    # Already in solved form on either side?  (This will change slightly
    # when CRootOf is implemented in solve(); a CRootOf solution should
    # probably rank between a normal solution and an unsolvable one.)
    solved_on_lhs = sol.lhs == func and not sol.rhs.has(func)
    solved_on_rhs = sol.rhs == func and not sol.lhs.has(func)
    if solved_on_lhs or solved_on_rhs:
        return -2

    # Not solved, but perhaps solvable.
    if trysolving:
        try:
            if not solve(sol, func):
                raise NotImplementedError
        except NotImplementedError:
            pass
        else:
            return -1

    # Fall back to a crude length-based heuristic.  This may favor combined
    # fractions (no duplicate denominators) and expressions with fewer
    # additions/subtractions, since the printer separates those with spaces.
    return len(str(sol))
def _get_constant_subexpressions(expr, Cs):
    """Collect subexpressions of ``expr`` built purely from the arbitrary
    constants in ``Cs`` — candidates for absorption by ``constantsimp``."""
    Cs = set(Cs)
    found = []

    def _walk(node):
        syms = node.free_symbols
        if syms and syms.issubset(Cs):
            # Whole subtree is constant: record it and do not descend.
            found.append(node)
            return
        # Expand exp(a + b) so constant factors surface as Mul arguments.
        if node.func == exp:
            node = node.expand(mul=True)
        if node.func in (Add, Mul):
            parts = sift(node.args, lambda arg: arg.free_symbols.issubset(Cs))
            if len(parts[True]) > 1:
                grouped = node.func(*parts[True])
                if not grouped.is_number:
                    found.append(grouped)
        elif isinstance(node, Integral):
            # A definite integral whose integrand and limits involve only
            # constants is itself a constant.
            if node.free_symbols.issubset(Cs) and \
                    all(len(lim) == 3 for lim in node.limits):
                found.append(node)
        for arg in node.args:
            _walk(arg)

    _walk(expr)
    return found
def __remove_linear_redundancies(expr, Cs):
    # Drop arbitrary constants that appear linearly and redundantly: when
    # several constants enter an Add with the same coefficient, all but
    # one (the first by string order) are set to zero.
    cnts = {i: expr.count(i) for i in Cs}
    # Only consider constants that actually occur in the expression.
    Cs = [i for i in Cs if cnts[i] > 0]

    def _linear(expr):
        # Within an Add, find constants that occur *only* here (their total
        # count matches) and enter linearly (second derivative is zero);
        # group them by coefficient and zero out the duplicates.
        if isinstance(expr, Add):
            xs = [i for i in Cs if expr.count(i)==cnts[i] \
                and 0 == expr.diff(i, 2)]
            d = {}
            for x in xs:
                y = expr.diff(x)
                if y not in d:
                    d[y]=[]
                d[y].append(x)
            for y in d:
                if len(d[y]) > 1:
                    d[y].sort(key=str)
                    for x in d[y][1:]:
                        expr = expr.subs(x, 0)
        return expr

    def _recursive_walk(expr):
        # Apply _linear bottom-up over the expression tree.
        if len(expr.args) != 0:
            expr = expr.func(*[_recursive_walk(i) for i in expr.args])
        expr = _linear(expr)
        return expr

    if isinstance(expr, Equality):
        lhs, rhs = [_recursive_walk(i) for i in expr.args]
        # f: is this argument a pure number or one of the constants?
        f = lambda i: isinstance(i, Number) or i in Cs
        if isinstance(lhs, Symbol) and lhs in Cs:
            rhs, lhs = lhs, rhs
        if lhs.func in (Add, Symbol) and rhs.func in (Add, Symbol):
            # Move all constant/number terms to the rhs and everything
            # else to the lhs.
            dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
            drhs = sift([rhs] if isinstance(rhs, AtomicExpr) else rhs.args, f)
            for i in [True, False]:
                for hs in [dlhs, drhs]:
                    if i not in hs:
                        hs[i] = [0]
            # this calculation can be simplified
            lhs = Add(*dlhs[False]) - Add(*drhs[False])
            rhs = Add(*drhs[True]) - Add(*dlhs[True])
        elif lhs.func in (Mul, Symbol) and rhs.func in (Mul, Symbol):
            # Divide constant/number factors of the lhs into the rhs.
            dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
            if True in dlhs:
                if False not in dlhs:
                    dlhs[False] = [1]
                lhs = Mul(*dlhs[False])
                rhs = rhs/Mul(*dlhs[True])
        return Eq(lhs, rhs)
    else:
        return _recursive_walk(expr)
@vectorize(0)
def constantsimp(expr, constants):
    r"""
    Simplifies an expression with arbitrary constants in it.

    This function is written specifically to work with
    :py:meth:`~sympy.solvers.ode.dsolve`, and is not intended for general use.

    Simplification is done by "absorbing" the arbitrary constants into other
    arbitrary constants, numbers, and symbols that they are not independent
    of.

    The symbols must all have the same name with numbers after it, for
    example, ``C1``, ``C2``, ``C3``.  The ``symbolname`` here would be
    '``C``', the ``startnumber`` would be 1, and the ``endnumber`` would be 3.
    If the arbitrary constants are independent of the variable ``x``, then the
    independent symbol would be ``x``.  There is no need to specify the
    dependent function, such as ``f(x)``, because it already has the
    independent symbol, ``x``, in it.

    Because terms are "absorbed" into arbitrary constants and because
    constants are renumbered after simplifying, the arbitrary constants in
    expr are not necessarily equal to the ones of the same name in the
    returned result.

    If two or more arbitrary constants are added, multiplied, or raised to the
    power of each other, they are first absorbed together into a single
    arbitrary constant.  Then the new constant is combined into other terms if
    necessary.

    Absorption of constants is done with limited assistance:

    1. terms of :py:class:`~sympy.core.add.Add`\s are collected to try join
       constants so `e^x (C_1 \cos(x) + C_2 \cos(x))` will simplify to `e^x
       C_1 \cos(x)`;

    2. powers with exponents that are :py:class:`~sympy.core.add.Add`\s are
       expanded so `e^{C_1 + x}` will be simplified to `C_1 e^x`.

    Use :py:meth:`~sympy.solvers.ode.constant_renumber` to renumber constants
    after simplification or else arbitrary numbers on constants may appear,
    e.g. `C_1 + C_3 x`.

    In rare cases, a single constant can be "simplified" into two constants.
    Every differential equation solution should have as many arbitrary
    constants as the order of the differential equation.  The result here will
    be technically correct, but it may, for example, have `C_1` and `C_2` in
    an expression, when `C_1` is actually equal to `C_2`.  Use your discretion
    in such situations, and also take advantage of the ability to use hints in
    :py:meth:`~sympy.solvers.ode.dsolve`.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.solvers.ode import constantsimp
    >>> C1, C2, C3, x, y = symbols('C1, C2, C3, x, y')
    >>> constantsimp(2*C1*x, {C1, C2, C3})
    C1*x
    >>> constantsimp(C1 + 2 + x, {C1, C2, C3})
    C1 + x
    >>> constantsimp(C1*C2 + 2 + C2 + C3*x, {C1, C2, C3})
    C1 + C3*x

    """
    # This function works recursively.  The idea is that, for Mul,
    # Add, Pow, and Function, if the class has a constant in it, then
    # we can simplify it, which we do by recursing down and
    # simplifying up.  Otherwise, we can skip that part of the
    # expression.

    Cs = constants

    orig_expr = expr

    # Replace each constant-only subexpression by one of its own constants,
    # provided those constants occur nowhere else in the expression.
    constant_subexprs = _get_constant_subexpressions(expr, Cs)
    for xe in constant_subexprs:
        xes = list(xe.free_symbols)
        if not xes:
            continue
        if all([expr.count(c) == xe.count(c) for c in xes]):
            xes.sort(key=str)
            expr = expr.subs(xe, xes[0])

    # try to perform common sub-expression elimination of constant terms
    try:
        commons, rexpr = cse(expr)
        commons.reverse()
        rexpr = rexpr[0]
        for s in commons:
            cs = list(s[1].atoms(Symbol))
            # A CSE symbol whose definition involves exactly one constant
            # (used nowhere else) can be replaced by that constant directly.
            if len(cs) == 1 and cs[0] in Cs and \
                    cs[0] not in rexpr.atoms(Symbol) and \
                    not any(cs[0] in ex for ex in commons if ex != s):
                rexpr = rexpr.subs(s[0], cs[0])
            else:
                rexpr = rexpr.subs(*s)
        expr = rexpr
    except Exception:
        # cse is best-effort here; fall back to the unprocessed expression.
        pass
    expr = __remove_linear_redundancies(expr, Cs)

    def _conditional_term_factoring(expr):
        new_expr = terms_gcd(expr, clear=False, deep=True, expand=False)

        # we do not want to factor exponentials, so handle this separately
        if new_expr.is_Mul:
            infac = False
            asfac = False
            for m in new_expr.args:
                if isinstance(m, exp):
                    asfac = True
                elif m.is_Add:
                    infac = any(isinstance(fi, exp) for t in m.args
                        for fi in Mul.make_args(t))
                if asfac and infac:
                    new_expr = expr
                    break
        return new_expr

    expr = _conditional_term_factoring(expr)

    # call recursively if more simplification is possible
    if orig_expr != expr:
        return constantsimp(expr, Cs)
    return expr
def constant_renumber(expr, variables=None, newconstants=None):
    r"""
    Renumber arbitrary constants in ``expr`` to use the symbol names as given
    in ``newconstants``. In the process, this reorders expression terms in a
    standard way.
    If ``newconstants`` is not provided then the new constant names will be
    ``C1``, ``C2`` etc. Otherwise ``newconstants`` should be an iterable
    giving the new symbols to use for the constants in order.
    The ``variables`` argument is a list of non-constant symbols. All other
    free symbols found in ``expr`` are assumed to be constants and will be
    renumbered. If ``variables`` is not given then any numbered symbol
    beginning with ``C`` (e.g. ``C1``) is assumed to be a constant.
    Symbols are renumbered based on ``.sort_key()``, so they should be
    numbered roughly in the order that they appear in the final, printed
    expression.  Note that this ordering is based in part on hashes, so it can
    produce different results on different machines.
    The structure of this function is very similar to that of
    :py:meth:`~sympy.solvers.ode.constantsimp`.
    Examples
    ========
    >>> from sympy import symbols, Eq, pprint
    >>> from sympy.solvers.ode import constant_renumber
    >>> x, C1, C2, C3 = symbols('x,C1:4')
    >>> expr = C3 + C2*x + C1*x**2
    >>> expr
    C1*x**2 + C2*x + C3
    >>> constant_renumber(expr)
    C1 + C2*x + C3*x**2
    The ``variables`` argument specifies which are constants so that the
    other symbols will not be renumbered:
    >>> constant_renumber(expr, [C1, x])
    C1*x**2 + C2 + C3*x
    The ``newconstants`` argument is used to specify what symbols to use when
    replacing the constants:
    >>> constant_renumber(expr, [x], newconstants=symbols('E1:4'))
    E1 + E2*x + E3*x**2
    """
    # Containers of expressions are renumbered element-wise; the container
    # type (set, list or tuple) is preserved in the return value.
    if type(expr) in (set, list, tuple):
        renumbered = [constant_renumber(e, variables, newconstants) for e in expr]
        return type(expr)(renumbered)
    # Symbols in solution but not ODE are constants
    if variables is not None:
        variables = set(variables)
        constantsymbols = list(expr.free_symbols - variables)
    # Any Cn is a constant...
    else:
        variables = set()
        isconstant = lambda s: s.startswith('C') and s[1:].isdigit()
        constantsymbols = [sym for sym in expr.free_symbols if isconstant(sym.name)]
    # Find new constants checking that they aren't already in the ODE
    if newconstants is None:
        iter_constants = numbered_symbols(start=1, prefix='C', exclude=variables)
    else:
        iter_constants = (sym for sym in newconstants if sym not in variables)
    # ``newstartnumber`` is a module-level global so the nested recursive
    # helper below can keep a running index across calls (see FIXME there).
    global newstartnumber
    newstartnumber = 1
    endnumber = len(constantsymbols)
    # Slot 0 is never used; slots 1..endnumber are filled in traversal order
    # by ``_constant_renumber`` below.
    constants_found = [None]*(endnumber + 2)
    # make a mapping to send all constantsymbols to S.One and use
    # that to make sure that term ordering is not dependent on
    # the indexed value of C
    C_1 = [(ci, S.One) for ci in constantsymbols]
    sort_key=lambda arg: default_sort_key(arg.subs(C_1))
    def _constant_renumber(expr):
        r"""
        We need to have an internal recursive function so that
        newstartnumber maintains its values throughout recursive calls.
        """
        # FIXME: Use nonlocal here when support for Py2 is dropped:
        global newstartnumber
        if isinstance(expr, Equality):
            return Eq(
                _constant_renumber(expr.lhs),
                _constant_renumber(expr.rhs))
        if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and \
                not expr.has(*constantsymbols):
            # Base case, as above. Hope there aren't constants inside
            # of some other class, because they won't be renumbered.
            return expr
        elif expr.is_Piecewise:
            return expr
        elif expr in constantsymbols:
            # Record the first encounter of each constant; the index gives
            # its renumbering order.
            if expr not in constants_found:
                constants_found[newstartnumber] = expr
                newstartnumber += 1
            return expr
        elif expr.is_Function or expr.is_Pow or isinstance(expr, Tuple):
            return expr.func(
                *[_constant_renumber(x) for x in expr.args])
        else:
            # Sort commutative args with constants normalized to 1 so the
            # visit order (and hence numbering) ignores the old C indices.
            sortedargs = list(expr.args)
            sortedargs.sort(key=sort_key)
            return expr.func(*[_constant_renumber(x) for x in sortedargs])
    expr = _constant_renumber(expr)
    # Don't renumber symbols present in the ODE.
    constants_found = [c for c in constants_found if c not in variables]
    # Renumbering happens here
    expr = expr.subs(zip(constants_found[1:], iter_constants), simultaneous=True)
    return expr
def _handle_Integral(expr, func, hint):
    r"""
    Converts a solution with Integrals in it into an actual solution.
    For most hints, this simply runs ``expr.doit()``.
    """
    # ``y`` is a module-level dummy set by the 1st_exact solvers; it is
    # consumed (and deleted) here for those two hints.
    global y
    indep = func.args[0]
    f = func.func
    if hint == "1st_exact":
        result = (expr.doit()).subs(y, f(indep))
        del y
        return result
    if hint == "1st_exact_Integral":
        result = Eq(Subs(expr.lhs, y, f(indep)), expr.rhs)
        del y
        return result
    if hint == "nth_linear_constant_coeff_homogeneous":
        # Already explicit; nothing to evaluate.
        return expr
    if not hint.endswith("_Integral"):
        return expr.doit()
    # ``*_Integral`` hints deliberately keep the unevaluated Integral.
    return expr
# FIXME: replace the general solution in the docstring with
# dsolve(equation, hint='1st_exact_Integral'). You will need to be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
    r"""
    Solves 1st order exact ordinary differential equations.
    A 1st order differential equation is called exact if it is the total
    differential of a function. That is, the differential equation
    .. math:: P(x, y) \,\partial{}x + Q(x, y) \,\partial{}y = 0
    is exact if there is some function `F(x, y)` such that `P(x, y) =
    \partial{}F/\partial{}x` and `Q(x, y) = \partial{}F/\partial{}y`.  It can
    be shown that a necessary and sufficient condition for a first order ODE
    to be exact is that `\partial{}P/\partial{}y = \partial{}Q/\partial{}x`.
    Then, the solution will be as given below::
        >>> from sympy import Function, Eq, Integral, symbols, pprint
        >>> x, y, t, x0, y0, C1= symbols('x,y,t,x0,y0,C1')
        >>> P, Q, F= map(Function, ['P', 'Q', 'F'])
        >>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
        ... Integral(Q(x0, t), (t, y0, y))), C1))
                    x                y
                    /                /
                   |                |
        F(x, y) =  |  P(t, y) dt +  |  Q(x0, t) dt = C1
                   |                |
                  /                /
                  x0               y0
    Where the first partials of `P` and `Q` exist and are continuous in a
    simply connected region.
    A note: SymPy currently has no way to represent inert substitution on an
    expression, so the hint ``1st_exact_Integral`` will return an integral
    with `dy`.  This is supposed to represent the function that you are
    solving for.
    Examples
    ========
    >>> from sympy import Function, dsolve, cos, sin
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
    ... f(x), hint='1st_exact')
    Eq(x*cos(f(x)) + f(x)**3/3, C1)
    References
    ==========
    - https://en.wikipedia.org/wiki/Exact_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 73
    # indirect doctest
    """
    global y  # This is the only way to pass dummy y to _handle_Integral
    x = func.args[0]
    r = match  # d + e*diff(f(x), x)
    d = r[r['d']]
    e = r[r['e']]
    y = r['y']
    C1 = get_numbered_constants(eq, num=1)
    # Refer Joel Moses, "Symbolic Integration - The Stormy Decade",
    # Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558
    # which gives the method to solve an exact differential equation.
    partial_F = Integral(d, x)
    sol = partial_F + Integral(e - partial_F.diff(y), y)
    return Eq(sol, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
    r"""
    Returns the best solution to an ODE from the two hints
    ``1st_homogeneous_coeff_subs_dep_div_indep`` and
    ``1st_homogeneous_coeff_subs_indep_div_dep``.
    This is as determined by :py:meth:`~sympy.solvers.ode.ode_sol_simplicity`.
    See the
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`
    and
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`
    docstrings for more information on these hints.  Note that there is no
    ``ode_1st_homogeneous_coeff_best_Integral`` hint.
    Examples
    ========
    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_best', simplify=False))
                             /    2    \
                             | 3*x     |
                          log|----- + 1|
                             | 2       |
                             \f (x)    /
    log(f(x)) = log(C1) - --------------
                                3
    References
    ==========
    - https://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 59
    # indirect doctest
    """
    # Both substitutions (u1 = y/x and u2 = x/y) solve the equation but
    # produce different integrals; compute both and keep the simpler one.
    sol_indep = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
        func, order, match)
    sol_dep = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
        func, order, match)
    simplify = match.get('simplify', True)
    if simplify:
        # why is odesimp called here? Should it be at the usual spot?
        sol_indep = odesimp(eq, sol_indep, func,
            "1st_homogeneous_coeff_subs_indep_div_dep")
        sol_dep = odesimp(eq, sol_dep, func,
            "1st_homogeneous_coeff_subs_dep_div_indep")
    ranking = lambda sol: ode_sol_simplicity(sol, func,
        trysolving=not simplify)
    return min([sol_indep, sol_dep], key=ranking)
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
    r"""
    Solves a 1st order differential equation with homogeneous coefficients
    using the substitution `u_1 = \frac{\text{<dependent
    variable>}}{\text{<independent variable>}}`.
    This is a differential equation
    .. math:: P(x, y) + Q(x, y) dy/dx = 0
    such that `P` and `Q` are homogeneous and of the same order.  A function
    `F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
    Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`.  See
    also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
    If the coefficients `P` and `Q` in the differential equation above are
    homogeneous functions of the same order, then it can be shown that the
    substitution `y = u_1 x` (i.e. `u_1 = y/x`) will turn the differential
    equation into an equation separable in the variables `x` and `u`.  If
    `h(u_1)` is the function that results from making the substitution `u_1 =
    f(x)/x` on `P(x, f(x))` and `g(u_2)` is the function that results from the
    substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
    Q(x, f(x)) f'(x) = 0`, then the general solution is::
        >>> from sympy import Function, dsolve, pprint
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
        >>> pprint(genform)
         /f(x)\    /f(x)\ d
        g|----| + h|----|*--(f(x))
         \ x  /    \ x  / dx
        >>> pprint(dsolve(genform, f(x),
        ... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
                       f(x)
                       ----
                        x
                         /
                        |
                        |       -h(u1)
        log(x) = C1 +   |  ---------------- d(u1)
                        |  u1*h(u1) + g(u1)
                        |
                       /
    Where `u_1 h(u_1) + g(u_1) \ne 0` and `x \ne 0`.
    See also the docstrings of
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`.
    Examples
    ========
    >>> from sympy import Function, dsolve
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_subs_dep_div_indep', simplify=False))
                          /          3   \
                          |3*f(x)   f (x)|
                       log|------ + -----|
                          |  x         3 |
                          \           x  /
        log(x) = log(C1) - -------------------
                                    3
    References
    ==========
    - https://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 59
    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    u = Dummy('u')
    u1 = Dummy('u1')  # u1 == f(x)/x
    r = match  # d+e*diff(f(x),x)
    C1 = get_numbered_constants(eq, num=1)
    xarg = match.get('xarg', 0)
    yarg = match.get('yarg', 0)
    # The separable integral over u1 = f(x)/x.  (Renamed from ``int`` so the
    # Python builtin is not shadowed.)
    int_ = Integral(
        (-r[r['e']]/(r[r['d']] + u1*r[r['e']])).subs({x: 1, r['y']: u1}),
        (u1, None, f(x)/x))
    sol = logcombine(Eq(log(x), int_ + log(C1)), force=True)
    # Undo any shift of the origin (xarg/yarg) done by the matcher.
    sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
    return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
    r"""
    Solves a 1st order differential equation with homogeneous coefficients
    using the substitution `u_2 = \frac{\text{<independent
    variable>}}{\text{<dependent variable>}}`.
    This is a differential equation
    .. math:: P(x, y) + Q(x, y) dy/dx = 0
    such that `P` and `Q` are homogeneous and of the same order.  A function
    `F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
    Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`.  See
    also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
    If the coefficients `P` and `Q` in the differential equation above are
    homogeneous functions of the same order, then it can be shown that the
    substitution `x = u_2 y` (i.e. `u_2 = x/y`) will turn the differential
    equation into an equation separable in the variables `y` and `u_2`.  If
    `h(u_2)` is the function that results from making the substitution `u_2 =
    x/f(x)` on `P(x, f(x))` and `g(u_2)` is the function that results from the
    substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
    Q(x, f(x)) f'(x) = 0`, then the general solution is:
    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f, g, h = map(Function, ['f', 'g', 'h'])
    >>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
    >>> pprint(genform)
     / x  \    / x  \ d
    g|----| + h|----|*--(f(x))
     \f(x)/    \f(x)/ dx
    >>> pprint(dsolve(genform, f(x),
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
                 x
                ----
                f(x)
                  /
                 |
                 |       -g(u2)
                 |  ---------------- d(u2)
                 |  u2*g(u2) + h(u2)
                 |
                /
    <BLANKLINE>
    f(x) = C1*e
    Where `u_2 g(u_2) + h(u_2) \ne 0` and `f(x) \ne 0`.
    See also the docstrings of
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`.
    Examples
    ========
    >>> from sympy import Function, pprint, dsolve
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep',
    ... simplify=False))
                             /    2    \
                             | 3*x     |
                          log|----- + 1|
                             | 2       |
                             \f (x)    /
    log(f(x)) = log(C1) - --------------
                                3
    References
    ==========
    - https://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 59
    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    u = Dummy('u')
    u2 = Dummy('u2')  # u2 == x/f(x)
    r = match  # d+e*diff(f(x),x)
    C1 = get_numbered_constants(eq, num=1)
    xarg = match.get('xarg', 0)  # If xarg present take xarg, else zero
    yarg = match.get('yarg', 0)  # If yarg present take yarg, else zero
    # The separable integral over u2 = x/f(x).  (Renamed from ``int`` so the
    # Python builtin is not shadowed.)
    int_ = Integral(
        simplify(
            (-r[r['d']]/(r[r['e']] + u2*r[r['d']])).subs({x: u2, r['y']: 1})),
        (u2, None, x/f(x)))
    sol = logcombine(Eq(log(f(x)), int_ + log(C1)), force=True)
    # Undo any shift of the origin (xarg/yarg) done by the matcher.
    sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
    return sol
# XXX: Should this function maybe go somewhere else?
def homogeneous_order(eq, *symbols):
    r"""
    Returns the order `n` if `g` is homogeneous and ``None`` if it is not
    homogeneous.
    Determines if a function is homogeneous and if so of what order.  A
    function `f(x, y, \cdots)` is homogeneous of order `n` if `f(t x, t y,
    \cdots) = t^n f(x, y, \cdots)`.
    If the function is of two variables, `F(x, y)`, then `f` being homogeneous
    of any order is equivalent to being able to rewrite `F(x, y)` as `G(x/y)`
    or `H(y/x)`.  This fact is used to solve 1st order ordinary differential
    equations whose coefficients are homogeneous of the same order (see the
    docstrings of
    :py:meth:`~solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep` and
    :py:meth:`~solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`).
    Symbols can be functions, but every argument of the function must be a
    symbol, and the arguments of the function that appear in the expression
    must match those given in the list of symbols.  If a declared function
    appears with different arguments than given in the list of symbols,
    ``None`` is returned.
    Examples
    ========
    >>> from sympy import Function, homogeneous_order, sqrt
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> homogeneous_order(f(x), f(x)) is None
    True
    >>> homogeneous_order(f(x,y), f(y, x), x, y) is None
    True
    >>> homogeneous_order(f(x), f(x), x)
    1
    >>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x))
    2
    >>> homogeneous_order(x**2+f(x), x, f(x)) is None
    True
    """
    if not symbols:
        raise ValueError("homogeneous_order: no symbols were given.")
    symset = set(symbols)
    eq = sympify(eq)
    # The following are not supported
    if eq.has(Order, Derivative):
        return None
    # These are all constants
    if (eq.is_Number or
            eq.is_NumberSymbol or
            eq.is_number
            ):
        return S.Zero
    # Replace all functions with dummy variables
    dum = numbered_symbols(prefix='d', cls=Dummy)
    newsyms = set()
    # Iterate over a snapshot list because symset is mutated in the loop.
    for i in [j for j in symset if getattr(j, 'is_Function')]:
        iargs = set(i.args)
        # A declared function applied to anything outside the symbol list
        # cannot be classified; bail out.
        if iargs.difference(symset):
            return None
        else:
            dummyvar = next(dum)
            eq = eq.subs(i, dummyvar)
            symset.remove(i)
            newsyms.add(dummyvar)
    symset.update(newsyms)
    # If none of the (possibly dummied) symbols appear, eq is constant in
    # them but was not caught above, so it is not homogeneous in symset.
    if not eq.free_symbols & symset:
        return None
    # assuming order of a nested function can only be equal to zero
    if isinstance(eq, Function):
        return None if homogeneous_order(
            eq.args[0], *tuple(symset)) != 0 else S.Zero
    # make the replacement of x with x*t and see if t can be factored out
    t = Dummy('t', positive=True)  # It is sufficient that t > 0
    eqs = separatevars(eq.subs([(i, t*i) for i in symset]), [t], dict=True)[t]
    if eqs is S.One:
        return S.Zero  # there was no term with only t
    i, d = eqs.as_independent(t, as_Add=False)
    b, e = d.as_base_exp()
    if b == t:
        return e
    # Implicitly returns None when t could not be factored as a pure power.
def ode_1st_linear(eq, func, order, match):
    r"""
    Solves 1st order linear differential equations.
    These are differential equations of the form
    .. math:: dy/dx + P(x) y = Q(x)\text{.}
    These kinds of differential equations can be solved in a general way.  The
    integrating factor `e^{\int P(x) \,dx}` will turn the equation into a
    separable equation.  The general solution is::
        >>> from sympy import Function, dsolve, Eq, pprint, diff, sin
        >>> from sympy.abc import x
        >>> f, P, Q = map(Function, ['f', 'P', 'Q'])
        >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x))
        >>> pprint(genform)
                    d
        P(x)*f(x) + --(f(x)) = Q(x)
                    dx
        >>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral'))
                /       /                   \
                |      |                    |
                |      |         /          |     /
                |      |        |           |    |
                |      |        | P(x) dx   |  - | P(x) dx
                |      |        |           |    |
                |      |       /            |   /
        f(x) = |C1 +  | Q(x)*e           dx|*e
                |      |                    |
                \     /                     /
    Examples
    ========
    >>> f = Function('f')
    >>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)),
    ... f(x), '1st_linear'))
    f(x) = x*(C1 - cos(x))
    References
    ==========
    - https://en.wikipedia.org/wiki/Linear_differential_equation#First_order_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 92
    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # a*diff(f(x),x) + b*f(x) + c
    C1 = get_numbered_constants(eq, num=1)
    # Integrating factor e^(int P dx), with P = b/a in normalized form.
    ifactor = exp(Integral(r[r['b']]/r[r['a']], x))
    # Integral of Q times the integrating factor, with Q = -c/a.
    qint = Integral(ifactor*(-r[r['c']]/r[r['a']]), x)
    lhs = match.get('u', f(x))  # take almost-linear u if present, else f(x)
    return Eq(lhs, (qint + C1)/ifactor)
def ode_Bernoulli(eq, func, order, match):
    r"""
    Solves Bernoulli differential equations.
    These are equations of the form
    .. math:: dy/dx + P(x) y = Q(x) y^n\text{, }n \ne 1`\text{.}
    The substitution `w = 1/y^{1-n}` will transform an equation of this form
    into one that is linear (see the docstring of
    :py:meth:`~sympy.solvers.ode.ode_1st_linear`).  The general solution is::
        >>> from sympy import Function, dsolve, Eq, pprint
        >>> from sympy.abc import x, n
        >>> f, P, Q = map(Function, ['f', 'P', 'Q'])
        >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)**n)
        >>> pprint(genform)
                    d                n
        P(x)*f(x) + --(f(x)) = Q(x)*f (x)
                    dx
        >>> pprint(dsolve(genform, f(x), hint='Bernoulli_Integral')) #doctest: +SKIP
                                                                       1
                                                                      ----
                                                                     1 - n
               //        /                   \                      \
               ||       |                    |                      |
               ||       |         /          |             /        |
               ||       |        |           |            |         |
               ||       | (1 - n)* | P(x) dx  |  (-1 + n)* | P(x) dx|
               ||       |        |           |            |         |
               ||       |       /            |           /          |
        f(x) = ||C1 + (-1 + n)* | -Q(x)*e           dx|*e           |
               ||       |                    |                      |
               \\      /                     /                      /
    Note that the equation is separable when `n = 1` (see the docstring of
    :py:meth:`~sympy.solvers.ode.ode_separable`).
    >>> pprint(dsolve(Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)), f(x),
    ... hint='separable_Integral'))
    f(x)
        /
       |                /
       |  1            |
       |  - dy = C1 +  | (-P(x) + Q(x)) dx
       |  y            |
       |              /
      /
    Examples
    ========
    >>> from sympy import Function, dsolve, Eq, pprint, log
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(Eq(x*f(x).diff(x) + f(x), log(x)*f(x)**2),
    ... f(x), hint='Bernoulli'))
                    1
    f(x) = -------------------
             /log(x)   1\
           x*|C1 + ------ + -|
             \        x    x/
    References
    ==========
    - https://en.wikipedia.org/wiki/Bernoulli_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 95
    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # a*diff(f(x),x) + b*f(x) + c*f(x)**n, n != 1
    C1 = get_numbered_constants(eq, num=1)
    # Exponent n of the Bernoulli equation (guaranteed != 1 by the matcher).
    npow = r[r['n']]
    # Integrating factor for the linearized equation in w = 1/y**(1-n).
    ifactor = exp((1 - npow)*Integral(r[r['b']]/r[r['a']], x))
    qint = (npow - 1)*Integral(ifactor*r[r['c']]/r[r['a']], x)
    return Eq(f(x), ((qint + C1)/ifactor)**(1/(1 - npow)))
def ode_Riccati_special_minus2(eq, func, order, match):
    r"""
    The general Riccati equation has the form
    .. math:: dy/dx = f(x) y^2 + g(x) y + h(x)\text{.}
    While it does not have a general solution [1], the "special" form, `dy/dx
    = a y^2 - b x^c`, does have solutions in many cases [2].  This routine
    returns a solution for `a(dy/dx) = b y^2 + c y/x + d/x^2` that is obtained
    by using a suitable change of variables to reduce it to the special form
    and is valid when neither `a` nor `b` are zero and either `c` or `d` is
    zero.
    >>> from sympy.abc import x, y, a, b, c, d
    >>> from sympy.solvers.ode import dsolve, checkodesol
    >>> from sympy import pprint, Function
    >>> f = Function('f')
    >>> y = f(x)
    >>> genform = a*y.diff(x) - (b*y**2 + c*y/x + d/x**2)
    >>> sol = dsolve(genform, y)
    >>> pprint(sol, wrap_line=False)
            /                                 /        __________________       \\
            |           __________________    |       /                2        ||
            |          /                2     |     \/  4*b*d - (a + c)  *log(x)||
           -|a + c - \/  4*b*d - (a + c)  *tan|C1 + ----------------------------||
            \                                 \                 2*a             //
    f(x) = ------------------------------------------------------------------------
                                            2*b*x
    >>> checkodesol(genform, sol, order=1)[0]
    True
    References
    ==========
    1. http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Riccati
    2. http://eqworld.ipmnet.ru/en/solutions/ode/ode0106.pdf -
       http://eqworld.ipmnet.ru/en/solutions/ode/ode0123.pdf
    """
    x = func.args[0]
    f = func.func
    r = match  # a2*diff(f(x),x) + b2*f(x) + c2*f(x)/x + d2/x**2
    a2 = r[r['a2']]
    b2 = r[r['b2']]
    c2 = r[r['c2']]
    d2 = r[r['d2']]
    C1 = get_numbered_constants(eq, num=1)
    # Discriminant-like quantity of the reduced special equation.
    mu = sqrt(4*d2*b2 - (a2 - c2)**2)
    phase = mu/(2*a2)*log(x) + C1
    return Eq(f(x), (a2 - c2 - mu*tan(phase))/(2*b2*x))
def ode_Liouville(eq, func, order, match):
    r"""
    Solves 2nd order Liouville differential equations.
    The general form of a Liouville ODE is
    .. math:: \frac{d^2 y}{dx^2} + g(y) \left(\!
                \frac{dy}{dx}\!\right)^2 + h(x)
                \frac{dy}{dx}\text{.}
    The general solution is:
        >>> from sympy import Function, dsolve, Eq, pprint, diff
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
        ... h(x)*diff(f(x),x), 0)
        >>> pprint(genform)
                          2                    2
                /d       \         d          d
        g(f(x))*|--(f(x))|  + h(x)*--(f(x)) + ---(f(x)) = 0
                \dx      /         dx           2
                                              dx
        >>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
                                          f(x)
                  /                     /
                 |                     |
                 |     /               |     /
                 |    |                |    |
                 |  - | h(x) dx        |    | g(y) dy
                 |    |                |    |
                 |   /                 |   /
        C1 + C2* | e            dx +   |  e           dy = 0
                 |                     |
                /                     /
    Examples
    ========
    >>> from sympy import Function, dsolve, Eq, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
    ... diff(f(x), x)/x, f(x), hint='Liouville'))
               ________________           ________________
    [f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]
    References
    ==========
    - Goldstein and Braun, "Advanced Methods for the Solution of Differential
      Equations", pp. 98
    - http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Liouville
    # indirect doctest
    """
    # Liouville ODE:
    #  f(x).diff(x, 2) + g(f(x))*(f(x).diff(x, 2))**2 + h(x)*f(x).diff(x)
    # See Goldstein and Braun, "Advanced Methods for the Solution of
    # Differential Equations", pg. 98, as well as
    # http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
    x = func.args[0]
    f = func.func
    r = match  # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x)
    y = r['y']
    C1, C2 = get_numbered_constants(eq, num=2)
    # The y-side integral of the separated equation.  (Renamed from ``int``
    # so the Python builtin is not shadowed.)
    int_ = Integral(exp(Integral(r['g'], y)), (y, None, f(x)))
    sol = Eq(int_ + C1*Integral(exp(-Integral(r['h'], x)), x) + C2, 0)
    return sol
def ode_2nd_power_series_ordinary(eq, func, order, match):
    r"""
    Gives a power series solution to a second order homogeneous differential
    equation with polynomial coefficients at an ordinary point. A homogeneous
    differential equation is of the form
    .. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) = 0
    For simplicity it is assumed that `P(x)`, `Q(x)` and `R(x)` are polynomials,
    it is sufficient that `\frac{Q(x)}{P(x)}` and `\frac{R(x)}{P(x)}` exists at
    `x_{0}`. A recurrence relation is obtained by substituting `y` as `\sum_{n=0}^\infty a_{n}x^{n}`,
    in the differential equation, and equating the nth term. Using this relation
    various terms can be generated.
    Examples
    ========
    >>> from sympy import dsolve, Function, pprint
    >>> from sympy.abc import x, y
    >>> f = Function("f")
    >>> eq = f(x).diff(x, 2) + f(x)
    >>> pprint(dsolve(eq, hint='2nd_power_series_ordinary'))
              / 4    2    \ /     2\
              |x    x     | |    x | / 6\
    f(x) = C2*|-- - -- + 1| + C1*x*|1 - --| + O\x /
              \24   2     / \    6 /
    References
    ==========
    - http://tutorial.math.lamar.edu/Classes/DE/SeriesSolutions.aspx
    - George E. Simmons, "Differential Equations with Applications and
      Historical Notes", p.p 176 - 184
    """
    x = func.args[0]
    f = func.func
    C0, C1 = get_numbered_constants(eq, num=2)
    n = Dummy("n", integer=True)
    s = Wild("s")
    k = Wild("k", exclude=[x])
    x0 = match.get('x0')
    terms = match.get('terms', 5)
    p = match[match['a3']]
    q = match[match['b3']]
    r = match[match['c3']]
    seriesdict = {}
    recurr = Function("r")
    # Generating the recurrence relation which works this way:
    # for the second order term the summation begins at n = 2. The coefficients
    # p is multiplied with an*(n - 1)*(n - 2)*x**n-2 and a substitution is made such that
    # the exponent of x becomes n.
    # For example, if p is x, then the second degree recurrence term is
    # an*(n - 1)*(n - 2)*x**n-1, substituting (n - 1) as n, it transforms to
    # an+1*n*(n - 1)*x**n.
    # A similar process is done with the first order and zeroth order term.
    coefflist = [(recurr(n), r), (n*recurr(n), q), (n*(n - 1)*recurr(n), p)]
    for index, coeff in enumerate(coefflist):
        if coeff[1]:
            # Re-center the coefficient about x0 and expand so each additive
            # piece can be matched against s*x**k individually.
            f2 = powsimp(expand((coeff[1]*(x - x0)**(n - index)).subs(x, x + x0)))
            if f2.is_Add:
                addargs = f2.args
            else:
                addargs = [f2]
            for arg in addargs:
                powm = arg.match(s*x**k)
                term = coeff[0]*powm[s]
                if not powm[k].is_Symbol:
                    # Shift the recurrence index so the power of x is n.
                    term = term.subs(n, n - powm[k].as_independent(n)[0])
                startind = powm[k].subs(n, index)
                # Seeing if the startterm can be reduced further.
                # If it vanishes for n lesser than startind, it is
                # equal to summation from n.
                if startind:
                    for i in reversed(range(startind)):
                        if not term.subs(n, i):
                            seriesdict[term] = i
                        else:
                            seriesdict[term] = i + 1
                            break
                else:
                    seriesdict[term] = S(0)
    # Stripping of terms so that the sum starts with the same number.
    teq = S(0)
    suminit = seriesdict.values()
    rkeys = seriesdict.keys()
    req = Add(*rkeys)
    if any(suminit):
        maxval = max(suminit)
        for term in seriesdict:
            val = seriesdict[term]
            if val != maxval:
                for i in range(val, maxval):
                    # NOTE(review): the loop variable ``i`` is unused here, so
                    # the same term (at index ``val``) is added maxval - val
                    # times; confirm whether ``term.subs(n, i)`` was intended.
                    teq += term.subs(n, val)
    finaldict = {}
    if teq:
        fargs = teq.atoms(AppliedUndef)
        if len(fargs) == 1:
            finaldict[fargs.pop()] = 0
        else:
            maxf = max(fargs, key = lambda x: x.args[0])
            sol = solve(teq, maxf)
            if isinstance(sol, list):
                sol = sol[0]
            finaldict[maxf] = sol
    # Finding the recurrence relation in terms of the largest term.
    fargs = req.atoms(AppliedUndef)
    maxf = max(fargs, key = lambda x: x.args[0])
    minf = min(fargs, key = lambda x: x.args[0])
    if minf.args[0].is_Symbol:
        startiter = 0
    else:
        startiter = -minf.args[0].as_independent(n)[0]
    lhs = maxf
    rhs =  solve(req, maxf)
    if isinstance(rhs, list):
        rhs = rhs[0]
    # Checking how many values are already present
    tcounter = len([t for t in finaldict.values() if t])
    for _ in range(tcounter, terms - 3):  # Assuming c0 and c1 to be arbitrary
        # Iterate the recurrence forward, substituting previously found
        # coefficient values at each step.
        check = rhs.subs(n, startiter)
        nlhs = lhs.subs(n, startiter)
        nrhs = check.subs(finaldict)
        finaldict[nlhs] = nrhs
        startiter += 1
    # Post processing
    series = C0 + C1*(x - x0)
    for term in finaldict:
        if finaldict[term]:
            fact = term.args[0]
            series += (finaldict[term].subs([(recurr(0), C0), (recurr(1), C1)])*(
                x - x0)**fact)
    series = collect(expand_mul(series), [C0, C1]) + Order(x**terms)
    return Eq(f(x), series)
def ode_2nd_power_series_regular(eq, func, order, match):
    r"""
    Gives a power series solution to a second order homogeneous differential
    equation with polynomial coefficients at a regular point. A second order
    homogeneous differential equation is of the form
    .. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) = 0
    A point is said to regular singular at `x0` if `x - x0\frac{Q(x)}{P(x)}`
    and `(x - x0)^{2}\frac{R(x)}{P(x)}` are analytic at `x0`. For simplicity
    `P(x)`, `Q(x)` and `R(x)` are assumed to be polynomials. The algorithm for
    finding the power series solutions is:
    1.  Try expressing `(x - x0)P(x)` and `((x - x0)^{2})Q(x)` as power series
        solutions about x0. Find `p0` and `q0` which are the constants of the
        power series expansions.
    2.  Solve the indicial equation `f(m) = m(m - 1) + m*p0 + q0`, to obtain the
        roots `m1` and `m2` of the indicial equation.
    3.  If `m1 - m2` is a non integer there exists two series solutions. If
        `m1 = m2`, there exists only one solution. If `m1 - m2` is an integer,
        then the existence of one solution is confirmed. The other solution may
        or may not exist.
    The power series solution is of the form `x^{m}\sum_{n=0}^\infty a_{n}x^{n}`. The
    coefficients are determined by the following recurrence relation.
    `a_{n} = -\frac{\sum_{k=0}^{n-1} q_{n-k} + (m + k)p_{n-k}}{f(m + n)}`. For the case
    in which `m1 - m2` is an integer, it can be seen from the recurrence relation
    that for the lower root `m`, when `n` equals the difference of both the
    roots, the denominator becomes zero. So if the numerator is not equal to zero,
    a second series solution exists.
    Examples
    ========
    >>> from sympy import dsolve, Function, pprint
    >>> from sympy.abc import x, y
    >>> f = Function("f")
    >>> eq = x*(f(x).diff(x, 2)) + 2*(f(x).diff(x)) + x*f(x)
    >>> pprint(dsolve(eq))
                                  /    6    4    2    \
                                  |   x    x    x     |
              / 4    2    \    C1*|- --- + -- - -- + 1|
              | x    x    |       \  720   24   2     /    / 6\
    f(x) = C2*|--- - -- + 1| + ------------------------ + O\x /
              \120   6    /               x
    References
    ==========
    - George E. Simmons, "Differential Equations with Applications and
      Historical Notes", p.p 176 - 184
    """
    x = func.args[0]
    f = func.func
    C0, C1 = get_numbered_constants(eq, num=2)
    m = Dummy("m")  # for solving the indicial equation
    x0 = match.get('x0')
    terms = match.get('terms', 5)
    p = match['p']
    q = match['q']
    # Generating the indicial equation: pick out the constant terms p0 and
    # q0 of the series expansions of p and q about x0.
    indicial = []
    for term in [p, q]:
        if not term.has(x):
            indicial.append(term)
        else:
            term = series(term, n=1, x0=x0)
            if isinstance(term, Order):
                indicial.append(S(0))
            else:
                for arg in term.args:
                    if not arg.has(x):
                        indicial.append(arg)
                        break
    p0, q0 = indicial
    # Roots of the indicial equation m*(m-1) + m*p0 + q0 = 0.
    sollist = solve(m*(m - 1) + m*p0 + q0, m)
    if sollist and isinstance(sollist, list) and all(
        [sol.is_real for sol in sollist]):
        serdict1 = {}
        serdict2 = {}
        if len(sollist) == 1:
            # Only one series solution exists in this case.
            m1 = m2 = sollist.pop()
            if terms-m1-1 <= 0:
              return Eq(f(x), Order(terms))
            serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
        else:
            m1 = sollist[0]
            m2 = sollist[1]
            if m1 < m2:
                m1, m2 = m2, m1
            # Irrespective of whether m1 - m2 is an integer or not, one
            # Frobenius series solution exists.
            serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
            if not (m1 - m2).is_integer:
                # Second frobenius series solution exists.
                serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1)
            else:
                # Check if second frobenius series solution exists.
                serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1, check=m1)
        if serdict1:
            # Assemble the first Frobenius series: keys are C<power> symbols,
            # their names encode the exponent of (x - x0).
            finalseries1 = C0
            for key in serdict1:
                power = int(key.name[1:])
                finalseries1 += serdict1[key]*(x - x0)**power
            finalseries1 = (x - x0)**m1*finalseries1
            finalseries2 = S(0)
            if serdict2:
                for key in serdict2:
                    power = int(key.name[1:])
                    finalseries2 += serdict2[key]*(x - x0)**power
                finalseries2 += C1
                finalseries2 = (x - x0)**m2*finalseries2
            return Eq(f(x), collect(finalseries1 + finalseries2,
                [C0, C1]) + Order(x**terms))
    # Implicitly returns None when the indicial roots are not all real
    # (or solve() returned nothing), so the caller can try another hint.
def _frobenius(n, m, p0, q0, p, q, x0, x, c, check=None):
    r"""
    Returns a dict with keys as coefficients and values as their values in terms of C0
    """
    n = int(n)
    # In cases where m1 - m2 is not an integer
    m2 = check
    d = Dummy("d")
    numsyms = numbered_symbols("C", start=0)
    numsyms = [next(numsyms) for i in range(n + 1)]
    serlist = []
    # Expand p and q about x0 up to degree n; each expansion is stored as a
    # dict mapping the monomial exponent tuple (i,) -> coefficient.
    for ser in [p, q]:
        # Order term not present
        if ser.is_polynomial(x) and Poly(ser, x).degree() <= n:
            if x0:
                ser = ser.subs(x, x + x0)
            dict_ = Poly(ser, x).as_dict()
        # Order term present
        else:
            tseries = series(ser, x=x0, n=n+1)
            # Removing order
            dict_ = Poly(list(ordered(tseries.args))[: -1], x).as_dict()
        # Fill in with zeros, if coefficients are zero.
        for i in range(n + 1):
            if (i,) not in dict_:
                dict_[(i,)] = S(0)
        serlist.append(dict_)
    pseries = serlist[0]
    qseries = serlist[1]
    # Indicial polynomial; evaluated below at the shifted exponent d = m + i.
    indicial = d*(d - 1) + d*p0 + q0
    frobdict = {}
    # Frobenius recurrence: coefficient C_i is determined from the leading
    # constant c and the previously computed C_1 .. C_{i-1}.
    for i in range(1, n + 1):
        num = c*(m*pseries[(i,)] + qseries[(i,)])
        for j in range(1, i):
            sym = Symbol("C" + str(j))
            num += frobdict[sym]*((m + j)*pseries[(i - j,)] + qseries[(i - j,)])
        # Checking for cases when m1 - m2 is an integer. If num is not zero
        # then a second Frobenius series solution cannot be found. If num is
        # zero then set the constant to zero and proceed.
        if m2 is not None and i == m2 - m:
            if num:
                return False
            else:
                frobdict[numsyms[i]] = S(0)
        else:
            frobdict[numsyms[i]] = -num/(indicial.subs(d, m+i))
    return frobdict
def _nth_order_reducible_match(eq, func):
    r"""
    Matches any differential equation that can be rewritten with a smaller
    order. Only derivatives of ``func`` alone, wrt a single variable,
    are considered, and only in them should ``func`` appear.
    """
    # The ODE machinery only supports functions of a single variable.
    assert len(func.args) == 1
    x = func.args[0]
    # Collect the orders of every pure derivative of func taken wrt x alone.
    orders = []
    for deriv in eq.atoms(Derivative):
        if deriv.expr == func and len(deriv.variable_count) == 1:
            var, count = deriv.variable_count[0]
            if var == x:
                orders.append(count)
    # Reduction only helps when at least two derivative orders are present.
    if len(orders) < 2:
        return
    lowest = min(orders)
    # func itself must appear only inside those derivatives; substituting a
    # placeholder for the lowest derivative must eliminate func entirely.
    placeholder = Dummy()
    if eq.subs(func.diff(x, lowest), placeholder).has(func):
        return
    return {'n': lowest}
def ode_nth_order_reducible(eq, func, order, match):
    r"""
    Solves ODEs that only involve derivatives of the dependent variable using
    a substitution of the form `f^n(x) = g(x)`.
    For example any second order ODE of the form `f''(x) = h(f'(x), x)` can be
    transformed into a pair of 1st order ODEs `g'(x) = h(g(x), x)` and
    `f'(x) = g(x)`. Usually the 1st order ODE for `g` is easier to solve. If
    that gives an explicit solution for `g` then `f` is found simply by
    integration.
    Examples
    ========
    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = Eq(x*f(x).diff(x)**2 + f(x).diff(x, 2), 0)
    >>> dsolve(eq, f(x), hint='nth_order_reducible')
    ... # doctest: +NORMALIZE_WHITESPACE
    Eq(f(x), C1 - sqrt(-1/C2)*log(-C2*sqrt(-1/C2) + x) + sqrt(-1/C2)*log(C2*sqrt(-1/C2) + x))
    """
    x = func.args[0]
    f = func.func
    # n is the lowest derivative order present (found by the matcher).
    n = match['n']
    # get a unique function name for g
    names = [a.name for a in eq.atoms(AppliedUndef)]
    while True:
        name = Dummy().name
        if name not in names:
            g = Function(name)
            break
    w = f(x).diff(x, n)
    # Substitute g(x) for the n-th derivative, giving an ODE of order - n
    # in g; solve that first.
    geq = eq.subs(w, g(x))
    gsol = dsolve(geq, g(x))
    if not isinstance(gsol, list):
        gsol = [gsol]
    # Might be multiple solutions to the reduced ODE:
    fsol = []
    for gsoli in gsol:
        # Substitute the derivative back in and solve for f.
        fsoli = dsolve(gsoli.subs(g(x), w), f(x))  # or do integration n times
        fsol.append(fsoli)
    if len(fsol) == 1:
        fsol = fsol[0]
    return fsol
# This needs to produce an invertible function but the inverse depends on
# which variable we are integrating with respect to. Since the class can
# be stored in cached results we need to ensure that we always get the
# same class back for each particular integration variable so we store these
# classes in a global dict:
_nth_algebraic_diffx_stored = {}
def _nth_algebraic_diffx(var):
    """Return a cached "invertible" Derivative-like class for ``var``."""
    try:
        return _nth_algebraic_diffx_stored[var]
    except KeyError:
        pass
    # Behaves like Derivative wrt var, but solve can invert it.
    class diffx(Function):
        def inverse(self):
            # Avoid integrate() here: fx has been replaced by _t in the
            # equation, so integrals would not be correct while solve
            # is at work.
            return lambda expr: Integral(expr, var) + Dummy('C')
    # setdefault keeps one canonical class per variable even if another
    # caller raced us here.
    return _nth_algebraic_diffx_stored.setdefault(var, diffx)
def _nth_algebraic_match(eq, func):
    r"""
    Matches any differential equation that nth_algebraic can solve. Uses
    `sympy.solve` but teaches it how to integrate derivatives.
    This involves calling `sympy.solve` and does most of the work of finding a
    solution (apart from evaluating the integrals).
    """
    # The independent variable
    var = func.args[0]
    # Derivative that solve can handle:
    diffx = _nth_algebraic_diffx(var)
    # Replace derivatives wrt the independent variable with diffx
    def replace(eq, var):
        def expand_diffx(*args):
            differand, diffs = args[0], args[1:]
            toreplace = differand
            # Rebuild each (variable, count) pair one application at a time;
            # only derivatives wrt var become diffx, others stay Derivative.
            for v, n in diffs:
                for _ in range(n):
                    if v == var:
                        toreplace = diffx(toreplace)
                    else:
                        toreplace = Derivative(toreplace, v)
            return toreplace
        return eq.replace(Derivative, expand_diffx)
    # Restore derivatives in solution afterwards
    def unreplace(eq, var):
        return eq.replace(diffx, lambda e: Derivative(e, var))
    subs_eqn = replace(eq, var)
    try:
        # turn off simplification to protect Integrals that have
        # _t instead of fx in them and would otherwise factor
        # as t_*Integral(1, x)
        solns = solve(subs_eqn, func, simplify=False)
    except NotImplementedError:
        solns = []
    solns = [simplify(unreplace(soln, var)) for soln in solns]
    solns = [Equality(func, soln) for soln in solns]
    return {'var':var, 'solutions':solns}
def ode_nth_algebraic(eq, func, order, match):
    r"""
    Solves an `n`\th order ordinary differential equation using algebra and
    integrals.
    There is no general form for the kind of equation that this can solve. The
    equation is solved algebraically treating differentiation as an
    invertible algebraic function.
    Examples
    ========
    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = Eq(f(x) * (f(x).diff(x)**2 - 1), 0)
    >>> dsolve(eq, f(x), hint='nth_algebraic')
    ... # doctest: +NORMALIZE_WHITESPACE
    [Eq(f(x), 0), Eq(f(x), C1 - x), Eq(f(x), C1 + x)]
    Note that this solver can return algebraic solutions that do not have any
    integration constants (f(x) = 0 in the above example).
    # indirect doctest
    """
    # The heavy lifting was already done by _nth_algebraic_match; here we
    # only prune solutions that are special cases of other solutions.
    var = match['var']
    solutions = _nth_algebraic_remove_redundant_solutions(
        eq, match['solutions'], order, var)
    # A single remaining solution is returned bare, several as a list.
    return solutions[0] if len(solutions) == 1 else solutions
# FIXME: Maybe something like this function should be applied to the solutions
# returned by dsolve in general rather than just for nth_algebraic...
def _nth_algebraic_remove_redundant_solutions(eq, solns, order, var):
    r"""
    Remove redundant solutions from the set of solutions returned by
    nth_algebraic.
    This function is needed because otherwise nth_algebraic can return
    redundant solutions where both algebraic solutions and integral
    solutions are found to the ODE. As an example consider:
    eq = Eq(f(x) * f(x).diff(x), 0)
    There are two ways to find solutions to eq. The first is the algebraic
    solution f(x)=0. The second is to solve the equation f(x).diff(x) = 0
    leading to the solution f(x) = C1. In this particular case we then see
    that the first solution is a special case of the second and we don't
    want to return it.
    This does not always happen for algebraic solutions though since if we
    have
    eq = Eq(f(x)*(1 + f(x).diff(x)), 0)
    then we get the algebraic solution f(x) = 0 and the integral solution
    f(x) = -x + C1 and in this case the two solutions are not equivalent wrt
    initial conditions so both should be returned.
    """
    def special_case(soln_a, soln_b):
        # True when soln_a is soln_b with particular constants chosen.
        return _nth_algebraic_is_special_case_of(soln_a, soln_b, eq, order, var)
    kept = []
    for candidate in solns:
        redundant = False
        for existing in list(kept):
            if special_case(candidate, existing):
                # candidate adds nothing new; drop it.
                redundant = True
                break
            if special_case(existing, candidate):
                # candidate supersedes a previously kept solution.
                kept.remove(existing)
        if not redundant:
            kept.append(candidate)
    return kept
def _nth_algebraic_is_special_case_of(soln1, soln2, eq, order, var):
    r"""
    True if soln1 is found to be a special case of soln2 wrt some value of the
    constants that appear in soln2. False otherwise.
    """
    # The solutions returned by nth_algebraic should be given explicitly as in
    # Eq(f(x), expr). We will equate the RHSs of the two solutions giving an
    # equation f1(x) = f2(x).
    #
    # Since this is supposed to hold for all x it also holds for derivatives
    # f1'(x) and f2'(x). For an order n ode we should be able to differentiate
    # each solution n times to get n+1 equations.
    #
    # We then try to solve those n+1 equations for the integrations constants
    # in f2(x). If we can find a solution that doesn't depend on x then it
    # means that some value of the constants in f1(x) is a special case of
    # f2(x) corresponding to a particular choice of the integration constants.
    constants1 = soln1.free_symbols.difference(eq.free_symbols)
    constants2 = soln2.free_symbols.difference(eq.free_symbols)
    # Rename soln1's constants so they cannot collide with those in soln2.
    constants1_new = get_numbered_constants(soln1.rhs - soln2.rhs, len(constants1))
    if len(constants1) == 1:
        # get_numbered_constants returns a bare symbol when num == 1.
        constants1_new = {constants1_new}
    for c_old, c_new in zip(constants1, constants1_new):
        soln1 = soln1.subs(c_old, c_new)
    # n equations for f1(x)=f2(x), f1'(x)=f2'(x), ...
    lhs = soln1.rhs.doit()
    rhs = soln2.rhs.doit()
    eqns = [Eq(lhs, rhs)]
    for n in range(1, order):
        lhs = lhs.diff(var)
        rhs = rhs.diff(var)
        eq = Eq(lhs, rhs)
        eqns.append(eq)
    # BooleanTrue/False awkwardly show up for trivial equations
    if any(isinstance(eq, BooleanFalse) for eq in eqns):
        return False
    eqns = [eq for eq in eqns if not isinstance(eq, BooleanTrue)]
    constant_solns = solve(eqns, constants2)
    # Sometimes returns a dict and sometimes a list of dicts
    if isinstance(constant_solns, dict):
        constant_solns = [constant_solns]
    # If any solution gives all constants as expressions that don't depend on
    # x then there exists constants for soln2 that give soln1
    for constant_soln in constant_solns:
        if not any(c.has(var) for c in constant_soln.values()):
            return True
    return False
def _nth_linear_match(eq, func, order):
    r"""
    Matches a differential equation to the linear form:
    .. math:: a_n(x) y^{(n)} + \cdots + a_1(x)y' + a_0(x) y + B(x) = 0
    Returns a dict of order:coeff terms, where order is the order of the
    derivative on each term, and coeff is the coefficient of that derivative.
    The key ``-1`` holds the function `B(x)`. Returns ``None`` if the ODE is
    not linear. This function assumes that ``func`` has already been checked
    to be good.
    Examples
    ========
    >>> from sympy import Function, cos, sin
    >>> from sympy.abc import x
    >>> from sympy.solvers.ode import _nth_linear_match
    >>> f = Function('f')
    >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
    ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
    ... sin(x), f(x), 3)
    {-1: x - sin(x), 0: -1, 1: cos(x) + 2, 2: x, 3: 1}
    >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
    ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
    ... sin(f(x)), f(x), 3) == None
    True
    """
    x = func.args[0]
    one_x = {x}
    # terms[i] accumulates the coefficient of the i-th derivative; key -1
    # collects everything independent of func, i.e. B(x).
    terms = {i: S.Zero for i in range(-1, order + 1)}
    for i in Add.make_args(eq):
        if not i.has(func):
            terms[-1] += i
        else:
            c, f = i.as_independent(func)
            if (isinstance(f, Derivative)
                    and set(f.variables) == one_x
                    and f.args[0] == func):
                terms[f.derivative_count] += c
            elif f == func:
                # func has exactly one argument, so this index is 0.
                terms[len(f.args[1:])] += c
            else:
                # func occurs nonlinearly (e.g. inside sin): not linear.
                return None
    return terms
def ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='sol'):
    r"""
    Solves an `n`\th order linear homogeneous variable-coefficient
    Cauchy-Euler equidimensional ordinary differential equation.
    This is an equation with form `0 = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
    \cdots`.
    These equations can be solved in a general manner, by substituting
    solutions of the form `f(x) = x^r`, and deriving a characteristic equation
    for `r`. When there are repeated roots, we include extra terms of the
    form `C_{r k} \ln^k(x) x^r`, where `C_{r k}` is an arbitrary integration
    constant, `r` is a root of the characteristic equation, and `k` ranges
    over the multiplicity of `r`. In the cases where the roots are complex,
    solutions of the form `C_1 x^a \sin(b \log(x)) + C_2 x^a \cos(b \log(x))`
    are returned, based on expansions with Euler's formula. The general
    solution is the sum of the terms found. If SymPy cannot find exact roots
    to the characteristic equation, a
    :py:class:`~sympy.polys.rootoftools.CRootOf` instance will be returned
    instead.
    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(4*x**2*f(x).diff(x, 2) + f(x), f(x),
    ... hint='nth_linear_euler_eq_homogeneous')
    ... # doctest: +NORMALIZE_WHITESPACE
    Eq(f(x), sqrt(x)*(C1 + C2*log(x)))
    Note that because this method does not involve integration, there is no
    ``nth_linear_euler_eq_homogeneous_Integral`` hint.
    The following is for internal use:
    - ``returns = 'sol'`` returns the solution to the ODE.
    - ``returns = 'list'`` returns a list of linearly independent solutions,
      corresponding to the fundamental solution set, for use with non
      homogeneous solution methods like variation of parameters and
      undetermined coefficients. Note that, though the solutions should be
      linearly independent, this function does not explicitly check that. You
      can do ``assert simplify(wronskian(sollist)) != 0`` to check for linear
      independence. Also, ``assert len(sollist) == order`` will need to pass.
    - ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>,
      'list': <list of linearly independent solutions>}``.
    Examples
    ========
    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = f(x).diff(x, 2)*x**2 - 4*f(x).diff(x)*x + 6*f(x)
    >>> pprint(dsolve(eq, f(x),
    ... hint='nth_linear_euler_eq_homogeneous'))
            2
    f(x) = x *(C1 + C2*x)
    References
    ==========
    - https://en.wikipedia.org/wiki/Cauchy%E2%80%93Euler_equation
    - C. Bender & S. Orszag, "Advanced Mathematical Methods for Scientists and
      Engineers", Springer 1999, pp. 12
    # indirect doctest
    """
    global collectterms
    collectterms = []
    x = func.args[0]
    f = func.func
    r = match
    # First, set up characteristic equation: apply the Euler operator to
    # x**symbol and divide out x**symbol, leaving a polynomial in symbol.
    chareq, symbol = S.Zero, Dummy('x')
    for i in r.keys():
        if not isinstance(i, string_types) and i >= 0:
            chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
    chareq = Poly(chareq, symbol)
    chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
    # A generator of constants
    constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
    constants.reverse()
    # Create a dict root: multiplicity or charroots
    charroots = defaultdict(int)
    for root in chareqroots:
        charroots[root] += 1
    gsol = S(0)
    # We need keep track of terms so we can run collect() at the end.
    # This is necessary for constantsimp to work properly.
    ln = log
    for root, multiplicity in charroots.items():
        for i in range(multiplicity):
            if isinstance(root, RootOf):
                gsol += (x**root) * constants.pop()
                if multiplicity != 1:
                    raise ValueError("Value should be 1")
                collectterms = [(0, root, 0)] + collectterms
            elif root.is_real:
                gsol += ln(x)**i*(x**root) * constants.pop()
                collectterms = [(i, root, 0)] + collectterms
            else:
                reroot = re(root)
                imroot = im(root)
                gsol += ln(x)**i * (x**reroot) * (
                    constants.pop() * sin(abs(imroot)*ln(x))
                    + constants.pop() * cos(imroot*ln(x)))
                # Preserve ordering (multiplicity, real part, imaginary part)
                # It will be assumed implicitly when constructing
                # fundamental solution sets.
                collectterms = [(i, reroot, imroot)] + collectterms
    if returns == 'sol':
        return Eq(f(x), gsol)
    # BUG FIX: this was ``returns in ('list' 'both')`` -- implicit string
    # concatenation, i.e. a *substring* test against 'listboth', which also
    # accepted garbage like 'istb'. A tuple membership test is intended.
    elif returns in ('list', 'both'):
        # HOW TO TEST THIS CODE? (dsolve does not pass 'returns' through)
        # Create a list of (hopefully) linearly independent solutions
        gensols = []
        # Keep track of when to use sin or cos for nonzero imroot
        for i, reroot, imroot in collectterms:
            if imroot == 0:
                gensols.append(ln(x)**i*x**reroot)
            else:
                sin_form = ln(x)**i*x**reroot*sin(abs(imroot)*ln(x))
                if sin_form in gensols:
                    cos_form = ln(x)**i*x**reroot*cos(imroot*ln(x))
                    gensols.append(cos_form)
                else:
                    gensols.append(sin_form)
        if returns == 'list':
            return gensols
        else:
            return {'sol': Eq(f(x), gsol), 'list': gensols}
    else:
        raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(eq, func, order, match, returns='sol'):
    r"""
    Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
    ordinary differential equation using undetermined coefficients.
    This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
    \cdots`.
    These equations can be solved in a general manner, by substituting
    solutions of the form `x = exp(t)`, and deriving a characteristic equation
    of form `g(exp(t)) = b_0 f(t) + b_1 f'(t) + b_2 f''(t) \cdots` which can
    be then solved by nth_linear_constant_coeff_undetermined_coefficients if
    g(exp(t)) has finite number of linearly independent derivatives.
    Functions that fit this requirement are finite sums functions of the form
    `a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i`
    is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For
    example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`,
    and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have
    a finite number of derivatives, because they can be expanded into `\sin(a
    x)` and `\cos(b x)` terms. However, SymPy currently cannot do that
    expansion, so you will need to manually rewrite the expression in terms of
    the above to use this method. So, for example, you will need to manually
    convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method
    of undetermined coefficients on it.
    After replacement of x by exp(t), this method works by creating a trial function
    from the expression and all of its linear independent derivatives and
    substituting them into the original ODE. The coefficients for each term
    will be a system of linear equations, which are be solved for and
    substituted, giving the solution. If any of the trial functions are linearly
    dependent on the solution to the homogeneous equation, they are multiplied
    by sufficient `x` to make them linearly independent.
    Examples
    ========
    >>> from sympy import dsolve, Function, Derivative, log
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x)
    >>> dsolve(eq, f(x),
    ... hint='nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients').expand()
    Eq(f(x), C1*x + C2*x**2 + log(x)/2 + 3/4)
    """
    x = func.args[0]
    f = func.func
    r = match
    chareq, eq, symbol = S.Zero, S.Zero, Dummy('x')
    # Characteristic polynomial of the homogeneous Euler operator, obtained
    # by applying it to x**symbol and dividing out x**symbol.
    for i in r.keys():
        if not isinstance(i, string_types) and i >= 0:
            chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
    # Rebuild the equivalent constant-coefficient ODE (in the new variable,
    # here reusing the name x) from that characteristic polynomial.
    for i in range(1,degree(Poly(chareq, symbol))+1):
        eq += chareq.coeff(symbol**i)*diff(f(x), x, i)
    if chareq.as_coeff_add(symbol)[0]:
        eq += chareq.as_coeff_add(symbol)[0]*f(x)
    # Transform the nonhomogeneous term via x -> exp(x); posify keeps the
    # substitution valid under positivity assumptions.
    e, re = posify(r[-1].subs(x, exp(x)))
    eq += e.subs(re)
    match = _nth_linear_match(eq, f(x), ode_order(eq, f(x)))
    match['trialset'] = r['trialset']
    # Solve the constant-coefficient problem, then undo the substitution
    # with x -> log(x).
    return ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match).subs(x, log(x)).subs(f(log(x)), f(x)).expand()
def ode_nth_linear_euler_eq_nonhomogeneous_variation_of_parameters(eq, func, order, match, returns='sol'):
    r"""
    Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
    ordinary differential equation using variation of parameters.
    This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
    \cdots`.
    This method works by assuming that the particular solution takes the form
    .. math:: \sum_{x=1}^{n} c_i(x) y_i(x) {a_n} {x^n} \text{,}
    where `y_i` is the `i`\th solution to the homogeneous equation. The
    solution is then solved using Wronskian's and Cramer's Rule. The
    particular solution is given by multiplying eq given below with `a_n x^{n}`
    .. math:: \sum_{x=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx
                \right) y_i(x) \text{,}
    where `W(x)` is the Wronskian of the fundamental system (the system of `n`
    linearly independent solutions to the homogeneous equation), and `W_i(x)`
    is the Wronskian of the fundamental system with the `i`\th column replaced
    with `[0, 0, \cdots, 0, \frac{x^{- n}}{a_n} g{\left(x \right)}]`.
    This method is general enough to solve any `n`\th order inhomogeneous
    linear differential equation, but sometimes SymPy cannot simplify the
    Wronskian well enough to integrate it. If this method hangs, try using the
    ``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and
    simplifying the integrals manually. Also, prefer using
    ``nth_linear_constant_coeff_undetermined_coefficients`` when it
    applies, because it doesn't use integration, making it faster and more
    reliable.
    Warning, using simplify=False with
    'nth_linear_constant_coeff_variation_of_parameters' in
    :py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will
    not attempt to simplify the Wronskian before integrating. It is
    recommended that you only use simplify=False with
    'nth_linear_constant_coeff_variation_of_parameters_Integral' for this
    method, especially if the solution to the homogeneous equation has
    trigonometric functions in it.
    Examples
    ========
    >>> from sympy import Function, dsolve, Derivative
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - x**4
    >>> dsolve(eq, f(x),
    ... hint='nth_linear_euler_eq_nonhomogeneous_variation_of_parameters').expand()
    Eq(f(x), C1*x + C2*x**2 + x**4/6)
    """
    x = func.args[0]
    f = func.func
    r = match
    # General solution of the homogeneous Euler equation, including the
    # fundamental solution list needed by variation of parameters.
    gensol = ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='both')
    match.update(gensol)
    # Normalize g(x) by the leading coefficient a_n before applying the
    # generic variation-of-parameters machinery.
    r[-1] = r[-1]/r[ode_order(eq, f(x))]
    sol = _solve_variation_of_parameters(eq, func, order, match)
    # Rescale only the particular part by a_n; the homogeneous part stays.
    return Eq(f(x), r['sol'].rhs + (sol.rhs - r['sol'].rhs)*r[ode_order(eq, f(x))])
def ode_almost_linear(eq, func, order, match):
    r"""
    Solves an almost-linear differential equation.
    The general form of an almost linear differential equation is
    .. math:: f(x) g(y) y + k(x) l(y) + m(x) = 0
                \text{where} l'(y) = g(y)\text{.}
    This can be solved by substituting `l(y) = u(y)`. Making the given
    substitution reduces it to a linear differential equation of the form `u'
    + P(x) u + Q(x) = 0`.
    See Also
    ========
    :meth:`sympy.solvers.ode.ode_1st_linear`
    Examples
    ========
    >>> from sympy import Function, Derivative, pprint
    >>> from sympy.solvers.ode import dsolve, classify_ode
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> d = f(x).diff(x)
    >>> eq = x*d + x*f(x) + 1
    >>> dsolve(eq, f(x), hint='almost_linear')
    Eq(f(x), (C1 - Ei(x))*exp(-x))
    References
    ==========
    - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
      of the ACM, Volume 14, Number 8, August 1971, pp. 558
    """
    # classify_ode has already rewritten the matched coefficients into the
    # form the first-order linear solver expects, so this hint is pure
    # delegation: the substitution l(y) = u(y) is encoded in ``match`` and
    # ode_1st_linear produces the final result directly.
    return ode_1st_linear(eq, func, order, match)
def _linear_coeff_match(expr, func):
    r"""
    Helper function to match hint ``linear_coefficients``.
    Matches the expression to the form `(a_1 x + b_1 f(x) + c_1)/(a_2 x + b_2
    f(x) + c_2)` where the following conditions hold:
    1. `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are Rationals;
    2. `c_1` or `c_2` are not equal to zero;
    3. `a_2 b_1 - a_1 b_2` is not equal to zero.
    Return ``xarg``, ``yarg`` where
    1. ``xarg`` = `(b_2 c_1 - b_1 c_2)/(a_2 b_1 - a_1 b_2)`
    2. ``yarg`` = `(a_1 c_2 - a_2 c_1)/(a_2 b_1 - a_1 b_2)`
    Examples
    ========
    >>> from sympy import Function
    >>> from sympy.abc import x
    >>> from sympy.solvers.ode import _linear_coeff_match
    >>> from sympy.functions.elementary.trigonometric import sin
    >>> f = Function('f')
    >>> _linear_coeff_match((
    ... (-25*f(x) - 8*x + 62)/(4*f(x) + 11*x - 11)), f(x))
    (1/9, 22/9)
    >>> _linear_coeff_match(
    ... sin((-5*f(x) - 8*x + 6)/(4*f(x) + x - 1)), f(x))
    (19/27, 2/27)
    >>> _linear_coeff_match(sin(f(x)/x), f(x))
    """
    f = func.func
    x = func.args[0]
    def abc(eq):
        r'''
        Internal function of _linear_coeff_match
        that returns Rationals a, b, c
        if eq is a*x + b*f(x) + c, else None.
        '''
        eq = _mexpand(eq)
        c = eq.as_independent(x, f(x), as_Add=True)[0]
        if not c.is_Rational:
            return
        a = eq.coeff(x)
        if not a.is_Rational:
            return
        b = eq.coeff(f(x))
        if not b.is_Rational:
            return
        # Verify the expression is exactly linear (no cross terms left over).
        if eq == a*x + b*f(x) + c:
            return a, b, c
    def match(arg):
        r'''
        Internal function of _linear_coeff_match that returns Rationals a1,
        b1, c1, a2, b2, c2 and a2*b1 - a1*b2 of the expression (a1*x + b1*f(x)
        + c1)/(a2*x + b2*f(x) + c2) if one of c1 or c2 and a2*b1 - a1*b2 is
        non-zero, else None.
        '''
        n, d = arg.together().as_numer_denom()
        m = abc(n)
        if m is not None:
            a1, b1, c1 = m
            m = abc(d)
            if m is not None:
                a2, b2, c2 = m
                d = a2*b1 - a1*b2
                if (c1 or c2) and d:
                    return a1, b1, c1, a2, b2, c2, d
    # Gather the arguments of all unary functions other than f (e.g. the
    # argument of sin in sin((a1*x + ...)/(a2*x + ...))); if there are none,
    # match against the whole expression. All of them must match identically.
    m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and
         len(fi.args) == 1 and not fi.args[0].is_Function] or {expr}
    m1 = match(m.pop())
    if m1 and all(match(mi) == m1 for mi in m):
        a1, b1, c1, a2, b2, c2, denom = m1
        return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom
def ode_linear_coefficients(eq, func, order, match):
    r"""
    Solves a differential equation with linear coefficients.
    The general form of a differential equation with linear coefficients is
    .. math:: y' + F\left(\!\frac{a_1 x + b_1 y + c_1}{a_2 x + b_2 y +
              c_2}\!\right) = 0\text{,}
    where `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are constants and `a_1 b_2
    - a_2 b_1 \ne 0`.
    This can be solved by substituting:
    .. math:: x = x' + \frac{b_2 c_1 - b_1 c_2}{a_2 b_1 - a_1 b_2}
              y = y' + \frac{a_1 c_2 - a_2 c_1}{a_2 b_1 - a_1
              b_2}\text{.}
    This substitution reduces the equation to a homogeneous differential
    equation.
    See Also
    ========
    :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_best`
    :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`
    :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`
    Examples
    ========
    >>> from sympy import Function, Derivative, pprint
    >>> from sympy.solvers.ode import dsolve, classify_ode
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> df = f(x).diff(x)
    >>> eq = (x + f(x) + 1)*df + (f(x) - 6*x + 1)
    >>> dsolve(eq, hint='linear_coefficients')
    [Eq(f(x), -x - sqrt(C1 + 7*x**2) - 1), Eq(f(x), -x + sqrt(C1 + 7*x**2) - 1)]
    References
    ==========
    - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
      of the ACM, Volume 14, Number 8, August 1971, pp. 558
    """
    # The coordinate shift computed by _linear_coeff_match was folded into
    # ``match`` during classification, so what remains is a homogeneous-
    # coefficient equation that the generic best-method solver handles.
    return ode_1st_homogeneous_coeff_best(eq, func, order, match)
def ode_separable_reduced(eq, func, order, match):
    r"""
    Solves a differential equation that can be reduced to the separable form.
    The general form of this equation is
    .. math:: y' + (y/x) H(x^n y) = 0\text{}.
    This can be solved by substituting `u(y) = x^n y`. The equation then
    reduces to the separable form `\frac{u'}{u (\mathrm{power} - H(u))} -
    \frac{1}{x} = 0`.
    See Also
    ========
    :meth:`sympy.solvers.ode.ode_separable`
    Examples
    ========
    >>> from sympy import Function, Derivative, pprint
    >>> from sympy.solvers.ode import dsolve, classify_ode
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> d = f(x).diff(x)
    >>> eq = (x - x**2*f(x))*d - f(x)
    >>> dsolve(eq, hint='separable_reduced')
    [Eq(f(x), (1 - sqrt(C1*x**2 + 1))/x), Eq(f(x), (sqrt(C1*x**2 + 1) + 1)/x)]
    References
    ==========
    - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
      of the ACM, Volume 14, Number 8, August 1971, pp. 558
    """
    # Build a match dict in the shape ode_separable expects: the x-part and
    # y-part of the separated equation, each with a trivial overall 'coeff'.
    x = func.args[0]
    f = func.func
    y = Dummy('y')
    # H written in terms of the substitution variable u = x**power * f(x).
    hsub = match['u'].subs(match['t'], y)
    sep_ycoeff = 1/(y*(match['power'] - hsub))
    r = {'m1': {y: 1, x: -1/x, 'coeff': 1},
         'm2': {y: sep_ycoeff, x: 1, 'coeff': 1},
         'y': y,
         'hint': x**match['power']*f(x)}
    return ode_separable(eq, func, order, r)
def ode_1st_power_series(eq, func, order, match):
    r"""
    The power series solution is a method which gives the Taylor series expansion
    to the solution of a differential equation.
    For a first order differential equation `\frac{dy}{dx} = h(x, y)`, a power
    series solution exists at a point `x = x_{0}` if `h(x, y)` is analytic at `x_{0}`.
    The solution is given by
    .. math:: y(x) = y(x_{0}) + \sum_{n = 1}^{\infty} \frac{F_{n}(x_{0},b)(x - x_{0})^n}{n!},
    where `y(x_{0}) = b` is the value of y at the initial value of `x_{0}`.
    To compute the values of the `F_{n}(x_{0},b)` the following algorithm is
    followed, until the required number of terms are generated.
    1. `F_1 = h(x_{0}, b)`
    2. `F_{n+1} = \frac{\partial F_{n}}{\partial x} + \frac{\partial F_{n}}{\partial y}F_{1}`
    Examples
    ========
    >>> from sympy import Function, Derivative, pprint, exp
    >>> from sympy.solvers.ode import dsolve
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = exp(x)*(f(x).diff(x)) - f(x)
    >>> pprint(dsolve(eq, hint='1st_power_series'))
                           3       4       5
                       C1*x    C1*x    C1*x     / 6\
    f(x) = C1 + C1*x - ----- + ----- + ----- + O\x /
                         6       24      60
    References
    ==========
    - Travis W. Walker, Analytic power series technique for solving first-order
      differential equations, p.p 17, 18
    """
    x = func.args[0]
    y = match['y']
    f = func.func
    # dy/dx = h(x, y), built from the matched d*dy + e*dx form.
    h = -match[match['d']]/match[match['e']]
    point = match.get('f0')
    value = match.get('f0val')
    terms = match.get('terms')
    # F_1 = h; later F_n follow the recurrence given in the docstring.
    F = h
    if not h:
        return Eq(f(x), value)
    # Series accumulator, seeded with the initial condition. (Named so it
    # does not shadow the module-level `series` function.)
    sol_series = value
    if terms > 1:
        hc = h.subs({x: point, y: value})
        # BUG FIX: also test for -oo. In SymPy, -oo is the distinct
        # NegativeInfinity singleton, so hc.has(oo) does NOT catch it; the
        # loop below already checked -oo, but this first check did not.
        if hc.has(oo) or hc.has(NaN) or hc.has(-oo) or hc.has(zoo):
            # Derivative does not exist at the expansion point: not analytic.
            return Eq(f(x), oo)
        elif hc:
            sol_series += hc*(x - point)
    for factcount in range(2, terms):
        Fnew = F.diff(x) + F.diff(y)*h
        Fnewc = Fnew.subs({x: point, y: value})
        # Same analyticity check as above for each higher derivative.
        if Fnewc.has(oo) or Fnewc.has(NaN) or Fnewc.has(-oo) or Fnewc.has(zoo):
            return Eq(f(x), oo)
        sol_series += Fnewc*((x - point)**factcount)/factorial(factcount)
        F = Fnew
    sol_series += Order(x**terms)
    return Eq(f(x), sol_series)
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
        returns='sol'):
    r"""
    Solves an `n`\th order linear homogeneous differential equation with
    constant coefficients.

    This is an equation of the form

    .. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x)
                + a_0 f(x) = 0\text{.}

    These equations can be solved in a general manner, by taking the roots of
    the characteristic equation `a_n m^n + a_{n-1} m^{n-1} + \cdots + a_1 m +
    a_0 = 0`.  The solution will then be the sum of `C_n x^i e^{r x}` terms,
    for each where `C_n` is an arbitrary constant, `r` is a root of the
    characteristic equation and `i` is one of each from 0 to the multiplicity
    of the root - 1 (for example, a root 3 of multiplicity 2 would create the
    terms `C_1 e^{3 x} + C_2 x e^{3 x}`).  The exponential is usually expanded
    for complex roots using Euler's equation `e^{I x} = \cos(x) + I \sin(x)`.
    Complex roots always come in conjugate pairs in polynomials with real
    coefficients, so the two roots will be represented (after simplifying the
    constants) as `e^{a x} \left(C_1 \cos(b x) + C_2 \sin(b x)\right)`.

    If SymPy cannot find exact roots to the characteristic equation, a
    :py:class:`~sympy.polys.rootoftools.CRootOf` instance will be return
    instead.

    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x),
    ... hint='nth_linear_constant_coeff_homogeneous')
    ... # doctest: +NORMALIZE_WHITESPACE
    Eq(f(x), C5*exp(x*CRootOf(_x**5 + 10*_x - 2, 0))
    + (C1*sin(x*im(CRootOf(_x**5 + 10*_x - 2, 1)))
    + C2*cos(x*im(CRootOf(_x**5 + 10*_x - 2, 1))))*exp(x*re(CRootOf(_x**5 + 10*_x - 2, 1)))
    + (C3*sin(x*im(CRootOf(_x**5 + 10*_x - 2, 3)))
    + C4*cos(x*im(CRootOf(_x**5 + 10*_x - 2, 3))))*exp(x*re(CRootOf(_x**5 + 10*_x - 2, 3))))

    Note that because this method does not involve integration, there is no
    ``nth_linear_constant_coeff_homogeneous_Integral`` hint.

    The following is for internal use:

    - ``returns = 'sol'`` returns the solution to the ODE.
    - ``returns = 'list'`` returns a list of linearly independent solutions,
      for use with non homogeneous solution methods like variation of
      parameters and undetermined coefficients.  Note that, though the
      solutions should be linearly independent, this function does not
      explicitly check that.  You can do ``assert simplify(wronskian(sollist))
      != 0`` to check for linear independence.  Also, ``assert len(sollist) ==
      order`` will need to pass.
    - ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>,
      'list': <list of linearly independent solutions>}``.

    Examples
    ========

    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) -
    ... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x),
    ... hint='nth_linear_constant_coeff_homogeneous'))
                        x                            -2*x
    f(x) = (C1 + C2*x)*e  + (C3*sin(x) + C4*cos(x))*e

    References
    ==========

    - https://en.wikipedia.org/wiki/Linear_differential_equation section:
      Nonhomogeneous_equation_with_constant_coefficients
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 211

    # indirect doctest

    """
    x = func.args[0]
    f = func.func
    r = match

    # First, set up characteristic equation.  The match dict maps derivative
    # order (int) to coefficient; skip string keys and negative orders.
    chareq, symbol = S.Zero, Dummy('x')
    for i in r.keys():
        if isinstance(i, str) or i < 0:
            pass
        else:
            chareq += r[i]*symbol**i
    chareq = Poly(chareq, symbol)
    # Can't just call roots because it doesn't return rootof for unsolveable
    # polynomials.
    chareqroots = roots(chareq, multiple=True)
    if len(chareqroots) != order:
        chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
    chareq_is_complex = not all(i.is_real for i in chareq.all_coeffs())
    # A generator of constants
    constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
    # Create a dict root: multiplicity or charroots
    charroots = defaultdict(int)
    for root in chareqroots:
        charroots[root] += 1
    # We need to keep track of terms so we can run collect() at the end.
    # This is necessary for constantsimp to work properly.
    global collectterms
    collectterms = []
    gensols = []
    conjugate_roots = []  # used to prevent double-use of conjugate roots
    # Loop over roots in the order provided by roots/rootof...
    for root in chareqroots:
        # but don't repeat multiple roots.
        if root not in charroots:
            continue
        multiplicity = charroots.pop(root)
        for i in range(multiplicity):
            if chareq_is_complex:
                gensols.append(x**i*exp(root*x))
                collectterms = [(i, root, 0)] + collectterms
                continue
            reroot = re(root)
            imroot = im(root)
            if imroot.has(atan2) and reroot.has(atan2):
                # Remove this condition when re and im stop returning
                # circular atan2 usages.
                gensols.append(x**i*exp(root*x))
                collectterms = [(i, root, 0)] + collectterms
            else:
                if root in conjugate_roots:
                    collectterms = [(i, reroot, imroot)] + collectterms
                    continue
                if imroot == 0:
                    gensols.append(x**i*exp(reroot*x))
                    collectterms = [(i, reroot, 0)] + collectterms
                    continue
                conjugate_roots.append(conjugate(root))
                gensols.append(x**i*exp(reroot*x) * sin(abs(imroot) * x))
                gensols.append(x**i*exp(reroot*x) * cos(imroot * x))
                # This ordering is important
                collectterms = [(i, reroot, imroot)] + collectterms
    if returns == 'list':
        return gensols
    # NOTE: this must be a tuple with a comma.  A previous version read
    # ('sol' 'both') — implicit string concatenation to 'solboth' — which
    # silently turned the membership test into a substring check and would
    # also accept junk values such as 'olb'.
    elif returns in ('sol', 'both'):
        gsol = Add(*[i*j for (i, j) in zip(constants, gensols)])
        if returns == 'sol':
            return Eq(f(x), gsol)
        else:
            return {'sol': Eq(f(x), gsol), 'list': gensols}
    else:
        raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
    r"""
    Solves an `n`\th order linear differential equation with constant
    coefficients using the method of undetermined coefficients.

    Applicable to equations of the form

    .. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x)
                + a_0 f(x) = P(x)\text{,}

    where the inhomogeneity `P(x)` has only a finite number of linearly
    independent derivatives, i.e. it is a finite sum of terms of the form
    `a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, with
    `i` a non-negative integer and `a`, `b`, `c`, `d` constants.  Any
    polynomial in `x`, and functions like `x^2 e^{2 x}`, `x \sin(x)` and
    `e^x \cos(x)`, qualify.  Products of `\sin`'s and `\cos`'s must be
    rewritten by hand first (e.g. `\sin^2(x)` as `(1 + \cos(2 x))/2`)
    because SymPy does not currently perform that expansion automatically.

    The method forms a trial function from the expression and all of its
    linearly independent derivatives; substituting it into the original ODE
    yields a system of linear equations for the coefficients, which is
    solved and substituted back.  Trial terms that are linearly dependent on
    the homogeneous solution are multiplied by sufficient powers of `x` to
    make them independent.

    Examples
    ========

    >>> from sympy import Function, dsolve, pprint, exp, cos
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) -
    ... 4*exp(-x)*x**2 + cos(2*x), f(x),
    ... hint='nth_linear_constant_coeff_undetermined_coefficients'))
           /       4\
           |      x |  -x   4*sin(2*x)   3*cos(2*x)
    f(x) = |C1 + C2*x + --|*e   - ---------- + ----------
           \       3 /            25           25

    References
    ==========

    - https://en.wikipedia.org/wiki/Method_of_undetermined_coefficients
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 221

    # indirect doctest

    """
    # Solve the associated homogeneous equation first.  returns='both'
    # yields both the general solution ('sol') and the solution basis
    # ('list'), which the helper below requires alongside 'trialset'.
    homogeneous = ode_nth_linear_constant_coeff_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
    r"""
    Helper function for the method of undetermined coefficients.

    See the
    :py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_undetermined_coefficients`
    docstring for more information on this method.

    The parameter ``match`` should be a dictionary that has the following
    keys:

    ``list``
        A list of solutions to the homogeneous equation, such as the list
        returned by
        ``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.

    ``sol``
        The general solution, such as the solution returned by
        ``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.

    ``trialset``
        The set of trial functions as returned by
        ``_undetermined_coefficients_match()['trialset']``.

    """
    x = func.args[0]
    f = func.func
    r = match
    # Infinite stream of fresh Dummy symbols for the unknown coefficients.
    coeffs = numbered_symbols('a', cls=Dummy)
    coefflist = []
    gensols = r['list']
    gsol = r['sol']
    trialset = r['trialset']
    notneedset = set([])
    # ``collectterms`` is set as a module-level global by
    # ode_nth_linear_constant_coeff_homogeneous(); each entry is a tuple
    # (i, reroot, imroot) describing one homogeneous-solution term.
    global collectterms
    if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply" +
            " undetermined coefficients to " + str(eq) +
            " (number of terms != order)")
    usedsin = set([])
    mult = 0  # The multiplicity of the root
    getmult = True
    for i, reroot, imroot in collectterms:
        if getmult:
            mult = i + 1
            getmult = False
        if i == 0:
            getmult = True
        if imroot:
            # Alternate between sin and cos
            if (i, reroot) in usedsin:
                check = x**i*exp(reroot*x)*cos(imroot*x)
            else:
                check = x**i*exp(reroot*x)*sin(abs(imroot)*x)
                usedsin.add((i, reroot))
        else:
            check = x**i*exp(reroot*x)
        if check in trialset:
            # If an element of the trial function is already part of the
            # homogeneous solution, we need to multiply by sufficient x to
            # make it linearly independent.  We also don't need to bother
            # checking for the coefficients on those elements, since we
            # already know it will be 0.
            while True:
                if check*x**mult in trialset:
                    mult += 1
                else:
                    break
            trialset.add(check*x**mult)
            notneedset.add(check)
    newtrialset = trialset - notneedset
    # Build the trial function: one unknown coefficient per trial term.
    trialfunc = 0
    for i in newtrialset:
        c = next(coeffs)
        coefflist.append(c)
        trialfunc += c*i
    # Substitute the trial function for f(x) in the ODE and evaluate the
    # resulting derivatives.
    eqs = sub_func_doit(eq, f(x), trialfunc)
    coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))
    eqs = _mexpand(eqs)
    # Collect the coefficient of each trial term; equating each collected
    # coefficient to zero gives a linear system for the unknowns.
    for i in Add.make_args(eqs):
        s = separatevars(i, dict=True, symbols=[x])
        coeffsdict[s[x]] += s['coeff']
    coeffvals = solve(list(coeffsdict.values()), coefflist)
    if not coeffvals:
        raise NotImplementedError(
            "Could not solve `%s` using the "
            "method of undetermined coefficients "
            "(unable to solve for coefficients)." % eq)
    # Particular solution = trial function with solved coefficients;
    # the general solution adds it to the homogeneous solution.
    psol = trialfunc.subs(coeffvals)
    return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x):
    r"""
    Returns a trial function match if undetermined coefficients can be applied
    to ``expr``, and ``None`` otherwise.

    A trial expression can be found for an expression for use with the method
    of undetermined coefficients if the expression is an
    additive/multiplicative combination of constants, polynomials in `x` (the
    independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and
    `e^{a x}` terms (in other words, it has a finite number of linearly
    independent derivatives).

    Note that you may still need to multiply each term returned here by
    sufficient `x` to make it linearly independent with the solutions to the
    homogeneous equation.

    This is intended for internal use by ``undetermined_coefficients`` hints.

    SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of
    only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented.  So,
    for example, you will need to manually convert `\sin^2(x)` into `[1 +
    \cos(2 x)]/2` to properly apply the method of undetermined coefficients on
    it.

    Examples
    ========

    >>> from sympy import log, exp
    >>> from sympy.solvers.ode import _undetermined_coefficients_match
    >>> from sympy.abc import x
    >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
    {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}
    >>> _undetermined_coefficients_match(log(x), x)
    {'test': False}

    """
    a = Wild('a', exclude=[x])
    b = Wild('b', exclude=[x])
    expr = powsimp(expr, combine='exp')  # exp(x)*exp(2*x + 1) => exp(3*x + 1)
    retdict = {}

    def _test_term(expr, x):
        r"""
        Test if ``expr`` fits the proper form for undetermined coefficients.
        """
        if not expr.has(x):
            # Constants always qualify.
            return True
        elif expr.is_Add:
            return all(_test_term(i, x) for i in expr.args)
        elif expr.is_Mul:
            if expr.has(sin, cos):
                foundtrig = False
                # Make sure that there is only one trig function in the args.
                # See the docstring.
                for i in expr.args:
                    if i.has(sin, cos):
                        if foundtrig:
                            return False
                        else:
                            foundtrig = True
            return all(_test_term(i, x) for i in expr.args)
        elif expr.is_Function:
            # Only sin/cos/exp with linear argument a*x + b qualify.
            if expr.func in (sin, cos, exp):
                if expr.args[0].match(a*x + b):
                    return True
                else:
                    return False
            else:
                return False
        elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
                expr.exp >= 0:
            # Non-negative integer powers of the symbol (polynomial terms).
            return True
        elif expr.is_Pow and expr.base.is_number:
            # Exponentials written as c**(a*x + b).
            if expr.exp.match(a*x + b):
                return True
            else:
                return False
        elif expr.is_Symbol or expr.is_number:
            return True
        else:
            return False

    def _get_trial_set(expr, x, exprs=None):
        r"""
        Returns a set of trial terms for undetermined coefficients.

        The idea behind undetermined coefficients is that the terms expression
        repeat themselves after a finite number of derivatives, except for the
        coefficients (they are linearly dependent).  So if we collect these,
        we should have the terms of our trial function.
        """
        # Use a None sentinel instead of the former mutable default
        # ``exprs=set([])`` — a fresh set per call with no default sharing.
        if exprs is None:
            exprs = set()

        def _remove_coefficient(expr, x):
            r"""
            Returns the expression without a coefficient.

            Similar to expr.as_independent(x)[1], except it only works
            multiplicatively.
            """
            term = S.One
            if expr.is_Mul:
                for i in expr.args:
                    if i.has(x):
                        term *= i
            elif expr.has(x):
                term = expr
            return term

        expr = expand_mul(expr)
        if expr.is_Add:
            for term in expr.args:
                if _remove_coefficient(term, x) in exprs:
                    pass
                else:
                    exprs.add(_remove_coefficient(term, x))
                    exprs = exprs.union(_get_trial_set(term, x, exprs))
        else:
            term = _remove_coefficient(expr, x)
            tmpset = exprs.union({term})
            oldset = set()
            # Differentiate until no new (coefficient-free) terms appear.
            while tmpset != oldset:
                # If you get stuck in this loop, then _test_term is probably
                # broken
                oldset = tmpset.copy()
                expr = expr.diff(x)
                term = _remove_coefficient(expr, x)
                if term.is_Add:
                    tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
                else:
                    tmpset.add(term)
            exprs = tmpset
        return exprs

    retdict['test'] = _test_term(expr, x)
    if retdict['test']:
        # Try to generate a list of trial solutions that will have the
        # undetermined coefficients.  Note that if any of these are not linearly
        # independent with any of the solutions to the homogeneous equation,
        # then they will need to be multiplied by sufficient x to make them so.
        # This function DOES NOT do that (it doesn't even look at the
        # homogeneous equation).
        retdict['trialset'] = _get_trial_set(expr, x)
    return retdict
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
    r"""
    Solves an `n`\th order linear differential equation with constant
    coefficients using the method of variation of parameters.

    Applicable to any equation of the form

    .. math:: f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x) + a_0
                f(x) = P(x)\text{.}

    The particular solution is assumed to take the form

    .. math:: \sum_{x=1}^{n} c_i(x) y_i(x)\text{,}

    with `y_i` the `i`\th solution of the homogeneous equation, and is then
    found using Wronskians and Cramer's Rule:

    .. math:: \sum_{x=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx
                \right) y_i(x) \text{,}

    where `W(x)` is the Wronskian of the fundamental system (the `n` linearly
    independent homogeneous solutions) and `W_i(x)` is that Wronskian with the
    `i`\th column replaced by `[0, 0, \cdots, 0, P(x)]`.

    The method is fully general for constant-coefficient inhomogeneous linear
    ODEs, but SymPy may fail to simplify the Wronskian well enough to
    integrate it.  If this method hangs, try the
    ``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and
    simplify the integrals manually.  Prefer
    ``nth_linear_constant_coeff_undetermined_coefficients`` when it applies:
    it avoids integration, so it is faster and more reliable.

    Warning: using ``simplify=False`` with
    'nth_linear_constant_coeff_variation_of_parameters' in
    :py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because the
    Wronskian is then not simplified before integration.  Use
    ``simplify=False`` only with
    'nth_linear_constant_coeff_variation_of_parameters_Integral', especially
    when the homogeneous solution contains trigonometric functions.

    Examples
    ========

    >>> from sympy import Function, dsolve, pprint, exp, log
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) +
    ... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x),
    ... hint='nth_linear_constant_coeff_variation_of_parameters'))
           /                     3 \
           |           2   x *(6*log(x) - 11)|  x
    f(x) = |C1 + C2*x + C3*x  + ------------------|*e
           \                        36           /

    References
    ==========

    - https://en.wikipedia.org/wiki/Variation_of_parameters
    - http://planetmath.org/VariationOfParameters
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 233

    # indirect doctest

    """
    # Solve the homogeneous equation first; returns='both' supplies the
    # general solution ('sol') and the fundamental system ('list') which
    # _solve_variation_of_parameters consumes from the match dict.
    homogeneous = ode_nth_linear_constant_coeff_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
    r"""
    Helper function for the method of variation of parameters and nonhomogeneous euler eq.

    See the
    :py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_variation_of_parameters`
    docstring for more information on this method.

    The parameter ``match`` should be a dictionary that has the following
    keys:

    ``list``
        A list of solutions to the homogeneous equation, such as the list
        returned by
        ``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.

    ``sol``
        The general solution, such as the solution returned by
        ``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.

    """
    x = func.args[0]
    f = func.func
    r = match
    psol = 0
    gensols = r['list']
    gsol = r['sol']
    # Wronskian of the fundamental system; must be nonzero for the
    # homogeneous solutions to be linearly independent.
    wr = wronskian(gensols, x)
    if r.get('simplify', True):
        wr = simplify(wr)  # We need much better simplification for
                           # some ODEs. See issue 4662, for example.
        # To reduce commonly occurring sin(x)**2 + cos(x)**2 to 1
        wr = trigsimp(wr, deep=True, recursive=True)
    if not wr:
        # The wronskian will be 0 iff the solutions are not linearly
        # independent.
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply " +
            "variation of parameters to " + str(eq) + " (Wronskian == 0)")
    if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply " +
            "variation of parameters to " +
            str(eq) + " (number of terms != order)")
    # Cramer's Rule: each term carries an alternating sign (-1)**(order - i);
    # the loop order over ``gensols`` therefore matters — do not reorder.
    negoneterm = (-1)**(order)
    for i in gensols:
        # wronskian of the system with solution i removed, times the
        # inhomogeneity r[-1], over the full Wronskian, integrated.
        psol += negoneterm*Integral(wronskian([sol for sol in gensols if sol != i], x)*r[-1]/wr, x)*i/r[order]
        negoneterm *= -1
    if r.get('simplify', True):
        psol = simplify(psol)
        psol = trigsimp(psol, deep=True)
    return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
    r"""
    Solves separable 1st order differential equations.

    A separable equation is any equation that can be written as `P(y)
    \tfrac{dy}{dx} = Q(x)`; the solution follows from rearranging and
    integrating: `\int P(y) \,dy = \int Q(x) \,dx`.  The back end is
    :py:meth:`sympy.simplify.simplify.separatevars`, so if a separable
    equation is missed by this solver, that function is the most likely
    culprit.  :py:meth:`~sympy.simplify.simplify.separatevars` handles most
    of the expansion and factoring needed to bring a separable `F(x, y)`
    into the form `P(x)\cdot{}Q(y)`.  The general solution is::

        >>> from sympy import Function, dsolve, Eq, pprint
        >>> from sympy.abc import x
        >>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
        >>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
        >>> pprint(genform)
                     d
        a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x))
                     dx
        >>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
             f(x)
           /                  /
          |                  |
          |  b(y)            | c(x)
          |  ---- dy = C1 +  | ---- dx
          |  d(y)            | a(x)
          |                  |
         /                  /

    Examples
    ========

    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
    ... hint='separable', simplify=False))
       /   2       \         2
    log\3*f (x) - 1/        x
    ---------------- = C1 + --
           6                2

    References
    ==========

    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 52

    # indirect doctest

    """
    x = func.args[0]
    f = func.func
    C1 = get_numbered_constants(eq, num=1)
    m1 = match['m1']
    m2 = match['m2']
    yvar = match['y']
    # separable_reduced stashes its reduced variable under 'hint'; the plain
    # separable hint integrates up to f(x) itself.
    upper = match.get('hint', f(x))
    lhs = Integral(m2['coeff']*m2[yvar]/m1[yvar], (yvar, None, upper))
    rhs = Integral(-m1['coeff']*m1[x]/m2[x], x) + C1
    return Eq(lhs, rhs)
def checkinfsol(eq, infinitesimals, func=None, order=None):
    r"""
    This function is used to check if the given infinitesimals are the
    actual infinitesimals of the given first order differential equation.
    This method is specific to the Lie Group Solver of ODEs.

    As of now, it simply checks, by substituting the infinitesimals in the
    partial differential equation.

    .. math:: \frac{\partial \eta}{\partial x} + \left(\frac{\partial \eta}{\partial y}
                - \frac{\partial \xi}{\partial x}\right)*h
                - \frac{\partial \xi}{\partial y}*h^{2}
                - \xi\frac{\partial h}{\partial x} - \eta\frac{\partial h}{\partial y} = 0

    where `\eta`, and `\xi` are the infinitesimals and `h(x,y) = \frac{dy}{dx}`

    The infinitesimals should be given in the form of a list of dicts
    ``[{xi(x, y): inf, eta(x, y): inf}]``, corresponding to the
    output of the function infinitesimals.  It returns a list
    of values of the form ``[(True/False, sol)]`` where ``sol`` is the value
    obtained after substituting the infinitesimals in the PDE.  If it
    is ``True``, then ``sol`` would be 0.

    """
    if isinstance(eq, Equality):
        # Normalize Eq(lhs, rhs) to an expression equal to zero.
        eq = eq.lhs - eq.rhs
    if not func:
        eq, func = _preprocess(eq)
    variables = func.args
    if len(variables) != 1:
        raise ValueError("ODE's have only one independent variable")
    else:
        x = variables[0]
        if not order:
            order = ode_order(eq, func)
        if order != 1:
            raise NotImplementedError("Lie groups solver has been implemented "
                "only for first order differential equations")
        else:
            df = func.diff(x)
            # Try to match the ODE as a*df + b to read off h = dy/dx = -b/a.
            a = Wild('a', exclude = [df])
            b = Wild('b', exclude = [df])
            match = collect(expand(eq), df).match(a*df + b)
            if match:
                h = -simplify(match[b]/match[a])
            else:
                # Fall back to solving the ODE for df explicitly.
                try:
                    sol = solve(eq, df)
                except NotImplementedError:
                    raise NotImplementedError("Infinitesimals for the "
                        "first order ODE could not be found")
                else:
                    h = sol[0]  # Find infinitesimals for one solution
            y = Dummy('y')
            h = h.subs(func, y)
            # xi/eta in (x, y) are substituted into the PDE; dxi/deta in
            # (x, func) are the keys used by the infinitesimals dicts.
            xi = Function('xi')(x, y)
            eta = Function('eta')(x, y)
            dxi = Function('xi')(x, func)
            deta = Function('eta')(x, func)
            # The linearized symmetry condition (PDE in the docstring).
            pde = (eta.diff(x) + (eta.diff(y) - xi.diff(x))*h -
                (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)))
            soltup = []
            for sol in infinitesimals:
                tsol = {xi: S(sol[dxi]).subs(func, y),
                    eta: S(sol[deta]).subs(func, y)}
                sol = simplify(pde.subs(tsol).doit())
                if sol:
                    # Nonzero residual: the pair is not a symmetry.
                    soltup.append((False, sol.subs(y, func)))
                else:
                    soltup.append((True, 0))
            return soltup
def ode_lie_group(eq, func, order, match):
    r"""
    This hint implements the Lie group method of solving first order differential
    equations.  The aim is to convert the given differential equation from the
    given coordinate given system into another coordinate system where it becomes
    invariant under the one-parameter Lie group of translations.  The converted ODE is
    quadrature and can be solved easily.  It makes use of the
    :py:meth:`sympy.solvers.ode.infinitesimals` function which returns the
    infinitesimals of the transformation.

    The coordinates `r` and `s` can be found by solving the following Partial
    Differential Equations.

    .. math :: \xi\frac{\partial r}{\partial x} + \eta\frac{\partial r}{\partial y}
                  = 0

    .. math :: \xi\frac{\partial s}{\partial x} + \eta\frac{\partial s}{\partial y}
                  = 1

    The differential equation becomes separable in the new coordinate system

    .. math :: \frac{ds}{dr} = \frac{\frac{\partial s}{\partial x} +
                 h(x, y)\frac{\partial s}{\partial y}}{
                 \frac{\partial r}{\partial x} + h(x, y)\frac{\partial r}{\partial y}}

    After finding the solution by integration, it is then converted back to the original
    coordinate system by substituting `r` and `s` in terms of `x` and `y` again.

    Examples
    ========

    >>> from sympy import Function, dsolve, Eq, exp, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(f(x).diff(x) + 2*x*f(x) - x*exp(-x**2), f(x),
    ... hint='lie_group'))
           /      2\    2
           |     x |  -x
    f(x) = |C1 + --|*e
           \     2 /

    References
    ==========

    - Solving differential equations by Symmetry Groups,
      John Starrett, pp. 1 - pp. 14

    """
    heuristics = lie_heuristics
    inf = {}
    f = func.func
    x = func.args[0]
    df = func.diff(x)
    xi = Function("xi")
    eta = Function("eta")
    # User-supplied infinitesimals, if any; popped so the remaining match
    # keys can be tested below.
    xis = match.pop('xi')
    etas = match.pop('eta')
    if match:
        # The matcher already found the form d + e*df = 0, so h = dy/dx = -d/e.
        h = -simplify(match[match['d']]/match[match['e']])
        y = match['y']
    else:
        # Otherwise solve the ODE for df explicitly to obtain h.
        try:
            sol = solve(eq, df)
            if sol == []:
                raise NotImplementedError
        except NotImplementedError:
            raise NotImplementedError("Unable to solve the differential equation " +
                str(eq) + " by the lie group method")
        else:
            y = Dummy("y")
            h = sol[0].subs(func, y)
    if xis is not None and etas is not None:
        # Validate the user-supplied infinitesimals before using them.
        inf = [{xi(x, f(x)): S(xis), eta(x, f(x)): S(etas)}]
        if not checkinfsol(eq, inf, func=f(x), order=1)[0][0]:
            raise ValueError("The given infinitesimals xi and eta"
                " are not the infinitesimals to the given equation")
        else:
            heuristics = ["user_defined"]
    match = {'h': h, 'y': y}
    # This is done so that if:
    # a] solve raises a NotImplementedError.
    # b] any heuristic raises a ValueError
    # another heuristic can be used.
    tempsol = []  # Used by solve below
    for heuristic in heuristics:
        try:
            if not inf:
                inf = infinitesimals(eq, hint=heuristic, func=func, order=1, match=match)
        except ValueError:
            continue
        else:
            for infsim in inf:
                xiinf = (infsim[xi(x, func)]).subs(func, y)
                etainf = (infsim[eta(x, func)]).subs(func, y)
                # This condition creates recursion while using pdsolve.
                # Since the first step while solving a PDE of form
                # a*(f(x, y).diff(x)) + b*(f(x, y).diff(y)) + c = 0
                # is to solve the ODE dy/dx = b/a
                if simplify(etainf/xiinf) == h:
                    continue
                # Canonical coordinates: r solves the homogeneous PDE,
                # s the inhomogeneous one (rpde - 1 = 0).
                rpde = f(x, y).diff(x)*xiinf + f(x, y).diff(y)*etainf
                r = pdsolve(rpde, func=f(x, y)).rhs
                s = pdsolve(rpde - 1, func=f(x, y)).rhs
                # Strip arbitrary functions introduced by pdsolve.
                newcoord = [_lie_group_remove(coord) for coord in [r, s]]
                r = Dummy("r")
                s = Dummy("s")
                C1 = Symbol("C1")
                rcoord = newcoord[0]
                scoord = newcoord[-1]
                # Invert the coordinate change: express x, y in terms of r, s.
                try:
                    sol = solve([r - rcoord, s - scoord], x, y, dict=True)
                except NotImplementedError:
                    continue
                else:
                    sol = sol[0]
                    xsub = sol[x]
                    ysub = sol[y]
                    # ds/dr = (s_x + h*s_y) / (r_x + h*r_y) in new coordinates.
                    num = simplify(scoord.diff(x) + scoord.diff(y)*h)
                    denom = simplify(rcoord.diff(x) + rcoord.diff(y)*h)
                    if num and denom:
                        diffeq = simplify((num/denom).subs([(x, xsub), (y, ysub)]))
                        sep = separatevars(diffeq, symbols=[r, s], dict=True)
                        if sep:
                            # Trying to separate, r and s coordinates
                            deq = integrate((1/sep[s]), s) + C1 - integrate(sep['coeff']*sep[r], r)
                            # Substituting and reverting back to original coordinates
                            deq = deq.subs([(r, rcoord), (s, scoord)])
                            try:
                                sdeq = solve(deq, y)
                            except NotImplementedError:
                                tempsol.append(deq)
                            else:
                                if len(sdeq) == 1:
                                    return Eq(f(x), sdeq.pop())
                                else:
                                    return [Eq(f(x), sol) for sol in sdeq]
                    elif denom: # (ds/dr) is zero which means s is constant
                        return Eq(f(x), solve(scoord - C1, y)[0])
                    elif num: # (dr/ds) is zero which means r is constant
                        return Eq(f(x), solve(rcoord - C1, y)[0])
    # If nothing works, return solution as it is, without solving for y
    if tempsol:
        if len(tempsol) == 1:
            return Eq(tempsol.pop().subs(y, f(x)), 0)
        else:
            return [Eq(sol.subs(y, f(x)), 0) for sol in tempsol]
    raise NotImplementedError("The given ODE " + str(eq) + " cannot be solved by"
        + " the lie group method")
def _lie_group_remove(coords):
    r"""
    This function is strictly meant for internal use by the Lie group ODE solving
    method.  It replaces arbitrary functions returned by pdsolve with either 0 or 1 or the
    args of the arbitrary function.

    The algorithm used is:
    1] If coords is an instance of an Undefined Function, then the args are returned
    2] If the arbitrary function is present in an Add object, it is replaced by zero.
    3] If the arbitrary function is present in an Mul object, it is replaced by one.
    4] If coords has no Undefined Function, it is returned as it is.

    Examples
    ========

    >>> from sympy.solvers.ode import _lie_group_remove
    >>> from sympy import Function
    >>> from sympy.abc import x, y
    >>> F = Function("F")
    >>> eq = x**2*y
    >>> _lie_group_remove(eq)
    x**2*y
    >>> eq = F(x**2*y)
    >>> _lie_group_remove(eq)
    x**2*y
    >>> eq = y**2*x + F(x**3)
    >>> _lie_group_remove(eq)
    x*y**2
    >>> eq = (F(x**3) + y)*x**4
    >>> _lie_group_remove(eq)
    x**4*y

    """
    if isinstance(coords, AppliedUndef):
        # Rule 1: unwrap the arbitrary function and keep its argument.
        return coords.args[0]
    elif coords.is_Add:
        # Rule 2: arbitrary functions appearing in a sum are zeroed out.
        subfunc = coords.atoms(AppliedUndef)
        if subfunc:
            for func in subfunc:
                coords = coords.subs(func, 0)
        return coords
    elif coords.is_Pow:
        # Recurse into base and exponent independently.
        base, expr = coords.as_base_exp()
        base = _lie_group_remove(base)
        expr = _lie_group_remove(expr)
        return base**expr
    elif coords.is_Mul:
        # Rule 3: arbitrary-function factors are replaced by one, i.e.
        # simply skipped.  Fix: test each factor ``arg``; the previous check
        # ``isinstance(coords, AppliedUndef)`` was always False here (coords
        # is known to be a Mul), so such factors were recursed into and
        # contributed their arguments instead of being dropped.
        mulargs = []
        coordargs = coords.args
        for arg in coordargs:
            if not isinstance(arg, AppliedUndef):
                mulargs.append(_lie_group_remove(arg))
        return Mul(*mulargs)
    return coords
def infinitesimals(eq, func=None, order=None, hint='default', match=None):
    r"""
    The infinitesimal functions of an ordinary differential equation, `\xi(x,y)`
    and `\eta(x,y)`, are the infinitesimals of the Lie group of point transformations
    for which the differential equation is invariant.  So, the ODE `y'=f(x,y)`
    would admit a Lie group `x^*=X(x,y;\varepsilon)=x+\varepsilon\xi(x,y)`,
    `y^*=Y(x,y;\varepsilon)=y+\varepsilon\eta(x,y)` such that `(y^*)'=f(x^*, y^*)`.
    A change of coordinates, to `r(x,y)` and `s(x,y)`, can be performed so this Lie group
    becomes the translation group, `r^*=r` and `s^*=s+\varepsilon`.
    They are tangents to the coordinate curves of the new system.

    Consider the transformation `(x, y) \to (X, Y)` such that the
    differential equation remains invariant.  `\xi` and `\eta` are the tangents to
    the transformed coordinates `X` and `Y`, at `\varepsilon=0`.

    .. math:: \left(\frac{\partial X(x,y;\varepsilon)}{\partial\varepsilon
                }\right)|_{\varepsilon=0} = \xi,
              \left(\frac{\partial Y(x,y;\varepsilon)}{\partial\varepsilon
                }\right)|_{\varepsilon=0} = \eta,

    The infinitesimals can be found by solving the following PDE:

        >>> from sympy import Function, diff, Eq, pprint
        >>> from sympy.abc import x, y
        >>> xi, eta, h = map(Function, ['xi', 'eta', 'h'])
        >>> h = h(x, y)  # dy/dx = h
        >>> eta = eta(x, y)
        >>> xi = xi(x, y)
        >>> genform = Eq(eta.diff(x) + (eta.diff(y) - xi.diff(x))*h
        ... - (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)), 0)
        >>> pprint(genform)
        /d               d           \                     d              2       d
        |--(eta(x, y)) - --(xi(x, y))|*h(x, y) - eta(x, y)*--(h(x, y)) - h (x, y)*--(x
        \dy              dx          /                     dy                     dy
        <BLANKLINE>
                  d             d
        i(x, y)) - xi(x, y)*--(h(x, y)) + --(eta(x, y)) = 0
                  dx                dx

    Solving the above mentioned PDE is not trivial, and can be solved only by
    making intelligent assumptions for `\xi` and `\eta` (heuristics).  Once an
    infinitesimal is found, the attempt to find more heuristics stops.  This is done to
    optimise the speed of solving the differential equation.  If a list of all the
    infinitesimals is needed, ``hint`` should be flagged as ``all``, which gives
    the complete list of infinitesimals.  If the infinitesimals for a particular
    heuristic needs to be found, it can be passed as a flag to ``hint``.

    Examples
    ========

    >>> from sympy import Function, diff
    >>> from sympy.solvers.ode import infinitesimals
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> eq = f(x).diff(x) - x**2*f(x)
    >>> infinitesimals(eq)
    [{eta(x, f(x)): exp(x**3/3), xi(x, f(x)): 0}]

    References
    ==========

    - Solving differential equations by Symmetry Groups,
      John Starrett, pp. 1 - pp. 14

    """
    if isinstance(eq, Equality):
        # Normalize Eq(lhs, rhs) to an expression equal to zero.
        eq = eq.lhs - eq.rhs
    if not func:
        eq, func = _preprocess(eq)
    variables = func.args
    if len(variables) != 1:
        raise ValueError("ODE's have only one independent variable")
    else:
        x = variables[0]
        if not order:
            order = ode_order(eq, func)
        if order != 1:
            raise NotImplementedError("Infinitesimals for only "
                "first order ODE's have been implemented")
        else:
            df = func.diff(x)
            # Matching differential equation of the form a*df + b
            a = Wild('a', exclude = [df])
            b = Wild('b', exclude = [df])
            if match:  # Used by lie_group hint
                # Caller already computed h = dy/dx and the dummy y.
                h = match['h']
                y = match['y']
            else:
                match = collect(expand(eq), df).match(a*df + b)
                if match:
                    h = -simplify(match[b]/match[a])
                else:
                    # Fall back to solving for df directly.
                    try:
                        sol = solve(eq, df)
                    except NotImplementedError:
                        raise NotImplementedError("Infinitesimals for the "
                            "first order ODE could not be found")
                    else:
                        h = sol[0]  # Find infinitesimals for one solution
                y = Dummy("y")
                h = h.subs(func, y)
            u = Dummy("u")
            hx = h.diff(x)
            hy = h.diff(y)
            # dy/dx of the ODE with x and y swapped; some heuristics use it.
            hinv = ((1/h).subs([(x, u), (y, x)])).subs(u, y)  # Inverse ODE
            match = {'h': h, 'func': func, 'hx': hx, 'hy': hy, 'y': y, 'hinv': hinv}
            if hint == 'all':
                # Collect the (deduplicated) results of every heuristic.
                xieta = []
                for heuristic in lie_heuristics:
                    # Heuristic functions are named lie_heuristic_<name> and
                    # looked up dynamically in this module's namespace.
                    function = globals()['lie_heuristic_' + heuristic]
                    inflist = function(match, comp=True)
                    if inflist:
                        xieta.extend([inf for inf in inflist if inf not in xieta])
                if xieta:
                    return xieta
                else:
                    raise NotImplementedError("Infinitesimals could not be found for "
                        "the given ODE")
            elif hint == 'default':
                # Stop at the first heuristic that succeeds.
                for heuristic in lie_heuristics:
                    function = globals()['lie_heuristic_' + heuristic]
                    xieta = function(match, comp=False)
                    if xieta:
                        return xieta
                raise NotImplementedError("Infinitesimals could not be found for"
                    " the given ODE")
            elif hint not in lie_heuristics:
                raise ValueError("Heuristic not recognized: " + hint)
            else:
                # Run exactly the heuristic the caller asked for.
                function = globals()['lie_heuristic_' + hint]
                xieta = function(match, comp=True)
                if xieta:
                    return xieta
                else:
                    raise ValueError("Infinitesimals could not be found using the"
                        " given heuristic")
def lie_heuristic_abaco1_simple(match, comp=False):
    r"""
    First heuristic: try the four simplest shapes for the infinitesimals,

    .. math:: \xi = 0, \eta = f(x)

    .. math:: \xi = 0, \eta = f(y)

    .. math:: \xi = f(x), \eta = 0

    .. math:: \xi = f(y), \eta = 0

    Each assumption collapses the determining PDE

    .. math:: \frac{\partial \eta}{\partial x} + (\frac{\partial \eta}{\partial y}
                - \frac{\partial \xi}{\partial x})*h
                - \frac{\partial \xi}{\partial y}*h^{2}
                - \xi*\frac{\partial h}{\partial x} - \eta*\frac{\partial h}{\partial y} = 0

    to a single quadrature.  For instance, with `\xi = 0` and `\eta = f(x)` it
    reduces to `f'(x) - f\frac{\partial h}{\partial y} = 0`, which can be
    integrated whenever `\frac{\partial h}{\partial y}` depends on `x` alone.
    The other three assumptions are handled analogously.

    References
    ==========
    - E.S Cheb-Terrab, L.G.S Duarte and L.A,C.P da Mota, Computer Algebra
      Solving of First Order ODEs Using Symmetry Methods, pp. 8
    """
    xieta = []
    y = match['y']
    h = match['h']
    func = match['func']
    x = func.args[0]
    hx = match['hx']
    hy = match['hy']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)

    # Each attempt is (integrand, integration variable, symbol that must be
    # absent from the integrand, substitute y -> func in the result?, is the
    # result xi rather than eta?).
    attempts = [
        (hy, x, y, False, False),       # xi = 0,    eta = f(x)
        (hy/h, y, x, True, False),      # xi = 0,    eta = f(y)
        (-hx/h, x, y, False, True),     # xi = f(x), eta = 0
        (-hx/h**2, y, x, True, True),   # xi = f(y), eta = 0
    ]
    for integrand, var, absent, subsfunc, gives_xi in attempts:
        if absent in integrand.free_symbols:
            continue
        try:
            res = exp(integrate(integrand, var))
        except NotImplementedError:
            continue
        if subsfunc:
            res = res.subs(y, func)
        inf = {xi: res, eta: S(0)} if gives_xi else {xi: S(0), eta: res}
        if not comp:
            return [inf]
        if inf not in xieta:
            xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_abaco1_product(match, comp=False):
    r"""
    Second heuristic: look for infinitesimals in product form,

    .. math:: \eta = 0, \xi = f(x)*g(y)

    .. math:: \eta = f(x)*g(y), \xi = 0

    The first form works when
    `\frac{1}{h^{2}}\frac{\partial^2}{\partial x \partial y}\log(h)` is
    separable in `x` and `y`; the separated factor containing `x` is `f(x)`,
    and `g(y)` is obtained from

    .. math:: e^{\int f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)\,dy}

    provided `f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)` depends
    on `y` only.  The second form is the first applied to the inverse ODE
    `\frac{dy}{dx} = \frac{1}{h(y, x)}`, with the coordinates interchanged
    back afterwards to give `\eta = f(x)*g(y)`.

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 7 - pp. 8
    """
    xieta = []
    y = match['y']
    h = match['h']
    hinv = match['hinv']
    func = match['func']
    x = func.args[0]
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    u1 = Dummy("u1")

    for use_inverse, odefac in ((False, h), (True, hinv)):
        sep = separatevars(((log(odefac).diff(y)).diff(x))/odefac**2,
            dict=True, symbols=[x, y])
        if not (sep and sep['coeff']):
            continue
        fx = sep[x]
        gy = simplify(fx*((1/(fx*odefac)).diff(x)))
        if x in gy.free_symbols:
            continue
        gy = exp(integrate(gy, y))
        if use_inverse:
            # Swap the coordinates back to the original ODE's variables.
            etaval = ((fx*gy).subs([(x, u1), (y, x)])).subs(u1, y)
            inf = {eta: etaval.subs(y, func), xi: S(0)}
        else:
            inf = {eta: S(0), xi: (fx*gy).subs(y, func)}
        if not comp:
            return [inf]
        if inf not in xieta:
            xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_bivariate(match, comp=False):
    r"""
    The third heuristic assumes the infinitesimals `\xi` and `\eta`
    to be bi-variate polynomials in `x` and `y`. The assumption made here
    for the logic below is that `h` is a rational function in `x` and `y`
    though that may not be necessary for the infinitesimals to be
    bivariate polynomials. The coefficients of the infinitesimals
    are found out by substituting them in the PDE and grouping similar terms
    that are polynomials and since they form a linear system, solve and check
    for non trivial solutions. The degree of the assumed bivariates
    are increased till a certain maximum value.

    References
    ==========
    - Lie Groups and Differential Equations
      pp. 327 - pp. 329
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    if h.is_rational_function():
        # The maximum degree that the infinitesimals can take is
        # calculated by this technique: substitute placeholder symbols into
        # the determining PDE and read off the total degree of its numerator.
        etax, etay, etad, xix, xiy, xid = symbols("etax etay etad xix xiy xid")
        ipde = etax + (etay - xix)*h - xiy*h**2 - xid*hx - etad*hy
        num, denom = cancel(ipde).as_numer_denom()
        deg = Poly(num, x, y).total_degree()
        # Re-state the determining PDE with unevaluated Function placeholders
        # so that generic polynomials can be substituted for dxi/deta below.
        deta = Function('deta')(x, y)
        dxi = Function('dxi')(x, y)
        ipde = (deta.diff(x) + (deta.diff(y) - dxi.diff(x))*h - (dxi.diff(y))*h**2
            - dxi*hx - deta*hy)
        xieq = Symbol("xi0")
        etaeq = Symbol("eta0")
        # Build generic bivariate polynomials of total degree `deg`, one
        # undetermined coefficient symbol per monomial (xi_i_j * x**i * y**j).
        for i in range(deg + 1):
            if i:
                xieq += Add(*[
                    Symbol("xi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                    for power in range(i + 1)])
                etaeq += Add(*[
                    Symbol("eta_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                    for power in range(i + 1)])
        pden, denom = (ipde.subs({dxi: xieq, deta: etaeq}).doit()).as_numer_denom()
        pden = expand(pden)
        # If the individual terms are monomials, the coefficients
        # are grouped
        # NOTE(review): ``polyy`` is only bound when pden is a polynomial Add;
        # otherwise the next line raises NameError — presumably unreachable
        # because h is rational here, but confirm.
        if pden.is_polynomial(x, y) and pden.is_Add:
            polyy = Poly(pden, x, y).as_dict()
        if polyy:
            # The monomial coefficients form a homogeneous linear system in
            # the undetermined xi_*/eta_* symbols.
            symset = xieq.free_symbols.union(etaeq.free_symbols) - {x, y}
            soldict = solve(polyy.values(), *symset)
            if isinstance(soldict, list):
                soldict = soldict[0]
            if any(soldict.values()):
                xired = xieq.subs(soldict)
                etared = etaeq.subs(soldict)
                # Scaling is done by substituting one for the parameters
                # This can be any number except zero.
                dict_ = dict((sym, 1) for sym in symset)
                inf = {eta: etared.subs(dict_).subs(y, func),
                    xi: xired.subs(dict_).subs(y, func)}
                return [inf]
def lie_heuristic_chi(match, comp=False):
    r"""
    The aim of the fourth heuristic is to find the function `\chi(x, y)`
    that satisfies the PDE `\frac{d\chi}{dx} + h\frac{d\chi}{dx}
    - \frac{\partial h}{\partial y}\chi = 0`.

    This assumes `\chi` to be a bivariate polynomial in `x` and `y`. By intuition,
    `h` should be a rational function in `x` and `y`. The method used here is
    to substitute a general bivariate polynomial for `\chi` up to a certain
    maximum degree. The coefficients of the polynomials are calculated by
    collecting terms of the same order in `x` and `y`.

    After finding `\chi`, the next step is to use `\eta = \xi*h + \chi`, to
    determine `\xi` and `\eta`. This can be done by dividing `\chi` by `h`
    which would give `-\xi` as the quotient and `\eta` as the remainder.

    References
    ==========
    - E.S Cheb-Terrab, L.G.S Duarte and L.A,C.P da Mota, Computer Algebra
      Solving of First Order ODEs Using Symmetry Methods, pp. 8
    """
    h = match['h']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    if h.is_rational_function():
        # Bound the degree of the ansatz by the total degree of the PDE's
        # numerator after substituting placeholder symbols.
        schi, schix, schiy = symbols("schi, schix, schiy")
        cpde = schix + h*schiy - hy*schi
        num, denom = cancel(cpde).as_numer_denom()
        deg = Poly(num, x, y).total_degree()
        chi = Function('chi')(x, y)
        chix = chi.diff(x)
        chiy = chi.diff(y)
        cpde = chix + h*chiy - hy*chi
        # Generic bivariate polynomial for chi with one undetermined symbol
        # per monomial (chi_i_j * x**i * y**j).
        chieq = Symbol("chi")
        for i in range(1, deg + 1):
            chieq += Add(*[
                Symbol("chi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                for power in range(i + 1)])
        cnum, cden = cancel(cpde.subs({chi : chieq}).doit()).as_numer_denom()
        cnum = expand(cnum)
        if cnum.is_polynomial(x, y) and cnum.is_Add:
            cpoly = Poly(cnum, x, y).as_dict()
            if cpoly:
                # Solve the homogeneous linear system formed by the monomial
                # coefficients; any nontrivial solution gives a valid chi.
                solsyms = chieq.free_symbols - {x, y}
                soldict = solve(cpoly.values(), *solsyms)
                if isinstance(soldict, list):
                    soldict = soldict[0]
                if any(soldict.values()):
                    chieq = chieq.subs(soldict)
                    # Free parameters are scaled to 1 (any nonzero value works).
                    dict_ = dict((sym, 1) for sym in solsyms)
                    chieq = chieq.subs(dict_)
                    # After finding chi, the main aim is to find out
                    # eta, xi by the equation eta = xi*h + chi
                    # One method to set xi, would be rearranging it to
                    # (eta/h) - xi = (chi/h). This would mean dividing
                    # chi by h would give -xi as the quotient and eta
                    # as the remainder. Thanks to Sean Vig for suggesting
                    # this method.
                    xic, etac = div(chieq, h)
                    inf = {eta: etac.subs(y, func), xi: -xic.subs(y, func)}
                    return [inf]
def lie_heuristic_function_sum(match, comp=False):
    r"""
    This heuristic uses the following two assumptions on `\xi` and `\eta`

    .. math:: \eta = 0, \xi = f(x) + g(y)

    .. math:: \eta = f(x) + g(y), \xi = 0

    The first assumption of this heuristic holds good if

    .. math:: \frac{\partial}{\partial y}[(h\frac{\partial^{2}}{
                \partial x^{2}}(h^{-1}))^{-1}]

    is separable in `x` and `y`,

    1. The separated factors containing `y` is `\frac{\partial g}{\partial y}`.
       From this `g(y)` can be determined.
    2. The separated factors containing `x` is `f''(x)`.
    3. `h\frac{\partial^{2}}{\partial x^{2}}(h^{-1})` equals
       `\frac{f''(x)}{f(x) + g(y)}`. From this `f(x)` can be determined.

    The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as
    `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties of the first
    assumption satisfies. After obtaining `f(x)` and `g(y)`, the coordinates
    are again interchanged, to get `\eta` as `f(x) + g(y)`.

    For both assumptions, the constant factors are separated among `g(y)`
    and `f''(x)`, such that `f''(x)` obtained from 3] is the same as that
    obtained from 2]. If not possible, then this heuristic fails.

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 7 - pp. 8
    """
    xieta = []
    h = match['h']
    func = match['func']
    hinv = match['hinv']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    # First pass works on h directly; second pass on the inverse ODE hinv,
    # with the coordinates swapped back before building the infinitesimals.
    for odefac in [h, hinv]:
        factor = odefac*((1/odefac).diff(x, 2))
        sep = separatevars((1/factor).diff(y), dict=True, symbols=[x, y])
        if sep and sep['coeff'] and sep[x].has(x) and sep[y].has(y):
            # k is the undetermined constant split between g(y) and f''(x);
            # it is fixed below so that both determinations of f'' agree.
            k = Dummy("k")
            try:
                gy = k*integrate(sep[y], y)
            except NotImplementedError:
                pass
            else:
                fdd = 1/(k*sep[x]*sep['coeff'])
                fx = simplify(fdd/factor - gy)
                # check == 0 iff the reconstructed f(x) is consistent with f''.
                check = simplify(fx.diff(x, 2) - fdd)
                if fx:
                    if not check:
                        fx = fx.subs(k, 1)
                        gy = (gy/k)
                    else:
                        sol = solve(check, k)
                        if sol:
                            sol = sol[0]
                            fx = fx.subs(k, sol)
                            gy = (gy/k)*sol
                        else:
                            continue
                    if odefac == hinv:  # Inverse ODE
                        fx = fx.subs(x, y)
                        gy = gy.subs(y, x)
                    etaval = factor_terms(fx + gy)
                    # Drop any overall constant factor: infinitesimals are
                    # only determined up to scaling.
                    if etaval.is_Mul:
                        etaval = Mul(*[arg for arg in etaval.args if arg.has(x, y)])
                    if odefac == hinv:  # Inverse ODE
                        inf = {eta: etaval.subs(y, func), xi : S(0)}
                    else:
                        inf = {xi: etaval.subs(y, func), eta : S(0)}
                    if not comp:
                        return [inf]
                    else:
                        xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_abaco2_similar(match, comp=False):
    r"""
    This heuristic uses the following two assumptions on `\xi` and `\eta`

    .. math:: \eta = g(x), \xi = f(x)

    .. math:: \eta = f(y), \xi = g(y)

    For the first assumption,

    1. First `\frac{\frac{\partial h}{\partial y}}{\frac{\partial^{2} h}{
       \partial yy}}` is calculated. Let us say this value is A

    2. If this is constant, then `h` is matched to the form `A(x) + B(x)e^{
       \frac{y}{C}}` then, `\frac{e^{\int \frac{A(x)}{C} \,dx}}{B(x)}` gives `f(x)`
       and `A(x)*f(x)` gives `g(x)`

    3. Otherwise `\frac{\frac{\partial A}{\partial X}}{\frac{\partial A}{
       \partial Y}} = \gamma` is calculated. If

       a] `\gamma` is a function of `x` alone

       b] `\frac{\gamma\frac{\partial h}{\partial y} - \gamma'(x) - \frac{
          \partial h}{\partial x}}{h + \gamma} = G` is a function of `x` alone.
          then, `e^{\int G \,dx}` gives `f(x)` and `-\gamma*f(x)` gives `g(x)`

    The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as
    `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties of the first assumption
    satisfies. After obtaining `f(x)` and `g(x)`, the coordinates are again
    interchanged, to get `\xi` as `f(x^*)` and `\eta` as `g(y^*)`

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 10 - pp. 12
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    hinv = match['hinv']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)

    # --- First assumption: eta = g(x), xi = f(x), analysed on h itself ---
    factor = cancel(h.diff(y)/h.diff(y, 2))
    factorx = factor.diff(x)
    factory = factor.diff(y)
    if not factor.has(x) and not factor.has(y):
        A = Wild('A', exclude=[y])
        B = Wild('B', exclude=[y])
        C = Wild('C', exclude=[x, y])
        # BUG FIX: guard against a failed match — the original indexed the
        # result unconditionally, raising TypeError when h has no such form.
        wildmatch = h.match(A + B*exp(y/C))
        if wildmatch:
            try:
                # BUG FIX: the original read exp(-integrate(expr), x), passing
                # the integration variable to exp instead of integrate.
                # NOTE(review): the docstring shows a positive exponent for
                # this integral — the sign here follows the original code;
                # confirm against the Cheb-Terrab/Roche paper.
                tau = exp(-integrate(wildmatch[A]/wildmatch[C], x))/wildmatch[B]
            except NotImplementedError:
                pass
            else:
                gx = wildmatch[A]*tau
                return [{xi: tau, eta: gx}]
    else:
        gamma = cancel(factorx/factory)
        if not gamma.has(y):
            tauint = cancel((gamma*hy - gamma.diff(x) - hx)/(h + gamma))
            if not tauint.has(y):
                try:
                    tau = exp(integrate(tauint, x))
                except NotImplementedError:
                    pass
                else:
                    gx = -tau*gamma
                    return [{xi: tau, eta: gx}]

    # --- Second assumption: same analysis applied to the inverse ODE hinv,
    # with the coordinates swapped back in the returned infinitesimals ---
    factor = cancel(hinv.diff(y)/hinv.diff(y, 2))
    factorx = factor.diff(x)
    factory = factor.diff(y)
    if not factor.has(x) and not factor.has(y):
        A = Wild('A', exclude=[y])
        B = Wild('B', exclude=[y])
        C = Wild('C', exclude=[x, y])
        # BUG FIX: this branch analyses the inverse ODE, so the pattern must
        # be matched against hinv (the original matched h again).
        wildmatch = hinv.match(A + B*exp(y/C))
        if wildmatch:
            try:
                tau = exp(-integrate(wildmatch[A]/wildmatch[C], x))/wildmatch[B]
            except NotImplementedError:
                pass
            else:
                gx = wildmatch[A]*tau
                return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
    else:
        gamma = cancel(factorx/factory)
        if not gamma.has(y):
            tauint = cancel((gamma*hinv.diff(y) - gamma.diff(x) - hinv.diff(x))/(
                hinv + gamma))
            if not tauint.has(y):
                try:
                    tau = exp(integrate(tauint, x))
                except NotImplementedError:
                    pass
                else:
                    gx = -tau*gamma
                    return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
def lie_heuristic_abaco2_unique_unknown(match, comp=False):
    r"""
    This heuristic assumes the presence of unknown functions or known functions
    with non-integer powers.

    1. A list of all functions and non-integer powers containing x and y
    2. Loop over each element `f` in the list, find `\frac{\frac{\partial f}{\partial y}}{
       \frac{\partial f}{\partial x}} = R`

       If it is separable in `x` and `y`, let `X` be the factors containing `x`. Then

       a] Check if `\xi = X` and `\eta = -\frac{X}{R}` satisfy the PDE. If yes, then return
          `\xi` and `\eta`
       b] Check if `\xi = \frac{-R}{X}` and `\eta = -\frac{1}{X}` satisfy the PDE.
          If yes, then return `\xi` and `\eta`

       If not, then check if

       a] :math:`\xi = -R,\eta = 1`
       b] :math:`\xi = 1, \eta = -\frac{1}{R}`

       are solutions.

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 10 - pp. 12
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)

    # Candidate building blocks: non-integer powers and undefined functions
    # involving both x and y.
    funclist = []
    for atom in h.atoms(Pow):
        # BUG FIX: the original unpacked into a local named ``exp``, shadowing
        # sympy's exponential function for the remainder of this scope.
        base, expo = atom.as_base_exp()
        if base.has(x) and base.has(y):
            if not expo.is_Integer:
                funclist.append(atom)
    for function in h.atoms(AppliedUndef):
        syms = function.free_symbols
        if x in syms and y in syms:
            funclist.append(function)

    for f in funclist:
        # R = f_y / f_x; separability of R decides which candidate pair of
        # infinitesimals is tested against the determining PDE.
        frac = cancel(f.diff(y)/f.diff(x))
        sep = separatevars(frac, dict=True, symbols=[x, y])
        if sep and sep['coeff']:
            xitry1 = sep[x]
            etatry1 = -1/(sep[y]*sep['coeff'])
            pde1 = etatry1.diff(y)*h - xitry1.diff(x)*h - xitry1*hx - etatry1*hy
            if not simplify(pde1):
                return [{xi: xitry1, eta: etatry1.subs(y, func)}]
            xitry2 = 1/etatry1
            etatry2 = 1/xitry1
            pde2 = etatry2.diff(x) - (xitry2.diff(y))*h**2 - xitry2*hx - etatry2*hy
            if not simplify(expand(pde2)):
                return [{xi: xitry2.subs(y, func), eta: etatry2}]
        else:
            etatry = -1/frac
            pde = etatry.diff(x) + etatry.diff(y)*h - hx - etatry*hy
            if not simplify(pde):
                return [{xi: S(1), eta: etatry.subs(y, func)}]
            xitry = -frac
            pde = -xitry.diff(x)*h - xitry.diff(y)*h**2 - xitry*hx - hy
            if not simplify(expand(pde)):
                return [{xi: xitry.subs(y, func), eta: S(1)}]
def lie_heuristic_abaco2_unique_general(match, comp=False):
    r"""
    This heuristic finds if infinitesimals of the form `\eta = f(x)`, `\xi = g(y)`
    without making any assumptions on `h`.

    The complete sequence of steps is given in the paper mentioned below.

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 10 - pp. 12
    """
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    # Auxiliary quantities from the paper's derivation; the heuristic only
    # applies when all three are nonzero.
    A = hx.diff(y)
    B = hy.diff(y) + hy**2
    C = hx.diff(x) - hx**2
    if not (A and B and C):
        return
    Ax = A.diff(x)
    Ay = A.diff(y)
    Axy = Ax.diff(y)
    Axx = Ax.diff(x)
    Ayy = Ay.diff(y)
    D = simplify(2*Axy + hx*Ay - Ax*hy + (hx*hy + 2*A)*A)*A - 3*Ax*Ay
    if not D:
        # Degenerate branch D == 0: existence requires E1 != 0, E2 == 0 and
        # E3 == 0 before the closed-form infinitesimals below are valid.
        E1 = simplify(3*Ax**2 + ((hx**2 + 2*C)*A - 2*Axx)*A)
        if E1:
            E2 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
            if not E2:
                E3 = simplify(
                    E1*((28*Ax + 4*hx*A)*A**3 - E1*(hy*A + Ay)) - E1.diff(x)*8*A**4)
                if not E3:
                    etaval = cancel((4*A**3*(Ax - hx*A) + E1*(hy*A - Ay))/(S(2)*A*E1))
                    # NOTE(review): membership test on a sympy expression;
                    # presumably this means "x does not appear in etaval" —
                    # confirm, `etaval.has(x)` may be the intended check.
                    if x not in etaval:
                        try:
                            etaval = exp(integrate(etaval, y))
                        except NotImplementedError:
                            pass
                        else:
                            xival = -4*A**3*etaval/E1
                            if y not in xival:
                                return [{xi: xival, eta: etaval.subs(y, func)}]
    else:
        # Generic branch D != 0: mirror conditions with D playing a role in
        # E2/E3 and in the closed form for eta.
        E1 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
        if E1:
            E2 = simplify(
                4*A**3*D - D**2 + E1*((2*Axx - (hx**2 + 2*C)*A)*A - 3*Ax**2))
            if not E2:
                E3 = simplify(
                    -(A*D)*E1.diff(y) + ((E1.diff(x) - hy*D)*A + 3*Ay*D +
                    (A*hx - 3*Ax)*E1)*E1)
                if not E3:
                    etaval = cancel(((A*hx - Ax)*E1 - (Ay + A*hy)*D)/(S(2)*A*D))
                    if x not in etaval:
                        try:
                            etaval = exp(integrate(etaval, y))
                        except NotImplementedError:
                            pass
                        else:
                            xival = -E1*etaval/D
                            if y not in xival:
                                return [{xi: xival, eta: etaval.subs(y, func)}]
def lie_heuristic_linear(match, comp=False):
    r"""
    This heuristic assumes

    1. `\xi = ax + by + c` and
    2. `\eta = fx + gy + k`

    After substituting the following assumptions in the determining PDE, it
    reduces to

    .. math:: f + (g - a)h - bh^{2} - (ax + by + c)\frac{\partial h}{\partial x}
                 - (fx + gy + k)\frac{\partial h}{\partial y}

    Solving the reduced PDE obtained, using the method of characteristics, becomes
    impractical. The method followed is grouping similar terms and solving the system
    of linear equations obtained. The difference between the bivariate heuristic is that
    `h` need not be a rational function in this case.

    References
    ==========
    - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
      ODE Patterns, pp. 10 - pp. 12
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    coeffdict = {}
    symbols = numbered_symbols("c", cls=Dummy)
    # BUG FIX: the original drew from the generator both through islice() and
    # next() in the same comprehension, silently discarding every other dummy.
    symlist = [next(symbols) for _ in range(6)]
    C0, C1, C2, C3, C4, C5 = symlist
    # The reduced determining PDE with xi = C0*x + C1*y + C2 and
    # eta = C3*x + C4*y + C5.
    pde = C3 + (C4 - C0)*h - (C0*x + C1*y + C2)*hx - (C3*x + C4*y + C5)*hy - C1*h**2
    pde, denom = pde.as_numer_denom()
    pde = powsimp(expand(pde))
    if pde.is_Add:
        terms = pde.args
        # Group the C-coefficients by their (x, y)-dependent factor; each
        # group must vanish, which yields a linear system in C0..C5.
        for term in terms:
            if term.is_Mul:
                rem = Mul(*[m for m in term.args if not m.has(x, y)])
                xypart = term/rem
                if xypart not in coeffdict:
                    coeffdict[xypart] = rem
                else:
                    coeffdict[xypart] += rem
            else:
                if term not in coeffdict:
                    coeffdict[term] = S(1)
                else:
                    coeffdict[term] += S(1)
    sollist = coeffdict.values()
    soldict = solve(sollist, symlist)
    if soldict:
        if isinstance(soldict, list):
            soldict = soldict[0]
        subval = soldict.values()
        if any(v for v in subval):
            # Remaining free parameters are scaled to 1 (any nonzero works).
            onedict = dict(zip(symlist, [1]*6))
            xival = C0*x + C1*func + C2
            etaval = C3*x + C4*func + C5
            xival = xival.subs(soldict)
            etaval = etaval.subs(soldict)
            xival = xival.subs(onedict)
            etaval = etaval.subs(onedict)
            return [{xi: xival, eta: etaval}]
def sysode_linear_2eq_order1(match_):
    """Dispatch a classified 2x2 first-order linear ODE system to its solver.

    ``match_`` supplies the unknown functions, the coefficient table
    ``func_coeff`` (indexed by equation, function and derivative order) and
    the classified ``type_of_equation``.
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # Recover the independent variable from the first Derivative encountered.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    # Normalize each equation by the coefficient of its own derivative.
    for idx in range(2):
        eq[idx] = Add(*[term/fc[idx, func[idx], 1] for term in Add.make_args(eq[idx])])
    # for equations Eq(a1*diff(x(t),t), a*x(t) + b*y(t) + k1)
    # and Eq(a2*diff(x(t),t), c*x(t) + d*y(t) + k2)
    r['a'] = -fc[0, x(t), 0]/fc[0, x(t), 1]
    r['c'] = -fc[1, x(t), 0]/fc[1, y(t), 1]
    r['b'] = -fc[0, y(t), 0]/fc[0, x(t), 1]
    r['d'] = -fc[1, y(t), 0]/fc[1, y(t), 1]
    # Collect the inhomogeneous (x, y-free) parts of each equation.
    forcing = [S(0), S(0)]
    for idx in range(2):
        for term in Add.make_args(eq[idx]):
            if not term.has(x(t), y(t)):
                forcing[idx] += term
    if forcing[0].has(t) or forcing[1].has(t):
        # Only constant forcing terms are supported.
        raise NotImplementedError("Only homogeneous problems are supported" +
                " (and constant inhomogeneity)")
    r['k1'] = forcing[0]
    r['k2'] = forcing[1]

    eq_type = match_['type_of_equation']
    if eq_type == 'type1':
        sol = _linear_2eq_order1_type1(x, y, t, r, eq)
    elif eq_type == 'type2':
        # Particular solution (type2) added on top of the homogeneous
        # solution (type1).
        gsol = _linear_2eq_order1_type1(x, y, t, r, eq)
        psol = _linear_2eq_order1_type2(x, y, t, r, eq)
        sol = [Eq(x(t), gsol[0].rhs + psol[0]), Eq(y(t), gsol[1].rhs + psol[1])]
    elif eq_type == 'type3':
        sol = _linear_2eq_order1_type3(x, y, t, r, eq)
    elif eq_type == 'type4':
        sol = _linear_2eq_order1_type4(x, y, t, r, eq)
    elif eq_type == 'type5':
        sol = _linear_2eq_order1_type5(x, y, t, r, eq)
    elif eq_type == 'type6':
        sol = _linear_2eq_order1_type6(x, y, t, r, eq)
    elif eq_type == 'type7':
        sol = _linear_2eq_order1_type7(x, y, t, r, eq)
    return sol
def _linear_2eq_order1_type1(x, y, t, r, eq):
    r"""
    It is classified under system of two linear homogeneous first-order constant-coefficient
    ordinary differential equations.

    The equations which come under this type are

    .. math:: x' = ax + by,

    .. math:: y' = cx + dy

    The characteristic equation is written as

    .. math:: \lambda^{2} + (a+d) \lambda + ad - bc = 0

    and its discriminant is `D = (a-d)^{2} + 4bc`. There are several cases

    1. Case when `ad - bc \neq 0`. The origin of coordinates, `x = y = 0`,
    is the only stationary point; it is

    - a node if `D = 0`
    - a node if `D > 0` and `ad - bc > 0`
    - a saddle if `D > 0` and `ad - bc < 0`
    - a focus if `D < 0` and `a + d \neq 0`
    - a centre if `D < 0` and `a + d = 0`.

    1.1. If `D > 0`. The characteristic equation has two distinct real roots
    `\lambda_1` and `\lambda_ 2` . The general solution of the system in question is expressed as

    .. math:: x = C_1 b e^{\lambda_1 t} + C_2 b e^{\lambda_2 t}

    .. math:: y = C_1 (\lambda_1 - a) e^{\lambda_1 t} + C_2 (\lambda_2 - a) e^{\lambda_2 t}

    where `C_1` and `C_2` being arbitrary constants

    1.2. If `D < 0`. The characteristic equation has two conjugate
    roots, `\lambda_1 = \sigma + i \beta` and `\lambda_2 = \sigma - i \beta`.
    The general solution of the system is given by

    .. math:: x = b e^{\sigma t} (C_1 \sin(\beta t) + C_2 \cos(\beta t))

    .. math:: y = e^{\sigma t} ([(\sigma - a) C_1 - \beta C_2] \sin(\beta t) + [\beta C_1 + (\sigma - a) C_2 \cos(\beta t)])

    1.3. If `D = 0` and `a \neq d`. The characteristic equation has
    two equal roots, `\lambda_1 = \lambda_2`. The general solution of the system is written as

    .. math:: x = 2b (C_1 + \frac{C_2}{a-d} + C_2 t) e^{\frac{a+d}{2} t}

    .. math:: y = [(d - a) C_1 + C_2 + (d - a) C_2 t] e^{\frac{a+d}{2} t}

    1.4. If `D = 0` and `a = d \neq 0` and `b = 0`

    .. math:: x = C_1 e^{a t} , y = (c C_1 t + C_2) e^{a t}

    1.5. If `D = 0` and `a = d \neq 0` and `c = 0`

    .. math:: x = (b C_1 t + C_2) e^{a t} , y = C_1 e^{a t}

    2. Case when `ad - bc = 0` and `a^{2} + b^{2} > 0`. The whole straight
    line `ax + by = 0` consists of singular points. The original system of differential
    equations can be rewritten as

    .. math:: x' = ax + by , y' = k (ax + by)

    2.1 If `a + bk \neq 0`, solution will be

    .. math:: x = b C_1 + C_2 e^{(a + bk) t} , y = -a C_1 + k C_2 e^{(a + bk) t}

    2.2 If `a + bk = 0`, solution will be

    .. math:: x = C_1 (bk t - 1) + b C_2 t , y = k^{2} b C_1 t + (b k^{2} t + 1) C_2
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    a, b, c, d = r['a'], r['b'], r['c'], r['d']
    real_coeff = all(v.is_real for v in (a, b, c, d))
    D = (a - d)**2 + 4*b*c
    l1 = (a + d + sqrt(D))/2
    l2 = (a + d - sqrt(D))/2
    equal_roots = Eq(D, 0).expand()
    gsol1, gsol2 = [], []

    # The solution is assembled as a Piecewise: each case below appends one
    # (expression, condition) pair for x and for y.

    # Solutions have exponential form if either D > 0 with real coefficients
    # or D != 0 with complex coefficients. Eigenvalues are distinct.
    # For each eigenvalue lam, pick an eigenvector, making sure we don't get (0, 0)
    # The candidates are (b, lam-a) and (lam-d, c).
    exponential_form = D > 0 if real_coeff else Not(equal_roots)
    bad_ab_vector1 = And(Eq(b, 0), Eq(l1, a))
    bad_ab_vector2 = And(Eq(b, 0), Eq(l2, a))
    vector1 = Matrix((Piecewise((l1 - d, bad_ab_vector1), (b, True)),
                      Piecewise((c, bad_ab_vector1), (l1 - a, True))))
    vector2 = Matrix((Piecewise((l2 - d, bad_ab_vector2), (b, True)),
                      Piecewise((c, bad_ab_vector2), (l2 - a, True))))
    sol_vector = C1*exp(l1*t)*vector1 + C2*exp(l2*t)*vector2
    gsol1.append((sol_vector[0], exponential_form))
    gsol2.append((sol_vector[1], exponential_form))

    # Solutions have trigonometric form for real coefficients with D < 0
    # Both b and c are nonzero in this case, so (b, lam-a) is an eigenvector
    # It splits into real/imag parts as (b, sigma-a) and (0, beta). Then
    # multiply it by C1(cos(beta*t) + I*C2*sin(beta*t)) and separate real/imag
    trigonometric_form = D < 0 if real_coeff else False
    sigma = re(l1)
    if im(l1).is_positive:
        beta = im(l1)
    else:
        beta = im(l2)
    vector1 = Matrix((b, sigma - a))
    vector2 = Matrix((0, beta))
    sol_vector = exp(sigma*t) * (C1*(cos(beta*t)*vector1 - sin(beta*t)*vector2) + \
        C2*(sin(beta*t)*vector1 + cos(beta*t)*vector2))
    gsol1.append((sol_vector[0], trigonometric_form))
    gsol2.append((sol_vector[1], trigonometric_form))

    # Final case is D == 0, a single eigenvalue. If the eigenspace is 2-dimensional
    # then we have a scalar matrix, deal with this case first.
    scalar_matrix = And(Eq(a, d), Eq(b, 0), Eq(c, 0))
    vector1 = Matrix((S.One, S.Zero))
    vector2 = Matrix((S.Zero, S.One))
    sol_vector = exp(l1*t) * (C1*vector1 + C2*vector2)
    gsol1.append((sol_vector[0], scalar_matrix))
    gsol2.append((sol_vector[1], scalar_matrix))

    # Have one eigenvector. Get a generalized eigenvector from (A-lam)*vector2 = vector1
    vector1 = Matrix((Piecewise((l1 - d, bad_ab_vector1), (b, True)),
                      Piecewise((c, bad_ab_vector1), (l1 - a, True))))
    vector2 = Matrix((Piecewise((S.One, bad_ab_vector1), (S.Zero, Eq(a, l1)),
                                (b/(a - l1), True)),
                      Piecewise((S.Zero, bad_ab_vector1), (S.One, Eq(a, l1)),
                                (S.Zero, True))))
    sol_vector = exp(l1*t) * (C1*vector1 + C2*(vector2 + t*vector1))
    gsol1.append((sol_vector[0], equal_roots))
    gsol2.append((sol_vector[1], equal_roots))
    return [Eq(x(t), Piecewise(*gsol1)), Eq(y(t), Piecewise(*gsol2))]
def _linear_2eq_order1_type2(x, y, t, r, eq):
    r"""
    The equations of this type are

    .. math:: x' = ax + by + k1 , y' = cx + dy + k2

    The general solution of this system is given by sum of its particular solution and the
    general solution of the corresponding homogeneous system is obtained from type1.

    1. When `ad - bc \neq 0`. The particular solution will be
    `x = x_0` and `y = y_0` where `x_0` and `y_0` are determined by solving linear system of equations

    .. math:: a x_0 + b y_0 + k1 = 0 , c x_0 + d y_0 + k2 = 0

    2. When `ad - bc = 0` and `a^{2} + b^{2} > 0`. In this case, the system of equation becomes

    .. math:: x' = ax + by + k_1 , y' = k (ax + by) + k_2

    2.1 If `\sigma = a + bk \neq 0`, particular solution is given by

    .. math:: x = b \sigma^{-1} (c_1 k - c_2) t - \sigma^{-2} (a c_1 + b c_2)

    .. math:: y = kx + (c_2 - c_1 k) t

    2.2 If `\sigma = a + bk = 0`, particular solution is given by

    .. math:: x = \frac{1}{2} b (c_2 - c_1 k) t^{2} + c_1 t

    .. math:: y = kx + (c_2 - c_1 k) t
    """
    # The coefficients extracted upstream carry the opposite sign convention.
    r['k1'] = -r['k1']; r['k2'] = -r['k2']
    if (r['a']*r['d'] - r['b']*r['c']) != 0:
        # Nondegenerate case: the particular solution is the stationary point.
        x0, y0 = symbols('x0, y0', cls=Dummy)
        sol = solve((r['a']*x0+r['b']*y0+r['k1'], r['c']*x0+r['d']*y0+r['k2']), x0, y0)
        psol = [sol[x0], sol[y0]]
    elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2+r['b']**2) > 0:
        k = r['c']/r['a']
        sigma = r['a'] + r['b']*k
        if sigma != 0:
            sol1 = r['b']*sigma**-1*(r['k1']*k-r['k2'])*t - sigma**-2*(r['a']*r['k1']+r['b']*r['k2'])
            sol2 = k*sol1 + (r['k2']-r['k1']*k)*t
        else:
            # BUG FIX: the 1/2 factor on the t**2 term was missing; integrating
            # x' = b*(k2 - k1*k)*t + k1 gives x = b*(k2 - k1*k)*t**2/2 + k1*t,
            # matching formula 2.2 in the docstring.
            # NOTE: this branch is still not covered by the test suite.
            sol1 = r['b']*(r['k2']-r['k1']*k)*t**2/2 + r['k1']*t
            sol2 = k*sol1 + (r['k2']-r['k1']*k)*t
        psol = [sol1, sol2]
    return psol
def _linear_2eq_order1_type3(x, y, t, r, eq):
    r"""
    Solve the symmetric system

    .. math:: x' = f(t) x + g(t) y

    .. math:: y' = g(t) x + f(t) y

    With `F = \int f(t) \,dt` and `G = \int g(t) \,dt` the general solution is

    .. math:: x = e^{F} (C_1 e^{G} + C_2 e^{-G}) , y = e^{F} (C_1 e^{G} - C_2 e^{-G})

    where `C_1` and `C_2` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # Unevaluated antiderivatives of the two coefficient functions.
    F = Integral(r['a'], t)
    G = Integral(r['b'], t)
    grow = C1*exp(G)
    decay = C2*exp(-G)
    return [Eq(x(t), exp(F)*(grow + decay)),
            Eq(y(t), exp(F)*(grow - decay))]
def _linear_2eq_order1_type4(x, y, t, r, eq):
    r"""
    Solve the antisymmetric system

    .. math:: x' = f(t) x + g(t) y

    .. math:: y' = -g(t) x + f(t) y

    With `F = \int f(t) \,dt` and `G = \int g(t) \,dt` the solution is

    .. math:: x = F (C_1 \cos(G) + C_2 \sin(G)), y = F (-C_1 \sin(G) + C_2 \cos(G))

    where `C_1` and `C_2` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # The two branches cover the two possible orderings in which the
    # classifier may have placed the antisymmetric pair.
    if r['b'] == -r['c']:
        amp = exp(Integral(r['a'], t))
        phase = Integral(r['b'], t)
        sol1 = amp*(C1*cos(phase) + C2*sin(phase))
        sol2 = amp*(C2*cos(phase) - C1*sin(phase))
    elif r['d'] == -r['a']:
        amp = exp(Integral(r['c'], t))
        phase = Integral(r['d'], t)
        sol1 = amp*(C2*cos(phase) - C1*sin(phase))
        sol2 = amp*(C1*cos(phase) + C2*sin(phase))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type5(x, y, t, r, eq):
    r"""
    The equations of this type of ode are .

    .. math:: x' = f(t) x + g(t) y

    .. math:: y' = a g(t) x + [f(t) + b g(t)] y

    The transformation of

    .. math:: x = e^{\int f(t) \,dt} u , y = e^{\int f(t) \,dt} v , T = \int g(t) \,dt

    leads to a system of constant coefficient linear differential equations

    .. math:: u'(T) = v , v'(T) = au + bv
    """
    u, v = symbols('u, v', cls=Function)
    T = Symbol('T')
    if not cancel(r['c']/r['b']).has(t):
        p = cancel(r['c']/r['b'])
        q = cancel((r['d'] - r['a'])/r['b'])
        # Constant-coefficient system in the transformed variable T.
        sys_eqs = (Eq(diff(u(T), T), v(T)), Eq(diff(v(T), T), p*u(T) + q*v(T)))
        sol = dsolve(sys_eqs)
        sol1 = exp(Integral(r['a'], t))*sol[0].rhs.subs(T, Integral(r['b'], t))
        sol2 = exp(Integral(r['a'], t))*sol[1].rhs.subs(T, Integral(r['b'], t))
    if not cancel(r['a']/r['d']).has(t):
        p = cancel(r['a']/r['d'])
        q = cancel((r['b'] - r['c'])/r['d'])
        # BUG FIX: the two equations form one system and must be passed to
        # dsolve together; the original passed the second Eq as dsolve's
        # ``func`` argument, which is not a valid call.
        sys_eqs = (Eq(diff(u(T), T), v(T)), Eq(diff(v(T), T), p*u(T) + q*v(T)))
        sol = dsolve(sys_eqs)
        sol1 = exp(Integral(r['c'], t))*sol[1].rhs.subs(T, Integral(r['d'], t))
        sol2 = exp(Integral(r['c'], t))*sol[0].rhs.subs(T, Integral(r['d'], t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type6(x, y, t, r, eq):
    r"""
    The equations of this type of ode are .
    .. math:: x' = f(t) x + g(t) y
    .. math:: y' = a [f(t) + a h(t)] x + a [g(t) - h(t)] y
    This is solved by first multiplying the first equation by `-a` and adding
    it to the second equation to obtain
    .. math:: y' - a x' = -a h(t) (y - a x)
    Setting `U = y - ax` and integrating the equation we arrive at
    .. math:: y - ax = C_1 e^{-a \int h(t) \,dt}
    and on substituting the value of y in first equation give rise to first order ODEs. After solving for
    `x`, we can obtain `y` by substituting the value of `x` in second equation.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # p records which equation (1 or 2) matched the pattern; 0 means no match.
    # q holds the t-free candidate factor currently under test; s the match.
    p = 0
    q = 0
    p1 = cancel(r['c']/cancel(r['c']/r['d']).as_numer_denom()[0])
    p2 = cancel(r['a']/cancel(r['a']/r['b']).as_numer_denom()[0])
    for n, i in enumerate([p1, p2]):
        for j in Mul.make_args(collect_const(i)):
            # only constant (t-free) factors are candidates for `a`
            if not j.has(t):
                q = j
            # test whether j plays the role of the constant `a` when the
            # first (n==0) or second (n==1) equation is taken as primary
            if q!=0 and n==0:
                if ((r['c']/j - r['a'])/(r['b'] - r['d']/j)) == j:
                    p = 1
                    s = j
                    break
            if q!=0 and n==1:
                if ((r['a']/j - r['c'])/(r['d'] - r['b']/j)) == j:
                    p = 2
                    s = j
                    break
    if p == 1:
        # substitute y = s*x + C1*exp(-s*Int(...)) into the first equation
        equ = diff(x(t),t) - r['a']*x(t) - r['b']*(s*x(t) + C1*exp(-s*Integral(r['b'] - r['d']/s, t)))
        hint1 = classify_ode(equ)[1]
        sol1 = dsolve(equ, hint=hint1+'_Integral').rhs
        sol2 = s*sol1 + C1*exp(-s*Integral(r['b'] - r['d']/s, t))
    elif p ==2:
        # symmetric case with the roles of x and y exchanged
        equ = diff(y(t),t) - r['c']*y(t) - r['d']*s*y(t) + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
        hint1 = classify_ode(equ)[1]
        sol2 = dsolve(equ, hint=hint1+'_Integral').rhs
        sol1 = s*sol2 + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
    # NOTE(review): if no pattern matched (p == 0) sol1/sol2 are unbound and
    # this raises NameError — presumably the classifier guarantees a match.
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type7(x, y, t, r, eq):
    r"""
    The equations of this type of ode are .
    .. math:: x' = f(t) x + g(t) y
    .. math:: y' = h(t) x + p(t) y
    Differentiating the first equation and substituting the value of `y`
    from second equation will give a second-order linear equation
    .. math:: g x'' - (fg + gp + g') x' + (fgp - g^{2} h + f g' - f' g) x = 0
    This above equation can be easily integrated if following conditions are satisfied.
    1. `fgp - g^{2} h + f g' - f' g = 0`
    2. `fgp - g^{2} h + f g' - f' g = ag, fg + gp + g' = bg`
    If first condition is satisfied then it is solved by current dsolve solver and in second case it becomes
    a constant coefficient differential equation which is also solved by current solver.
    Otherwise if the above condition fails then,
    a particular solution is assumed as `x = x_0(t)` and `y = y_0(t)`
    Then the general solution is expressed as
    .. math:: x = C_1 x_0(t) + C_2 x_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt
    .. math:: y = C_1 y_0(t) + C_2 [\frac{F(t) P(t)}{x_0(t)} + y_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt]
    where C1 and C2 are arbitrary constants and
    .. math:: F(t) = e^{\int f(t) \,dt} , P(t) = e^{\int p(t) \,dt}
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # e1/e2 are the zeroth-order coefficients of the reduced second-order
    # equation in x (resp. y); m1/m2 are the first-order coefficients
    e1 = r['a']*r['b']*r['c'] - r['b']**2*r['c'] + r['a']*diff(r['b'],t) - diff(r['a'],t)*r['b']
    e2 = r['a']*r['c']*r['d'] - r['b']*r['c']**2 + diff(r['c'],t)*r['d'] - r['c']*diff(r['d'],t)
    m1 = r['a']*r['b'] + r['b']*r['d'] + diff(r['b'],t)
    m2 = r['a']*r['c'] + r['c']*r['d'] + diff(r['c'],t)
    if e1 == 0:
        # condition 1 on the x-equation: no zeroth-order term remains
        sol1 = dsolve(r['b']*diff(x(t),t,t) - m1*diff(x(t),t)).rhs
        sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
    elif e2 == 0:
        # condition 1 on the y-equation
        sol2 = dsolve(r['c']*diff(y(t),t,t) - m2*diff(y(t),t)).rhs
        sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
    elif not (e1/r['b']).has(t) and not (m1/r['b']).has(t):
        # condition 2: constant-coefficient second-order equation in x
        sol1 = dsolve(diff(x(t),t,t) - (m1/r['b'])*diff(x(t),t) - (e1/r['b'])*x(t)).rhs
        sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
    elif not (e2/r['c']).has(t) and not (m2/r['c']).has(t):
        # condition 2 applied to the y-equation
        sol2 = dsolve(diff(y(t),t,t) - (m2/r['c'])*diff(y(t),t) - (e2/r['c'])*y(t)).rhs
        sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
    else:
        # fallback: express the general solution in terms of an (unknown)
        # particular solution x0(t), y0(t) which remains symbolic in the result
        x0 = Function('x0')(t)  # x0 and y0 being particular solutions
        y0 = Function('y0')(t)
        F = exp(Integral(r['a'],t))
        P = exp(Integral(r['d'],t))
        sol1 = C1*x0 + C2*x0*Integral(r['b']*F*P/x0**2, t)
        sol2 = C1*y0 + C2*(F*P/x0 + y0*Integral(r['b']*F*P/x0**2, t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def sysode_linear_2eq_order2(match_):
    """Solve a system of two second-order linear ODEs.

    Normalizes each equation by the coefficient of its own second
    derivative, builds the coefficient table ``r`` used by the
    ``_linear_2eq_order2_type*`` helpers, and dispatches on the equation
    type determined by the classifier.  Note that ``eq`` is modified
    in place during normalization.
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # the independent variable: any Symbol inside the first Derivative found
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    for i in range(2):
        # divide every term by the coefficient of the second derivative so
        # each equation becomes monic in x'' (resp. y'')
        eqs = []
        for terms in Add.make_args(eq[i]):
            eqs.append(terms/fc[i,func[i],2])
        eq[i] = Add(*eqs)
    # for equations Eq(diff(x(t),t,t), a1*diff(x(t),t)+b1*diff(y(t),t)+c1*x(t)+d1*y(t)+e1)
    # and Eq(a2*diff(y(t),t,t), a2*diff(x(t),t)+b2*diff(y(t),t)+c2*x(t)+d2*y(t)+e2)
    r['a1'] = -fc[0,x(t),1]/fc[0,x(t),2] ; r['a2'] = -fc[1,x(t),1]/fc[1,y(t),2]
    r['b1'] = -fc[0,y(t),1]/fc[0,x(t),2] ; r['b2'] = -fc[1,y(t),1]/fc[1,y(t),2]
    r['c1'] = -fc[0,x(t),0]/fc[0,x(t),2] ; r['c2'] = -fc[1,x(t),0]/fc[1,y(t),2]
    r['d1'] = -fc[0,y(t),0]/fc[0,x(t),2] ; r['d2'] = -fc[1,y(t),0]/fc[1,y(t),2]
    # accumulate the free (nonhomogeneous) terms of each equation
    const = [S(0), S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not (j.has(x(t)) or j.has(y(t))):
                const[i] += j
    r['e1'] = -const[0]
    r['e2'] = -const[1]
    if match_['type_of_equation'] == 'type1':
        sol = _linear_2eq_order2_type1(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type2':
        # general solution of the homogeneous part plus a particular solution
        gsol = _linear_2eq_order2_type1(x, y, t, r, eq)
        psol = _linear_2eq_order2_type2(x, y, t, r, eq)
        sol = [Eq(x(t), gsol[0].rhs+psol[0]), Eq(y(t), gsol[1].rhs+psol[1])]
    elif match_['type_of_equation'] == 'type3':
        sol = _linear_2eq_order2_type3(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type4':
        sol = _linear_2eq_order2_type4(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type5':
        sol = _linear_2eq_order2_type5(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type6':
        sol = _linear_2eq_order2_type6(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type7':
        sol = _linear_2eq_order2_type7(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type8':
        sol = _linear_2eq_order2_type8(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type9':
        sol = _linear_2eq_order2_type9(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type10':
        sol = _linear_2eq_order2_type10(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type11':
        sol = _linear_2eq_order2_type11(x, y, t, r, eq)
    return sol
def _linear_2eq_order2_type1(x, y, t, r, eq):
    r"""
    System of two constant-coefficient second-order linear homogeneous differential equations
    .. math:: x'' = ax + by
    .. math:: y'' = cx + dy
    The characteristic equation for above equations
    .. math:: \lambda^4 - (a + d) \lambda^2 + ad - bc = 0
    whose discriminant is `D = (a - d)^2 + 4bc \neq 0`
    1. When `ad - bc \neq 0`
    1.1. If `D \neq 0`. The characteristic equation has four distinct roots, `\lambda_1, \lambda_2, \lambda_3, \lambda_4`.
    The general solution of the system is
    .. math:: x = C_1 b e^{\lambda_1 t} + C_2 b e^{\lambda_2 t} + C_3 b e^{\lambda_3 t} + C_4 b e^{\lambda_4 t}
    .. math:: y = C_1 (\lambda_1^{2} - a) e^{\lambda_1 t} + C_2 (\lambda_2^{2} - a) e^{\lambda_2 t} + C_3 (\lambda_3^{2} - a) e^{\lambda_3 t} + C_4 (\lambda_4^{2} - a) e^{\lambda_4 t}
    where `C_1,..., C_4` are arbitrary constants.
    1.2. If `D = 0` and `a \neq d`:
    .. math:: x = 2 C_1 (bt + \frac{2bk}{a - d}) e^{\frac{kt}{2}} + 2 C_2 (bt + \frac{2bk}{a - d}) e^{\frac{-kt}{2}} + 2b C_3 t e^{\frac{kt}{2}} + 2b C_4 t e^{\frac{-kt}{2}}
    .. math:: y = C_1 (d - a) t e^{\frac{kt}{2}} + C_2 (d - a) t e^{\frac{-kt}{2}} + C_3 [(d - a) t + 2k] e^{\frac{kt}{2}} + C_4 [(d - a) t - 2k] e^{\frac{-kt}{2}}
    where `C_1,..., C_4` are arbitrary constants and `k = \sqrt{2 (a + d)}`
    1.3. If `D = 0` and `a = d \neq 0` and `b = 0`:
    .. math:: x = 2 \sqrt{a} C_1 e^{\sqrt{a} t} + 2 \sqrt{a} C_2 e^{-\sqrt{a} t}
    .. math:: y = c C_1 t e^{\sqrt{a} t} - c C_2 t e^{-\sqrt{a} t} + C_3 e^{\sqrt{a} t} + C_4 e^{-\sqrt{a} t}
    1.4. If `D = 0` and `a = d \neq 0` and `c = 0`:
    .. math:: x = b C_1 t e^{\sqrt{a} t} - b C_2 t e^{-\sqrt{a} t} + C_3 e^{\sqrt{a} t} + C_4 e^{-\sqrt{a} t}
    .. math:: y = 2 \sqrt{a} C_1 e^{\sqrt{a} t} + 2 \sqrt{a} C_2 e^{-\sqrt{a} t}
    2. When `ad - bc = 0` and `a^2 + b^2 > 0`. Then the original system becomes
    .. math:: x'' = ax + by
    .. math:: y'' = k (ax + by)
    2.1. If `a + bk \neq 0`:
    .. math:: x = C_1 e^{t \sqrt{a + bk}} + C_2 e^{-t \sqrt{a + bk}} + C_3 bt + C_4 b
    .. math:: y = C_1 k e^{t \sqrt{a + bk}} + C_2 k e^{-t \sqrt{a + bk}} - C_3 at - C_4 a
    2.2. If `a + bk = 0`:
    .. math:: x = C_1 b t^3 + C_2 b t^2 + C_3 t + C_4
    .. math:: y = kx + 6 C_1 t + 2 C_2
    """
    # NOTE: these assignments mutate the shared coefficient dict ``r``
    r['a'] = r['c1']
    r['b'] = r['d1']
    r['c'] = r['c2']
    r['d'] = r['d2']
    l = Symbol('l')
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # quartic characteristic polynomial in the eigenvalue l
    chara_eq = l**4 - (r['a']+r['d'])*l**2 + r['a']*r['d'] - r['b']*r['c']
    l1 = rootof(chara_eq, 0)
    l2 = rootof(chara_eq, 1)
    l3 = rootof(chara_eq, 2)
    l4 = rootof(chara_eq, 3)
    # discriminant D = (a - d)**2 + 4*b*c decides between the sub-cases
    D = (r['a'] - r['d'])**2 + 4*r['b']*r['c']
    if (r['a']*r['d'] - r['b']*r['c']) != 0:
        if D != 0:
            # case 1.1: four distinct eigenvalues
            gsol1 = C1*r['b']*exp(l1*t) + C2*r['b']*exp(l2*t) + C3*r['b']*exp(l3*t) \
            + C4*r['b']*exp(l4*t)
            gsol2 = C1*(l1**2-r['a'])*exp(l1*t) + C2*(l2**2-r['a'])*exp(l2*t) + \
            C3*(l3**2-r['a'])*exp(l3*t) + C4*(l4**2-r['a'])*exp(l4*t)
        else:
            if r['a'] != r['d']:
                # case 1.2
                k = sqrt(2*(r['a']+r['d']))
                mid = r['b']*t+2*r['b']*k/(r['a']-r['d'])
                gsol1 = 2*C1*mid*exp(k*t/2) + 2*C2*mid*exp(-k*t/2) + \
                2*r['b']*C3*t*exp(k*t/2) + 2*r['b']*C4*t*exp(-k*t/2)
                gsol2 = C1*(r['d']-r['a'])*t*exp(k*t/2) + C2*(r['d']-r['a'])*t*exp(-k*t/2) + \
                C3*((r['d']-r['a'])*t+2*k)*exp(k*t/2) + C4*((r['d']-r['a'])*t-2*k)*exp(-k*t/2)
            elif r['a'] == r['d'] != 0 and r['b'] == 0:
                # case 1.3
                sa = sqrt(r['a'])
                gsol1 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t)
                gsol2 = r['c']*C1*t*exp(sa*t)-r['c']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t)
            elif r['a'] == r['d'] != 0 and r['c'] == 0:
                # case 1.4
                sa = sqrt(r['a'])
                gsol1 = r['b']*C1*t*exp(sa*t)-r['b']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t)
                gsol2 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t)
    elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2 + r['b']**2) > 0:
        # case 2: degenerate system, second row proportional to the first
        k = r['c']/r['a']
        if r['a'] + r['b']*k != 0:
            mid = sqrt(r['a'] + r['b']*k)
            gsol1 = C1*exp(mid*t) + C2*exp(-mid*t) + C3*r['b']*t + C4*r['b']
            gsol2 = C1*k*exp(mid*t) + C2*k*exp(-mid*t) - C3*r['a']*t - C4*r['a']
        else:
            gsol1 = C1*r['b']*t**3 + C2*r['b']*t**2 + C3*t + C4
            gsol2 = k*gsol1 + 6*C1*t + 2*C2
    # NOTE(review): some coefficient combinations fall through every branch
    # (e.g. D == 0 with a == d and both b and c nonzero), leaving gsol1/gsol2
    # unbound — presumably excluded by classification; confirm.
    return [Eq(x(t), gsol1), Eq(y(t), gsol2)]
def _linear_2eq_order2_type2(x, y, t, r, eq):
    r"""
    Particular solution of the constant-coefficient nonhomogeneous system
    .. math:: x'' = a_1 x + b_1 y + c_1
    .. math:: y'' = a_2 x + b_2 y + c_2
    The general solution is this particular solution plus the general
    solution of the corresponding homogeneous system (type 1).
    1. If `a_1 b_2 - a_2 b_1 \neq 0`, a constant particular solution
    `x = x_0, y = y_0` is obtained from the linear algebraic system
    .. math:: a_1 x_0 + b_1 y_0 + c_1 = 0, a_2 x_0 + b_2 y_0 + c_2 = 0
    2. If `a_1 b_2 - a_2 b_1 = 0` and `a_1^2 + b_1^2 > 0`, the system reduces to
    .. math:: x'' = ax + by + c_1, y'' = k (ax + by) + c_2
    2.1. For `\sigma = a + bk \neq 0` the particular solution is
    .. math:: x = \frac{1}{2} b \sigma^{-1} (c_1 k - c_2) t^2 - \sigma^{-2} (a c_1 + b c_2)
    .. math:: y = kx + \frac{1}{2} (c_2 - c_1 k) t^2
    2.2. For `\sigma = a + bk = 0` the particular solution is
    .. math:: x = \frac{1}{24} b (c_2 - c_1 k) t^4 + \frac{1}{2} c_1 t^2
    .. math:: y = kx + \frac{1}{2} (c_2 - c_1 k) t^2
    """
    x0, y0 = symbols('x0, y0')
    a1, b1, c1 = r['c1'], r['d1'], r['e1']
    a2, b2, c2 = r['c2'], r['d2'], r['e2']
    det = a1*b2 - a2*b1
    if det != 0:
        # nondegenerate case: a constant particular solution exists
        const_sol = solve((a1*x0 + b1*y0 + c1, a2*x0 + b2*y0 + c2), x0, y0)
        psol = [const_sol[x0], const_sol[y0]]
    elif det == 0 and (a1**2 + b1**2) > 0:
        # degenerate case: second equation proportional to the first
        k = a2/a1
        sig = a1 + b1*k
        if sig != 0:
            px = b1*(c1*k - c2)*t**2/(2*sig) - (a1*c1 + b1*c2)/sig**2
        else:
            px = b1*(c2 - c1*k)*t**4/24 + c1*t**2/2
        py = k*px + (c2 - c1*k)*t**2/2
        psol = [px, py]
    return psol
def _linear_2eq_order2_type3(x, y, t, r, eq):
    r"""
    Solves the pendulum-like system (horizontal motion of a pendulum with
    the Earth's rotation taken into account).  When `a^2 + 4b > 0` the
    solution is
    .. math:: x = C_1 \cos(\alpha t) + C_2 \sin(\alpha t) + C_3 \cos(\beta t) + C_4 \sin(\beta t)
    .. math:: y = -C_1 \sin(\alpha t) + C_2 \cos(\alpha t) - C_3 \sin(\beta t) + C_4 \cos(\beta t)
    with arbitrary constants `C_1,...,C_4` and
    .. math:: \alpha = \frac{1}{2} a + \frac{1}{2} \sqrt{a^2 + 4b}, \beta = \frac{1}{2} a - \frac{1}{2} \sqrt{a^2 + 4b}
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    if r['b1']**2 - 4*r['c1'] > 0:
        r['a'] = r['b1'] ; r['b'] = -r['c1']
        disc = sqrt(r['a']**2 + 4*r['b'])
        alpha = (r['a'] + disc)/2
        beta = (r['a'] - disc)/2
        sol1 = C1*cos(alpha*t) + C2*sin(alpha*t) + C3*cos(beta*t) + C4*sin(beta*t)
        sol2 = C2*cos(alpha*t) - C1*sin(alpha*t) + C4*cos(beta*t) - C3*sin(beta*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type4(x, y, t, r, eq):
    r"""
    These equations are found in the theory of oscillations
    .. math:: x'' + a_1 x' + b_1 y' + c_1 x + d_1 y = k_1 e^{i \omega t}
    .. math:: y'' + a_2 x' + b_2 y' + c_2 x + d_2 y = k_2 e^{i \omega t}
    The general solution of this linear nonhomogeneous system of constant-coefficient
    differential equations is given by the sum of its particular solution and the
    general solution of the corresponding homogeneous system (with `k_1 = k_2 = 0`)
    1. A particular solution is obtained by the method of undetermined coefficients:
    .. math:: x = A_* e^{i \omega t}, y = B_* e^{i \omega t}
    On substituting these expressions into the original system of differential equations,
    one arrive at a linear nonhomogeneous system of algebraic equations for the
    coefficients `A` and `B`.
    2. The general solution of the homogeneous system of differential equations is determined
    by a linear combination of linearly independent particular solutions determined by
    the method of undetermined coefficients in the form of exponentials:
    .. math:: x = A e^{\lambda t}, y = B e^{\lambda t}
    On substituting these expressions into the original system and collecting the
    coefficients of the unknown `A` and `B`, one obtains
    .. math:: (\lambda^{2} + a_1 \lambda + c_1) A + (b_1 \lambda + d_1) B = 0
    .. math:: (a_2 \lambda + c_2) A + (\lambda^{2} + b_2 \lambda + d_2) B = 0
    The determinant of this system must vanish for nontrivial solutions A, B to exist.
    This requirement results in the following characteristic equation for `\lambda`
    .. math:: (\lambda^2 + a_1 \lambda + c_1) (\lambda^2 + b_2 \lambda + d_2) - (b_1 \lambda + d_1) (a_2 \lambda + c_2) = 0
    If all roots `k_1,...,k_4` of this equation are distinct, the general solution of the original
    system of the differential equations has the form
    .. math:: x = C_1 (b_1 \lambda_1 + d_1) e^{\lambda_1 t} - C_2 (b_1 \lambda_2 + d_1) e^{\lambda_2 t} - C_3 (b_1 \lambda_3 + d_1) e^{\lambda_3 t} - C_4 (b_1 \lambda_4 + d_1) e^{\lambda_4 t}
    .. math:: y = C_1 (\lambda_1^{2} + a_1 \lambda_1 + c_1) e^{\lambda_1 t} + C_2 (\lambda_2^{2} + a_1 \lambda_2 + c_1) e^{\lambda_2 t} + C_3 (\lambda_3^{2} + a_1 \lambda_3 + c_1) e^{\lambda_3 t} + C_4 (\lambda_4^{2} + a_1 \lambda_4 + c_1) e^{\lambda_4 t}
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    k = Symbol('k')
    Ra, Ca, Rb, Cb = symbols('Ra, Ca, Rb, Cb')
    a1 = r['a1'] ; a2 = r['a2']
    b1 = r['b1'] ; b2 = r['b2']
    c1 = r['c1'] ; c2 = r['c2']
    d1 = r['d1'] ; d2 = r['d2']
    # amplitudes k1, k2 of the forcing terms e1, e2
    k1 = r['e1'].expand().as_independent(t)[0]
    k2 = r['e2'].expand().as_independent(t)[0]
    # extract the forcing frequency w from e1 = k1*exp(I*w*t)
    ew1 = r['e1'].expand().as_independent(t)[1]
    ew2 = powdenest(ew1).as_base_exp()[1]
    ew3 = collect(ew2, t).coeff(t)
    w = cancel(ew3/I)
    # The particular solution is assumed to be (Ra+I*Ca)*exp(I*w*t) and
    # (Rb+I*Cb)*exp(I*w*t) for x(t) and y(t) respectively
    peq1 = (-w**2+c1)*Ra - a1*w*Ca + d1*Rb - b1*w*Cb - k1
    peq2 = a1*w*Ra + (-w**2+c1)*Ca + b1*w*Rb + d1*Cb
    peq3 = c2*Ra - a2*w*Ca + (-w**2+d2)*Rb - b2*w*Cb - k2
    peq4 = a2*w*Ra + c2*Ca + b2*w*Rb + (-w**2+d2)*Cb
    # FIXME: solve for what in what? Ra, Rb, etc I guess
    # but then psol not used for anything?
    # NOTE(review): Ra/Ca/Rb/Cb stay as free symbols in sol1/sol2 below,
    # since the solved values in ``psol`` are never substituted — confirm.
    psol = solve([peq1, peq2, peq3, peq4])
    # quartic characteristic equation of the homogeneous system
    chareq = (k**2+a1*k+c1)*(k**2+b2*k+d2) - (b1*k+d1)*(a2*k+c2)
    [k1, k2, k3, k4] = roots_quartic(Poly(chareq))
    sol1 = -C1*(b1*k1+d1)*exp(k1*t) - C2*(b1*k2+d1)*exp(k2*t) - \
            C3*(b1*k3+d1)*exp(k3*t) - C4*(b1*k4+d1)*exp(k4*t) + (Ra+I*Ca)*exp(I*w*t)
    a1_ = (a1-1)
    sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*t) + C2*(k2**2+a1_*k2+c1)*exp(k2*t) + \
            C3*(k3**2+a1_*k3+c1)*exp(k3*t) + C4*(k4**2+a1_*k4+c1)*exp(k4*t) + (Rb+I*Cb)*exp(I*w*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type5(x, y, t, r, eq):
    r"""
    Solves the system
    .. math:: x'' = a (t y' - y)
    .. math:: y'' = b (t x' - x)
    The substitution `u = t x' - x`, `v = t y' - y` yields the first-order
    system `u' = atv`, `v' = btu`, whose general solution is exponential for
    `ab > 0` and trigonometric for `ab < 0`.  Back-substituting and
    integrating gives
    .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt, y = C_4 t + t \int \frac{v}{t^2} \,dt
    where `C_3` and `C_4` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    r['a'] = -r['d1'] ; r['b'] = -r['c2']
    root = sqrt(abs(r['a']*r['b']))
    arg = root*t**2/2
    if r['a']*r['b'] > 0:
        # hyperbolic case
        u = C1*r['a']*exp(arg) + C2*r['a']*exp(-arg)
        v = C1*root*exp(arg) - C2*root*exp(-arg)
    else:
        # oscillatory case
        u = C1*r['a']*cos(arg) + C2*r['a']*sin(arg)
        v = C2*root*cos(arg) - C1*root*sin(arg)
    sol1 = C3*t + t*Integral(u/t**2, t)
    sol2 = C4*t + t*Integral(v/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type6(x, y, t, r, eq):
    r"""
    Solves
    .. math:: x'' = f(t) (a_1 x + b_1 y)
    .. math:: y'' = f(t) (a_2 x + b_2 y)
    Combining the equations with constants determined by the roots `k_1`,
    `k_2` of
    .. math:: k^2 - (a_1 + b_2) k + a_1 b_2 - a_2 b_1 = 0
    decouples the system into two independent equations
    .. math:: z_i'' = k_i f(t) z_i, \qquad z_i = a_2 x + (k_i - a_1) y
    which are solved by ``dsolve`` and then inverted for `x` and `y`.
    """
    lam = Symbol('k')
    z = Function('z')
    # split f(t) from the constant coefficient matrix via the term ratio
    ratio = cancel(
        (r['c1']*x(t) + r['d1']*y(t))/
        (r['c2']*x(t) + r['d2']*y(t)))
    num, den = ratio.as_numer_denom()
    a1 = num.coeff(x(t))
    b1 = num.coeff(y(t))
    a2 = den.coeff(x(t))
    b2 = den.coeff(y(t))
    f = r['c1']/a1
    chareq = lam**2 - (a1 + b2)*lam + a1*b2 - a2*b1
    k1, k2 = [rootof(chareq, i) for i in range(Poly(chareq).degree())]
    # solve the two decoupled scalar equations
    z1 = dsolve(diff(z(t),t,t) - k1*f*z(t)).rhs
    z2 = dsolve(diff(z(t),t,t) - k2*f*z(t)).rhs
    # invert z_i = a2*x + (k_i - a1)*y for x and y
    sol1 = (a1*(z1 - z2) + k1*z2 - k2*z1)/(a2*(k1 - k2))
    sol2 = (z1 - z2)/(k1 - k2)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type7(x, y, t, r, eq):
    r"""
    Solves
    .. math:: x'' = f(t) (a_1 x' + b_1 y')
    .. math:: y'' = f(t) (a_2 x' + b_2 y')
    With `k_1`, `k_2` the roots of
    .. math:: k^2 - (a_1 + b_2) k + a_1 b_2 - a_2 b_1 = 0
    suitable constant combinations decouple the system into
    .. math:: z_i'' = k_i f(t) z_i', \qquad z_i = a_2 x + (k_i - a_1) y
    Integrating gives the linear algebraic system
    .. math:: a_2 x + (k_i - a_1) y = C \int e^{k_i F(t)} \,dt + C'
    with `F(t) = \int f(t) \,dt`, which is then solved for `x` and `y`.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    lam = Symbol('k')
    # split f(t) from the constant coefficient matrix via the term ratio
    ratio = cancel(
        (r['a1']*x(t) + r['b1']*y(t))/
        (r['a2']*x(t) + r['b2']*y(t)))
    num, den = ratio.as_numer_denom()
    a1 = num.coeff(x(t))
    b1 = num.coeff(y(t))
    a2 = den.coeff(x(t))
    b2 = den.coeff(y(t))
    f = r['a1']/a1
    chareq = lam**2 - (a1 + b2)*lam + a1*b2 - a2*b1
    k1, k2 = [rootof(chareq, i) for i in range(Poly(chareq).degree())]
    F = Integral(f, t)
    z1 = C1*Integral(exp(k1*F), t) + C2
    z2 = C3*Integral(exp(k2*F), t) + C4
    # invert z_i = a2*x + (k_i - a1)*y for x and y
    sol1 = (a1*(z1 - z2) + k1*z2 - k2*z1)/(a2*(k1 - k2))
    sol2 = (z1 - z2)/(k1 - k2)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type8(x, y, t, r, eq):
    r"""
    Solves
    .. math:: x'' = a f(t) (t y' - y)
    .. math:: y'' = b f(t) (t x' - x)
    The substitution `u = t x' - x`, `v = t y' - y` yields the first-order
    system `u' = a t f(t) v`, `v' = b t f(t) u`; its solution is exponential
    in `\int t f(t)\,dt` when `ab > 0` and trigonometric when `ab < 0`.
    Back-substituting and integrating gives
    .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt, y = C_4 t + t \int \frac{v}{t^2} \,dt
    where `C_3` and `C_4` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # split the constant factors a, b from the common function f(t)
    a, b = cancel(r['d1']/r['c2']).as_numer_denom()
    f = -r['d1']/a
    k = sqrt(abs(a*b))
    F = Integral(t*f, t)
    if a*b > 0:
        # hyperbolic case
        u = C1*a*exp(k*F) + C2*a*exp(-k*F)
        v = C1*k*exp(k*F) - C2*k*exp(-k*F)
    else:
        # oscillatory case
        u = C1*a*cos(k*F) + C2*a*sin(k*F)
        v = C2*k*cos(k*F) - C1*k*sin(k*F)
    sol1 = C3*t + t*Integral(u/t**2, t)
    sol2 = C4*t + t*Integral(v/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type9(x, y, t, r, eq):
    r"""
    .. math:: t^2 x'' + a_1 t x' + b_1 t y' + c_1 x + d_1 y = 0
    .. math:: t^2 y'' + a_2 t x' + b_2 t y' + c_2 x + d_2 y = 0
    These system of equations are euler type.
    The substitution of `t = \sigma e^{\tau} (\sigma \neq 0)` leads to the system of constant
    coefficient linear differential equations
    .. math:: x'' + (a_1 - 1) x' + b_1 y' + c_1 x + d_1 y = 0
    .. math:: y'' + a_2 x' + (b_2 - 1) y' + c_2 x + d_2 y = 0
    The general solution of the homogeneous system of differential equations is determined
    by a linear combination of linearly independent particular solutions determined by
    the method of undetermined coefficients in the form of exponentials
    .. math:: x = A e^{\lambda t}, y = B e^{\lambda t}
    On substituting these expressions into the original system and collecting the
    coefficients of the unknown `A` and `B`, one obtains
    .. math:: (\lambda^{2} + (a_1 - 1) \lambda + c_1) A + (b_1 \lambda + d_1) B = 0
    .. math:: (a_2 \lambda + c_2) A + (\lambda^{2} + (b_2 - 1) \lambda + d_2) B = 0
    The determinant of this system must vanish for nontrivial solutions A, B to exist.
    This requirement results in the following characteristic equation for `\lambda`
    .. math:: (\lambda^2 + (a_1 - 1) \lambda + c_1) (\lambda^2 + (b_2 - 1) \lambda + d_2) - (b_1 \lambda + d_1) (a_2 \lambda + c_2) = 0
    If all roots `k_1,...,k_4` of this equation are distinct, the general solution of the original
    system of the differential equations has the form
    .. math:: x = C_1 (b_1 \lambda_1 + d_1) e^{\lambda_1 t} - C_2 (b_1 \lambda_2 + d_1) e^{\lambda_2 t} - C_3 (b_1 \lambda_3 + d_1) e^{\lambda_3 t} - C_4 (b_1 \lambda_4 + d_1) e^{\lambda_4 t}
    .. math:: y = C_1 (\lambda_1^{2} + (a_1 - 1) \lambda_1 + c_1) e^{\lambda_1 t} + C_2 (\lambda_2^{2} + (a_1 - 1) \lambda_2 + c_1) e^{\lambda_2 t} + C_3 (\lambda_3^{2} + (a_1 - 1) \lambda_3 + c_1) e^{\lambda_3 t} + C_4 (\lambda_4^{2} + (a_1 - 1) \lambda_4 + c_1) e^{\lambda_4 t}
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    k = Symbol('k')
    # recover the Euler-form coefficients: the caller normalized by the t**2
    # coefficient and negated, so multiply back by t (first-order terms) and
    # t**2 (zeroth-order terms) and flip signs
    a1 = -r['a1']*t; a2 = -r['a2']*t
    b1 = -r['b1']*t; b2 = -r['b2']*t
    c1 = -r['c1']*t**2; c2 = -r['c2']*t**2
    d1 = -r['d1']*t**2; d2 = -r['d2']*t**2
    # quartic characteristic equation in k
    eq = (k**2+(a1-1)*k+c1)*(k**2+(b2-1)*k+d2)-(b1*k+d1)*(a2*k+c2)
    [k1, k2, k3, k4] = roots_quartic(Poly(eq))
    # t**k_i is written as exp(k_i*log(t))
    sol1 = -C1*(b1*k1+d1)*exp(k1*log(t)) - C2*(b1*k2+d1)*exp(k2*log(t)) - \
            C3*(b1*k3+d1)*exp(k3*log(t)) - C4*(b1*k4+d1)*exp(k4*log(t))
    a1_ = (a1-1)
    sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*log(t)) + C2*(k2**2+a1_*k2+c1)*exp(k2*log(t)) \
            + C3*(k3**2+a1_*k3+c1)*exp(k3*log(t)) + C4*(k4**2+a1_*k4+c1)*exp(k4*log(t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type10(x, y, t, r, eq):
    r"""
    The equation of this category are
    .. math:: (\alpha t^2 + \beta t + \gamma)^{2} x'' = ax + by
    .. math:: (\alpha t^2 + \beta t + \gamma)^{2} y'' = cx + dy
    The transformation
    .. math:: \tau = \int \frac{1}{\alpha t^2 + \beta t + \gamma} \,dt , u = \frac{x}{\sqrt{\left|\alpha t^2 + \beta t + \gamma\right|}} , v = \frac{y}{\sqrt{\left|\alpha t^2 + \beta t + \gamma\right|}}
    leads to a constant coefficient linear system of equations
    .. math:: u'' = (a - \alpha \gamma + \frac{1}{4} \beta^{2}) u + b v
    .. math:: v'' = c u + (d - \alpha \gamma + \frac{1}{4} \beta^{2}) v
    These system of equations obtained can be solved by type1 of System of two
    constant-coefficient second-order linear homogeneous differential equations.
    """
    u, v = symbols('u, v', cls=Function)
    # NOTE(review): this solver is deliberately disabled — the unconditional
    # assert below raises AssertionError on entry, so everything that follows
    # is unreachable dead code; confirm intent before enabling.
    assert False
    p = Wild('p', exclude=[t, t**2])
    q = Wild('q', exclude=[t, t**2])
    s = Wild('s', exclude=[t, t**2])
    n = Wild('n', exclude=[t, t**2])
    num, den = r['c1'].as_numer_denom()
    dic = den.match((n*(p*t**2+q*t+s)**2).expand())
    eqz = dic[p]*t**2 + dic[q]*t + dic[s]
    a = num/dic[n]
    b = cancel(r['d1']*eqz**2)
    c = cancel(r['c2']*eqz**2)
    d = cancel(r['d2']*eqz**2)
    [msol1, msol2] = dsolve([Eq(diff(u(t), t, t), (a - dic[p]*dic[s] + dic[q]**2/4)*u(t) \
        + b*v(t)), Eq(diff(v(t),t,t), c*u(t) + (d - dic[p]*dic[s] + dic[q]**2/4)*v(t))])
    sol1 = (msol1.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t))
    sol2 = (msol2.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type11(x, y, t, r, eq):
    r"""
    Solves
    .. math:: x'' = f(t) (t x' - x) + g(t) (t y' - y)
    .. math:: y'' = h(t) (t x' - x) + p(t) (t y' - y)
    The substitution `u = t x' - x`, `v = t y' - y` yields the first-order
    linear system
    .. math:: u' = t f(t) u + t g(t) v, v' = t h(t) u + t p(t) v
    which is handed to ``dsolve``; back-substituting and integrating gives
    .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt , y = C_4 t + t \int \frac{v}{t^2} \,dt
    where `C_3` and `C_4` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    u, v = symbols('u, v', cls=Function)
    f, g = -r['c1'], -r['d1']
    h, p = -r['c2'], -r['d2']
    first_order = [
        Eq(diff(u(t), t), t*f*u(t) + t*g*v(t)),
        Eq(diff(v(t), t), t*h*u(t) + t*p*v(t)),
    ]
    usol, vsol = dsolve(first_order)
    sol1 = C3*t + t*Integral(usol.rhs/t**2, t)
    sol2 = C4*t + t*Integral(vsol.rhs/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def sysode_linear_3eq_order1(match_):
    """Solve a system of three first-order linear homogeneous ODEs.

    Normalizes each equation by the coefficient of its own derivative,
    builds the coefficient table ``r``, rejects nonhomogeneous systems,
    and dispatches on the type determined by the classifier.  Note that
    ``eq`` is modified in place during normalization.
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    z = match_['func'][2].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # the independent variable: any Symbol inside the first Derivative found
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    for i in range(3):
        # divide through by the coefficient of the first derivative so each
        # equation becomes monic in x' (resp. y', z')
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # for equations:
    #   Eq(g1*diff(x(t),t), a1*x(t)+b1*y(t)+c1*z(t)+d1),
    #   Eq(g2*diff(y(t),t), a2*x(t)+b2*y(t)+c2*z(t)+d2), and
    #   Eq(g3*diff(z(t),t), a3*x(t)+b3*y(t)+c3*z(t)+d3)
    r['a1'] = fc[0,x(t),0]/fc[0,x(t),1]; r['a2'] = fc[1,x(t),0]/fc[1,y(t),1];
    r['a3'] = fc[2,x(t),0]/fc[2,z(t),1]
    r['b1'] = fc[0,y(t),0]/fc[0,x(t),1]; r['b2'] = fc[1,y(t),0]/fc[1,y(t),1];
    r['b3'] = fc[2,y(t),0]/fc[2,z(t),1]
    r['c1'] = fc[0,z(t),0]/fc[0,x(t),1]; r['c2'] = fc[1,z(t),0]/fc[1,y(t),1];
    r['c3'] = fc[2,z(t),0]/fc[2,z(t),1]
    # any term free of x, y and z makes the system nonhomogeneous
    for i in range(3):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t), z(t)):
                raise NotImplementedError("Only homogeneous problems are supported, non-homogeneous are not supported currently.")
    if match_['type_of_equation'] == 'type1':
        sol = _linear_3eq_order1_type1(x, y, z, t, r, eq)
    if match_['type_of_equation'] == 'type2':
        sol = _linear_3eq_order1_type2(x, y, z, t, r, eq)
    if match_['type_of_equation'] == 'type3':
        sol = _linear_3eq_order1_type3(x, y, z, t, r, eq)
    if match_['type_of_equation'] == 'type4':
        sol = _linear_3eq_order1_type4(x, y, z, t, r, eq)
    if match_['type_of_equation'] == 'type6':
        # generic n-equation solver handles this case
        sol = _linear_neq_order1_type1(match_)
    return sol
def _linear_3eq_order1_type1(x, y, z, t, r, eq):
    r"""
    Solves the triangular system
    .. math:: x' = ax
    .. math:: y' = bx + cy
    .. math:: z' = dx + ky + pz
    by forward substitution:
    .. math:: x = C_1 e^{at}
    .. math:: y = \frac{b C_1}{a - c} e^{at} + C_2 e^{ct}
    .. math:: z = \frac{C_1}{a - p} (d + \frac{bk}{a - c}) e^{at} + \frac{k C_2}{c - p} e^{ct} + C_3 e^{pt}
    where `C_1, C_2` and `C_3` are arbitrary constants.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    a = -r['a1']
    b, c = -r['a2'], -r['b2']
    d, k, p = -r['a3'], -r['b3'], -r['c3']
    # x decouples completely
    sol1 = C1*exp(a*t)
    # substitute x into the equation for y
    sol2 = C2*exp(c*t) + b*C1*exp(a*t)/(a - c)
    # substitute x and y into the equation for z
    sol3 = C3*exp(p*t) + k*C2*exp(c*t)/(c - p) + C1*(d + b*k/(a - c))*exp(a*t)/(a - p)
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type2(x, y, z, t, r, eq):
    r"""
    Solves the antisymmetric system
    .. math:: x' = cy - bz
    .. math:: y' = az - cx
    .. math:: z' = bx - ay
    The trajectories are circles: the first integrals
    `ax + by + cz = A` and `x^2 + y^2 + z^2 = B^2` describe a plane and a
    sphere whose intersection the solution follows.  With
    `k = \sqrt{a^2 + b^2 + c^2}` the solution is expressed through four
    constants of integration constrained by `a C_1 + b C_2 + c C_3 = 0`.
    """
    C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0)
    a, b, c = -r['c2'], -r['a3'], -r['b1']
    k = sqrt(a**2 + b**2 + c**2)
    # impose the constraint a*C1 + b*C2 + c*C3 = 0 by eliminating C3
    C3 = -(a*C1 + b*C2)/c
    sol1 = a*C0 + k*C1*cos(k*t) + (c*C2 - b*C3)*sin(k*t)
    sol2 = b*C0 + k*C2*cos(k*t) + (a*C3 - c*C1)*sin(k*t)
    sol3 = c*C0 + k*C3*cos(k*t) + (b*C1 - a*C2)*sin(k*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type3(x, y, z, t, r, eq):
    r"""
    Solves the system
    .. math:: a x' = bc (y - z)
    .. math:: b y' = ac (z - x)
    .. math:: c z' = ab (x - y)
    The first integral `a^2 x + b^2 y + c^2 z = A` shows the integral lines
    are plane curves.  With `k = \sqrt{a^2 + b^2 + c^2}` the solution is
    expressed through four constants of integration constrained by
    `a^2 C_1 + b^2 C_2 + c^2 C_3 = 0`.
    """
    C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0)
    # recover a, b, c from the pairwise coefficient products
    c = sqrt(r['b1']*r['c2'])
    b = sqrt(r['b1']*r['a3'])
    a = sqrt(r['c2']*r['a3'])
    # impose a**2*C1 + b**2*C2 + c**2*C3 = 0 by eliminating C3
    C3 = -(a**2*C1 + b**2*C2)/c**2
    k = sqrt(a**2 + b**2 + c**2)
    sol1 = C0 + k*C1*cos(k*t) + b*c*(C2 - C3)*sin(k*t)/a
    sol2 = C0 + k*C2*cos(k*t) + a*c*(C3 - C1)*sin(k*t)/b
    sol3 = C0 + k*C3*cos(k*t) + a*b*(C1 - C2)*sin(k*t)/c
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type4(x, y, z, t, r, eq):
    r"""
    Equations:
    .. math:: x' = (a_1 f(t) + g(t)) x + a_2 f(t) y + a_3 f(t) z
    .. math:: y' = b_1 f(t) x + (b_2 f(t) + g(t)) y + b_3 f(t) z
    .. math:: z' = c_1 f(t) x + c_2 f(t) y + (c_3 f(t) + g(t)) z
    The transformation
    .. math:: x = e^{\int g(t) \,dt} u, y = e^{\int g(t) \,dt} v, z = e^{\int g(t) \,dt} w, \tau = \int f(t) \,dt
    leads to the system of constant coefficient linear differential equations
    .. math:: u' = a_1 u + a_2 v + a_3 w
    .. math:: v' = b_1 u + b_2 v + b_3 w
    .. math:: w' = c_1 u + c_2 v + c_3 w
    These system of equations are solved by homogeneous linear system of constant
    coefficients of `n` equations of first order. Then substituting the value of
    `u, v` and `w` in transformed equation gives value of `x, y` and `z`.
    """
    u, v, w = symbols('u, v, w', cls=Function)
    # Split the matched off-diagonal coefficients into the shared factor f(t)
    # and the constant parts a_i, b_i, c_i of the coefficient matrix.
    a2, a3 = cancel(r['b1']/r['c1']).as_numer_denom()
    f = cancel(r['b1']/a2)
    b1 = cancel(r['a2']/f); b3 = cancel(r['c2']/f)
    c1 = cancel(r['a3']/f); c2 = cancel(r['b3']/f)
    # div() separates the diagonal entries into a_i*f(t) + g(t).
    a1, g = div(r['a1'],f)
    b2 = div(r['b2'],f)[0]
    c3 = div(r['c3'],f)[0]
    # Constant-coefficient system in the transformed unknowns u, v, w.
    trans_eq = (diff(u(t),t)-a1*u(t)-a2*v(t)-a3*w(t), diff(v(t),t)-b1*u(t)-\
    b2*v(t)-b3*w(t), diff(w(t),t)-c1*u(t)-c2*v(t)-c3*w(t))
    sol = dsolve(trans_eq)
    # Undo the transformation: scale by e^{Integral(g)} and substitute
    # tau = Integral(f) back for t.
    sol1 = exp(Integral(g,t))*((sol[0].rhs).subs(t, Integral(f,t)))
    sol2 = exp(Integral(g,t))*((sol[1].rhs).subs(t, Integral(f,t)))
    sol3 = exp(Integral(g,t))*((sol[2].rhs).subs(t, Integral(f,t)))
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def sysode_linear_neq_order1(match_):
    """Entry point for linear systems of ``n`` first-order ODEs.

    Only one solver (the constant-coefficient homogeneous ``type1``) is
    implemented for this class, so the match dictionary produced by
    ``classify_sysode`` is forwarded to it directly.
    """
    return _linear_neq_order1_type1(match_)
def _linear_neq_order1_type1(match_):
    r"""
    System of n first-order constant-coefficient linear nonhomogeneous differential equation
    .. math:: y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n
    or that can be written as `\vec{y'} = A . \vec{y}`
    where `\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \times n` matrix.
    Since these equations are equivalent to a first order homogeneous linear
    differential equation. So the general solution will contain `n` linearly
    independent parts and solution will consist some type of exponential
    functions. Assuming `y = \vec{v} e^{rt}` is a solution of the system where
    `\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and
    `y' = r v e^{r t}` into the equation `\vec{y'} = A . \vec{y}`, we get
    .. math:: r \vec{v} e^{rt} = A \vec{v} e^{rt}
    .. math:: r \vec{v} = A \vec{v}
    where `r` comes out to be eigenvalue of `A` and vector `\vec{v}` is the eigenvector
    of `A` corresponding to `r`. There are three possibilities of eigenvalues of `A`
    - `n` distinct real eigenvalues
    - complex conjugate eigenvalues
    - eigenvalues with multiplicity `k`
    1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different eigenvectors
    `v_1,...v_n` then the solution is given by
    .. math:: \vec{y} = C_1 e^{r_1 t} \vec{v_1} + C_2 e^{r_2 t} \vec{v_2} +...+ C_n e^{r_n t} \vec{v_n}
    where `C_1,C_2,...,C_n` are arbitrary constants.
    2. When some eigenvalues are complex then in order to make the solution real,
    we take a linear combination: if `r = a + bi` has an eigenvector
    `\vec{v} = \vec{w_1} + i \vec{w_2}` then to obtain real-valued solutions to
    the system, replace the complex-valued solutions `e^{rx} \vec{v}`
    with real-valued solution `e^{ax} (\vec{w_1} \cos(bx) - \vec{w_2} \sin(bx))`
    and for `r = a - bi` replace the solution `e^{-r x} \vec{v}` with
    `e^{ax} (\vec{w_1} \sin(bx) + \vec{w_2} \cos(bx))`
    3. If some eigenvalues are repeated. Then we get fewer than `n` linearly
    independent eigenvectors, we miss some of the solutions and need to
    construct the missing ones. We do this via generalized eigenvectors, vectors
    which are not eigenvectors but are close enough that we can use to write
    down the remaining solutions. For a eigenvalue `r` with eigenvector `\vec{w}`
    we obtain `\vec{w_2},...,\vec{w_k}` using
    .. math:: (A - r I) . \vec{w_2} = \vec{w}
    .. math:: (A - r I) . \vec{w_3} = \vec{w_2}
    .. math:: \vdots
    .. math:: (A - r I) . \vec{w_k} = \vec{w_{k-1}}
    Then the solutions to the system for the eigenspace are `e^{rt} [\vec{w}],
    e^{rt} [t \vec{w} + \vec{w_2}], e^{rt} [\frac{t^2}{2} \vec{w} + t \vec{w_2} + \vec{w_3}],
    ...,e^{rt} [\frac{t^{k-1}}{(k-1)!} \vec{w} + \frac{t^{k-2}}{(k-2)!} \vec{w_2} +...+ t \vec{w_{k-1}}
    + \vec{w_k}]`
    So, If `\vec{y_1},...,\vec{y_n}` are `n` solution of obtained from three
    categories of `A`, then general solution to the system `\vec{y'} = A . \vec{y}`
    .. math:: \vec{y} = C_1 \vec{y_1} + C_2 \vec{y_2} + \cdots + C_n \vec{y_n}
    """
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    n = len(eq)
    # The independent variable is the lone symbol inside the first derivative.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)
    # Coefficient matrix A of y' = A.y; signs are flipped because the stored
    # equations are in "lhs - rhs = 0" form.
    M = Matrix(n,n,lambda i,j:-fc[i,func[j],0])
    evector = M.eigenvects(simplify=True)
    def is_complex(mat, root):
        # Real-valued solution for a complex root: Re(v) cos(bt) - Im(v) sin(bt).
        return Matrix(n, 1, lambda i,j: re(mat[i])*cos(im(root)*t) - im(mat[i])*sin(im(root)*t))
    def is_complex_conjugate(mat, root):
        # Companion real-valued solution contributed by the conjugate root.
        return Matrix(n, 1, lambda i,j: re(mat[i])*sin(abs(im(root))*t) + im(mat[i])*cos(im(root)*t)*abs(im(root))/im(root))
    conjugate_root = []
    e_vector = zeros(n,1)
    for evects in evector:
        if evects[0] not in conjugate_root:
            # If the number of eigenvectors is smaller than the eigenvalue's
            # multiplicity, the missing generalized eigenvectors are computed.
            if len(evects[2])!=evects[1]:
                var_mat = Matrix(n, 1, lambda i,j: Symbol('x'+str(i)))
                Mnew = (M - evects[0]*eye(evects[2][-1].rows))*var_mat
                w = [0 for i in range(evects[1])]
                w[0] = evects[2][-1]
                for r in range(1, evects[1]):
                    # Solve (A - r I) w_r = w_{r-1} for the next generalized vector.
                    w_ = Mnew - w[r-1]
                    sol_dict = solve(list(w_), var_mat[1:])
                    sol_dict[var_mat[0]] = var_mat[0]
                    for key, value in sol_dict.items():
                        sol_dict[key] = value.subs(var_mat[0],1)
                    w[r] = Matrix(n, 1, lambda i,j: sol_dict[var_mat[i]])
                    evects[2].append(w[r])
            # Accumulate C_i * t^(i-j)/ (i-j)! * e^{rt} * w_j terms of the
            # general solution for this eigenvalue.
            for i in range(evects[1]):
                C = next(constants)
                for j in range(i+1):
                    if evects[0].has(I):
                        evects[2][j] = simplify(evects[2][j])
                        e_vector += C*is_complex(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j)
                        C = next(constants)
                        e_vector += C*is_complex_conjugate(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j)
                    else:
                        e_vector += C*evects[2][j]*t**(i-j)*exp(evects[0]*t)/factorial(i-j)
            # A complex root and its conjugate are handled together, so skip
            # the conjugate when it comes up in the eigenvector list.
            if evects[0].has(I):
                conjugate_root.append(conjugate(evects[0]))
    sol = []
    for i in range(len(eq)):
        sol.append(Eq(func[i],e_vector[i]))
    return sol
def sysode_nonlinear_2eq_order1(match_):
    """Dispatch a nonlinear system of two first-order ODEs to its solver.

    ``match_`` comes from ``classify_sysode`` and carries the equations,
    the unknown functions, the derivative coefficients and the detected
    equation type (``'type1'`` .. ``'type5'``).
    """
    func = match_['func']
    eq = match_['eq']
    fc = match_['func_coeff']
    # The independent variable is the lone symbol inside the first derivative.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    eq_type = match_['type_of_equation']
    # The Clairaut case works on the raw equations; no normalization needed.
    if eq_type == 'type5':
        return _nonlinear_2eq_order1_type5(func, t, eq)
    x = func[0].func
    y = func[1].func
    # Normalize each equation by the coefficient of its first derivative.
    for i in range(2):
        eq[i] = Add(*[term/fc[i,func[i],1] for term in Add.make_args(eq[i])])
    if eq_type == 'type1':
        sol = _nonlinear_2eq_order1_type1(x, y, t, eq)
    elif eq_type == 'type2':
        sol = _nonlinear_2eq_order1_type2(x, y, t, eq)
    elif eq_type == 'type3':
        sol = _nonlinear_2eq_order1_type3(x, y, t, eq)
    elif eq_type == 'type4':
        sol = _nonlinear_2eq_order1_type4(x, y, t, eq)
    return sol
def _nonlinear_2eq_order1_type1(x, y, t, eq):
    r"""
    Equations:
    .. math:: x' = x^n F(x,y)
    .. math:: y' = g(y) F(x,y)
    Solution:
    .. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2
    where
    if `n \neq 1`
    .. math:: \varphi = [C_1 + (1-n) \int \frac{1}{g(y)} \,dy]^{\frac{1}{1-n}}
    if `n = 1`
    .. math:: \varphi = C_1 e^{\int \frac{1}{g(y)} \,dy}
    where `C_1` and `C_2` are arbitrary constants.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    n = Wild('n', exclude=[x(t),y(t)])
    f = Wild('f')
    u, v = symbols('u, v')
    # Match x' = x**n * F(x, y) to extract the exponent n and the factor F.
    r = eq[0].match(diff(x(t),t) - x(t)**n*f)
    # g(y) is what remains of the second equation after dividing out F.
    g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
    F = r[f].subs(x(t),u).subs(y(t),v)
    n = r[n]
    # phi(y) from the docstring, with the n = 1 exponential special case.
    if n!=1:
        phi = (C1 + (1-n)*Integral(1/g, v))**(1/(1-n))
    else:
        phi = C1*exp(Integral(1/g, v))
    phi = phi.doit()
    # Quadrature for y; each root gives a (x, y) solution pair.
    sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
    sol = []
    for sols in sol2:
        sol.append(Eq(x(t),phi.subs(v, sols)))
        sol.append(Eq(y(t), sols))
    return sol
def _nonlinear_2eq_order1_type2(x, y, t, eq):
    r"""
    Equations:
    .. math:: x' = e^{\lambda x} F(x,y)
    .. math:: y' = g(y) F(x,y)
    Solution:
    .. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2
    where
    if `\lambda \neq 0`
    .. math:: \varphi = -\frac{1}{\lambda} log(C_1 - \lambda \int \frac{1}{g(y)} \,dy)
    if `\lambda = 0`
    .. math:: \varphi = C_1 + \int \frac{1}{g(y)} \,dy
    where `C_1` and `C_2` are arbitrary constants.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    n = Wild('n', exclude=[x(t),y(t)])
    f = Wild('f')
    u, v = symbols('u, v')
    # Match x' = exp(lambda*x) * F(x, y); n plays the role of lambda here.
    r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
    # g(y) is what remains of the second equation after dividing out F.
    g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
    F = r[f].subs(x(t),u).subs(y(t),v)
    n = r[n]
    # phi(y) from the docstring, with the lambda = 0 linear special case.
    if n:
        phi = -1/n*log(C1 - n*Integral(1/g, v))
    else:
        phi = C1 + Integral(1/g, v)
    phi = phi.doit()
    # Quadrature for y; each root gives a (x, y) solution pair.
    sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
    sol = []
    for sols in sol2:
        sol.append(Eq(x(t),phi.subs(v, sols)))
        sol.append(Eq(y(t), sols))
    return sol
def _nonlinear_2eq_order1_type3(x, y, t, eq):
    r"""
    Autonomous system of general form
    .. math:: x' = F(x,y)
    .. math:: y' = G(x,y)
    Assuming `y = y(x, C_1)` where `C_1` is an arbitrary constant is the general
    solution of the first-order equation
    .. math:: F(x,y) y'_x = G(x,y)
    Then the general solution of the original system of equations has the form
    .. math:: \int \frac{1}{F(x,y(x,C_1))} \,dx = t + C_1
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    v = Function('v')
    u = Symbol('u')
    f = Wild('f')
    g = Wild('g')
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    F = r1[f].subs(x(t), u).subs(y(t), v(u))
    G = r2[g].subs(x(t), u).subs(y(t), v(u))
    # Reduce to the single first-order ODE dv/du = G/F and solve it.
    sol2r = dsolve(Eq(diff(v(u), u), G/F))
    for sol2s in sol2r:
        sol1 = solve(Integral(1/F.subs(v(u), sol2s.rhs), u).doit() - t - C2, u)
    # NOTE(review): only the last branch of sol2r is carried forward below
    # (sol1/sol2s keep their final loop values) — verify whether all branches
    # should be collected.
    sol = []
    for sols in sol1:
        sol.append(Eq(x(t), sols))
        sol.append(Eq(y(t), (sol2s.rhs).subs(u, sols)))
    return sol
def _nonlinear_2eq_order1_type4(x, y, t, eq):
    r"""
    Equation:
    .. math:: x' = f_1(x) g_1(y) \phi(x,y,t)
    .. math:: y' = f_2(x) g_2(y) \phi(x,y,t)
    First integral:
    .. math:: \int \frac{f_2(x)}{f_1(x)} \,dx - \int \frac{g_1(y)}{g_2(y)} \,dy = C
    where `C` is an arbitrary constant.
    On solving the first integral for `x` (resp., `y` ) and on substituting the
    resulting expression into either equation of the original solution, one
    arrives at a first-order equation for determining `y` (resp., `x` ).
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v = symbols('u, v')
    U, V = symbols('U, V', cls=Function)
    f = Wild('f')
    g = Wild('g')
    # f1, f2 may depend only on x (u); g1, g2 only on y (v).
    f1 = Wild('f1', exclude=[v,t])
    f2 = Wild('f2', exclude=[v,t])
    g1 = Wild('g1', exclude=[u,t])
    g2 = Wild('g2', exclude=[u,t])
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    # The ratio x'/y' cancels the common factor phi; split it into the
    # separable pieces f1*g1 / (f2*g2).
    num, den = (
        (r1[f].subs(x(t),u).subs(y(t),v))/
        (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
    R1 = num.match(f1*g1)
    R2 = den.match(f2*g2)
    # Recover phi by dividing the first right-hand side by f1*g1.
    phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
    F1 = R1[f1]; F2 = R2[f2]
    G1 = R1[g1]; G2 = R2[g2]
    # Solve the first integral once for x and once for y.
    sol1r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, u)
    sol2r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, v)
    sol = []
    for sols in sol1r:
        sol.append(Eq(y(t), dsolve(diff(V(t),t) - F2.subs(u,sols).subs(v,V(t))*G2.subs(v,V(t))*phi.subs(u,sols).subs(v,V(t))).rhs))
    for sols in sol2r:
        sol.append(Eq(x(t), dsolve(diff(U(t),t) - F1.subs(u,U(t))*G1.subs(v,sols).subs(u,U(t))*phi.subs(v,sols).subs(u,U(t))).rhs))
    # Returned as a set to drop duplicate solutions.
    return set(sol)
def _nonlinear_2eq_order1_type5(func, t, eq):
    r"""
    Clairaut system of ODEs
    .. math:: x = t x' + F(x',y')
    .. math:: y = t y' + G(x',y')
    The following are solutions of the system
    `(i)` straight lines:
    .. math:: x = C_1 t + F(C_1, C_2), y = C_2 t + G(C_1, C_2)
    where `C_1` and `C_2` are arbitrary constants;
    `(ii)` envelopes of the above lines;
    `(iii)` continuously differentiable lines made up from segments of the lines
    `(i)` and `(ii)`.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    f = Wild('f')
    g = Wild('g')
    def check_type(x, y):
        # Try the four equivalent ways the Clairaut form may have been
        # stored (multiplied through by t or negated).
        r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
        r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
            r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
        if not (r1 and r2):
            r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
            r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
            r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
        return [r1, r2]
    for func_ in func:
        if isinstance(func_, list):
            x = func[0][0].func
            y = func[0][1].func
            # If the match fails with (x, y), retry with the roles swapped.
            [r1, r2] = check_type(x, y)
            if not (r1 and r2):
                [r1, r2] = check_type(y, x)
                x, y = y, x
    x1 = diff(x(t),t); y1 = diff(y(t),t)
    # Straight-line family: substitute x' -> C1, y' -> C2 into F and G.
    return {Eq(x(t), C1*t + r1[f].subs(x1,C1).subs(y1,C2)), Eq(y(t), C2*t + r2[g].subs(x1,C1).subs(y1,C2))}
def sysode_nonlinear_3eq_order1(match_):
    """Dispatch a nonlinear system of three first-order ODEs to its solver.

    ``match_`` comes from ``classify_sysode``; the detected equation type
    selects one of the ``_nonlinear_3eq_order1_type*`` routines.
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    z = match_['func'][2].func
    eq = match_['eq']
    # The independent variable is the lone symbol inside the first derivative.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    kind = match_['type_of_equation']
    if kind == 'type1':
        sol = _nonlinear_3eq_order1_type1(x, y, z, t, eq)
    elif kind == 'type2':
        sol = _nonlinear_3eq_order1_type2(x, y, z, t, eq)
    elif kind == 'type3':
        sol = _nonlinear_3eq_order1_type3(x, y, z, t, eq)
    elif kind == 'type4':
        sol = _nonlinear_3eq_order1_type4(x, y, z, t, eq)
    elif kind == 'type5':
        sol = _nonlinear_3eq_order1_type5(x, y, z, t, eq)
    return sol
def _nonlinear_3eq_order1_type1(x, y, z, t, eq):
    r"""
    Equations:
    .. math:: a x' = (b - c) y z, \enspace b y' = (c - a) z x, \enspace c z' = (a - b) x y
    First Integrals:
    .. math:: a x^{2} + b y^{2} + c z^{2} = C_1
    .. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2
    where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and
    `z` and on substituting the resulting expressions into the first equation of the
    system, we arrives at a separable first-order equation on `x`. Similarly doing that
    for other two equations, we will arrive at first order equation on `y` and `z` too.
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0401.pdf
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    # Match the constant ratios (b-c)/a, (c-a)/b, (a-b)/c in front of the
    # quadratic products.
    r = (diff(x(t),t) - eq[0]).match(p*y(t)*z(t))
    r.update((diff(y(t),t) - eq[1]).match(q*z(t)*x(t)))
    r.update((diff(z(t),t) - eq[2]).match(s*x(t)*y(t)))
    n1, d1 = r[p].as_numer_denom()
    n2, d2 = r[q].as_numer_denom()
    n3, d3 = r[s].as_numer_denom()
    # Recover (a, b, c) up to a common scale by solving the linear relations
    # implied by the matched ratios; u, v, w stand for a, b, c.
    val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, d3*u-d3*v-n3*w],[u,v])
    vals = [val[v], val[u]]
    c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
    b = vals[0].subs(w, c)
    a = vals[1].subs(w, c)
    # Express each unknown through the two first integrals in terms of the
    # remaining one, then reduce to three separable scalar ODEs.
    y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
    z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
    z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
    x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
    x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
    y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
    sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x)
    sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y)
    sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z)
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type2(x, y, z, t, eq):
    r"""
    Equations:
    .. math:: a x' = (b - c) y z f(x, y, z, t)
    .. math:: b y' = (c - a) z x f(x, y, z, t)
    .. math:: c z' = (a - b) x y f(x, y, z, t)
    First Integrals:
    .. math:: a x^{2} + b y^{2} + c z^{2} = C_1
    .. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2
    where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and
    `z` and on substituting the resulting expressions into the first equation of the
    system, we arrives at a first-order differential equations on `x`. Similarly doing
    that for other two equations we will arrive at first order equation on `y` and `z`.
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0402.pdf
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    f = Wild('f')
    # Pull out the common factor f(x, y, z, t) first, then match the constant
    # ratios exactly as in type1.
    r1 = (diff(x(t),t) - eq[0]).match(y(t)*z(t)*f)
    r = collect_const(r1[f]).match(p*f)
    r.update(((diff(y(t),t) - eq[1])/r[f]).match(q*z(t)*x(t)))
    r.update(((diff(z(t),t) - eq[2])/r[f]).match(s*x(t)*y(t)))
    n1, d1 = r[p].as_numer_denom()
    n2, d2 = r[q].as_numer_denom()
    n3, d3 = r[s].as_numer_denom()
    # Recover (a, b, c) up to scale; u, v, w stand for a, b, c.
    val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, -d3*u+d3*v+n3*w],[u,v])
    vals = [val[v], val[u]]
    c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
    # NOTE(review): type1 assigns b = vals[0], a = vals[1]; here the order is
    # swapped — confirm which assignment matches the sign conventions of the
    # solved linear system.
    a = vals[0].subs(w, c)
    b = vals[1].subs(w, c)
    # Express each unknown through the two first integrals, then reduce to
    # three scalar ODEs that still carry the common factor f.
    y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
    z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
    z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
    x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
    x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
    y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
    sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x*r[f])
    sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y*r[f])
    sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z*r[f])
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type3(x, y, z, t, eq):
    r"""
    Equations:
    .. math:: x' = c F_2 - b F_3, \enspace y' = a F_3 - c F_1, \enspace z' = b F_1 - a F_2
    where `F_n = F_n(x, y, z, t)`.
    1. First Integral:
    .. math:: a x + b y + c z = C_1,
    where C is an arbitrary constant.
    2. If we assume function `F_n` to be independent of `t`,i.e, `F_n` = `F_n (x, y, z)`
    Then, on eliminating `t` and `z` from the first two equation of the system, one
    arrives at the first-order equation
    .. math:: \frac{dy}{dx} = \frac{a F_3 (x, y, z) - c F_1 (x, y, z)}{c F_2 (x, y, z) -
    b F_3 (x, y, z)}
    where `z = \frac{1}{c} (C_1 - a x - b y)`
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0404.pdf
    """
    C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    # Match c*F2 - b*F3 from the first equation and separate the constants.
    r1 = (diff(x(t), t) - eq[0]).match(F2-F3)
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    # If F2 and F3 were matched in the wrong order, swap them (and negate the
    # associated constants) so the second equation can be matched.
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    r.update((diff(y(t), t) - eq[1]).match(p*r[F3] - r[s]*F1))
    a = r[p]; b = r[q]; c = r[s]
    F1 = r[F1].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    F2 = r[F2].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    F3 = r[F3].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    # Each unknown expressed through the linear first integral a x + b y + c z = C1.
    z_xy = (C1-a*u-b*v)/c
    y_zx = (C1-a*u-c*w)/b
    x_yz = (C1-b*v-c*w)/a
    # NOTE(review): u, v, w are plain Symbols here, yet they are applied as
    # v(u), w(u), ... below — confirm Symbol is callable on the target SymPy
    # version, otherwise this path cannot execute.
    y_x = dsolve(diff(v(u),u) - ((a*F3-c*F1)/(c*F2-b*F3)).subs(w,z_xy).subs(v,v(u))).rhs
    z_x = dsolve(diff(w(u),u) - ((b*F1-a*F2)/(c*F2-b*F3)).subs(v,y_zx).subs(w,w(u))).rhs
    z_y = dsolve(diff(w(v),v) - ((b*F1-a*F2)/(a*F3-c*F1)).subs(u,x_yz).subs(w,w(v))).rhs
    x_y = dsolve(diff(u(v),v) - ((c*F2-b*F3)/(a*F3-c*F1)).subs(w,z_xy).subs(u,u(v))).rhs
    y_z = dsolve(diff(v(w),w) - ((a*F3-c*F1)/(b*F1-a*F2)).subs(u,x_yz).subs(v,v(w))).rhs
    x_z = dsolve(diff(u(w),w) - ((c*F2-b*F3)/(b*F1-a*F2)).subs(v,y_zx).subs(u,u(w))).rhs
    # Substitute the pairwise relations back to get one scalar ODE per unknown.
    sol1 = dsolve(diff(u(t),t) - (c*F2 - b*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs
    sol2 = dsolve(diff(v(t),t) - (a*F3 - c*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs
    sol3 = dsolve(diff(w(t),t) - (b*F1 - a*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type4(x, y, z, t, eq):
    r"""
    Equations:
    .. math:: x' = c z F_2 - b y F_3, \enspace y' = a x F_3 - c z F_1, \enspace z' = b y F_1 - a x F_2
    where `F_n = F_n (x, y, z, t)`
    1. First integral:
    .. math:: a x^{2} + b y^{2} + c z^{2} = C_1
    where `C` is an arbitrary constant.
    2. Assuming the function `F_n` is independent of `t`: `F_n = F_n (x, y, z)`. Then on
    eliminating `t` and `z` from the first two equations of the system, one arrives at
    the first-order equation
    .. math:: \frac{dy}{dx} = \frac{a x F_3 (x, y, z) - c z F_1 (x, y, z)}
    {c z F_2 (x, y, z) - b y F_3 (x, y, z)}
    where `z = \pm \sqrt{\frac{1}{c} (C_1 - a x^{2} - b y^{2})}`
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0405.pdf
    """
    C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    # Match c*z*F2 - b*y*F3 from the first equation and separate the constants.
    r1 = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    # Swap F2/F3 (negating constants) if they were matched in the wrong order.
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    r.update((diff(y(t),t) - eq[1]).match(p*x(t)*r[F3] - r[s]*z(t)*F1))
    a = r[p]; b = r[q]; c = r[s]
    F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    # Each unknown expressed through the quadratic first integral.
    x_yz = sqrt((C1 - b*v**2 - c*w**2)/a)
    y_zx = sqrt((C1 - c*w**2 - a*u**2)/b)
    z_xy = sqrt((C1 - a*u**2 - b*v**2)/c)
    # NOTE(review): u, v, w are plain Symbols applied as v(u), w(u), ... —
    # confirm Symbol is callable on the target SymPy version.
    y_x = dsolve(diff(v(u),u) - ((a*u*F3-c*w*F1)/(c*w*F2-b*v*F3)).subs(w,z_xy).subs(v,v(u))).rhs
    z_x = dsolve(diff(w(u),u) - ((b*v*F1-a*u*F2)/(c*w*F2-b*v*F3)).subs(v,y_zx).subs(w,w(u))).rhs
    z_y = dsolve(diff(w(v),v) - ((b*v*F1-a*u*F2)/(a*u*F3-c*w*F1)).subs(u,x_yz).subs(w,w(v))).rhs
    x_y = dsolve(diff(u(v),v) - ((c*w*F2-b*v*F3)/(a*u*F3-c*w*F1)).subs(w,z_xy).subs(u,u(v))).rhs
    y_z = dsolve(diff(v(w),w) - ((a*u*F3-c*w*F1)/(b*v*F1-a*u*F2)).subs(u,x_yz).subs(v,v(w))).rhs
    x_z = dsolve(diff(u(w),w) - ((c*w*F2-b*v*F3)/(b*v*F1-a*u*F2)).subs(v,y_zx).subs(u,u(w))).rhs
    # Substitute the pairwise relations back to get one scalar ODE per unknown.
    sol1 = dsolve(diff(u(t),t) - (c*w*F2 - b*v*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs
    sol2 = dsolve(diff(v(t),t) - (a*u*F3 - c*w*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs
    sol3 = dsolve(diff(w(t),t) - (b*v*F1 - a*u*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type5(x, y, z, t, eq):
    r"""
    .. math:: x' = x (c F_2 - b F_3), \enspace y' = y (a F_3 - c F_1), \enspace z' = z (b F_1 - a F_2)
    where `F_n = F_n (x, y, z, t)` and are arbitrary functions.
    First Integral:
    .. math:: \left|x\right|^{a} \left|y\right|^{b} \left|z\right|^{c} = C_1
    where `C` is an arbitrary constant. If the function `F_n` is independent of `t`,
    then, by eliminating `t` and `z` from the first two equations of the system, one
    arrives at a first-order equation.
    References
    ==========
    -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0406.pdf
    """
    C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    # Match x*(c*F2 - b*F3) from the first equation and separate the constants.
    r1 = eq[0].match(diff(x(t), t) - x(t)*(F2 - F3))
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    # Swap F2/F3 (negating constants) if they were matched in the wrong order.
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    r.update((diff(y(t), t) - eq[1]).match(y(t)*(p*r[F3] - r[s]*F1)))
    a = r[p]; b = r[q]; c = r[s]
    F1 = r[F1].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    F2 = r[F2].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    F3 = r[F3].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    # Each unknown expressed through the first integral |x|^a |y|^b |z|^c = C1.
    # NOTE(review): solving that integral for x gives exponent 1/a, not -a —
    # verify the outer **-a/**-b/**-c exponents below against the docstring.
    x_yz = (C1*v**-b*w**-c)**-a
    y_zx = (C1*w**-c*u**-a)**-b
    z_xy = (C1*u**-a*v**-b)**-c
    # NOTE(review): u, v, w are plain Symbols applied as v(u), w(u), ... —
    # confirm Symbol is callable on the target SymPy version.
    y_x = dsolve(diff(v(u), u) - ((v*(a*F3 - c*F1))/(u*(c*F2 - b*F3))).subs(w, z_xy).subs(v, v(u))).rhs
    z_x = dsolve(diff(w(u), u) - ((w*(b*F1 - a*F2))/(u*(c*F2 - b*F3))).subs(v, y_zx).subs(w, w(u))).rhs
    z_y = dsolve(diff(w(v), v) - ((w*(b*F1 - a*F2))/(v*(a*F3 - c*F1))).subs(u, x_yz).subs(w, w(v))).rhs
    x_y = dsolve(diff(u(v), v) - ((u*(c*F2 - b*F3))/(v*(a*F3 - c*F1))).subs(w, z_xy).subs(u, u(v))).rhs
    y_z = dsolve(diff(v(w), w) - ((v*(a*F3 - c*F1))/(w*(b*F1 - a*F2))).subs(u, x_yz).subs(v, v(w))).rhs
    x_z = dsolve(diff(u(w), w) - ((u*(c*F2 - b*F3))/(w*(b*F1 - a*F2))).subs(v, y_zx).subs(u, u(w))).rhs
    # Substitute the pairwise relations back to get one scalar ODE per unknown.
    sol1 = dsolve(diff(u(t), t) - (u*(c*F2 - b*F3)).subs(v, y_x).subs(w, z_x).subs(u, u(t))).rhs
    sol2 = dsolve(diff(v(t), t) - (v*(a*F3 - c*F1)).subs(u, x_y).subs(w, z_y).subs(v, v(t))).rhs
    sol3 = dsolve(diff(w(t), t) - (w*(b*F1 - a*F2)).subs(u, x_z).subs(v, y_z).subs(w, w(t))).rhs
    return [sol1, sol2, sol3]
| 39.712997 | 279 | 0.54749 | from __future__ import print_function, division
from collections import defaultdict
from itertools import islice
from sympy.core import Add, S, Mul, Pow, oo
from sympy.core.compatibility import ordered, iterable, is_sequence, range, string_types
from sympy.core.containers import Tuple
from sympy.core.exprtools import factor_terms
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.function import (Function, Derivative, AppliedUndef, diff,
expand, expand_mul, Subs, _mexpand)
from sympy.core.multidimensional import vectorize
from sympy.core.numbers import NaN, zoo, I, Number
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, Dummy, symbols
from sympy.core.sympify import sympify
from sympy.logic.boolalg import (BooleanAtom, And, Not, BooleanTrue,
BooleanFalse)
from sympy.functions import cos, exp, im, log, re, sin, tan, sqrt, \
atan2, conjugate, Piecewise
from sympy.functions.combinatorial.factorials import factorial
from sympy.integrals.integrals import Integral, integrate
from sympy.matrices import wronskian, Matrix, eye, zeros
from sympy.polys import (Poly, RootOf, rootof, terms_gcd,
PolynomialError, lcm, roots)
from sympy.polys.polyroots import roots_quartic
from sympy.polys.polytools import cancel, degree, div
from sympy.series import Order
from sympy.series.series import series
from sympy.simplify import collect, logcombine, powsimp, separatevars, \
simplify, trigsimp, posify, cse
from sympy.simplify.powsimp import powdenest
from sympy.simplify.radsimp import collect_const
from sympy.solvers import solve
from sympy.solvers.pde import pdsolve
from sympy.utilities import numbered_symbols, default_sort_key, sift
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
# All implemented single-ODE hint names, in the order in which they are
# tried by the "default" hint.  "_Integral" variants return the solution
# with unevaluated Integral objects instead of attempting to evaluate them.
allhints = (
    "nth_algebraic",
    "separable",
    "1st_exact",
    "1st_linear",
    "Bernoulli",
    "Riccati_special_minus2",
    "1st_homogeneous_coeff_best",
    "1st_homogeneous_coeff_subs_indep_div_dep",
    "1st_homogeneous_coeff_subs_dep_div_indep",
    "almost_linear",
    "linear_coefficients",
    "separable_reduced",
    "1st_power_series",
    "lie_group",
    "nth_linear_constant_coeff_homogeneous",
    "nth_linear_euler_eq_homogeneous",
    "nth_linear_constant_coeff_undetermined_coefficients",
    "nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients",
    "nth_linear_constant_coeff_variation_of_parameters",
    "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters",
    "Liouville",
    "nth_order_reducible",
    "2nd_power_series_ordinary",
    "2nd_power_series_regular",
    "nth_algebraic_Integral",
    "separable_Integral",
    "1st_exact_Integral",
    "1st_linear_Integral",
    "Bernoulli_Integral",
    "1st_homogeneous_coeff_subs_indep_div_dep_Integral",
    "1st_homogeneous_coeff_subs_dep_div_indep_Integral",
    "almost_linear_Integral",
    "linear_coefficients_Integral",
    "separable_reduced_Integral",
    "nth_linear_constant_coeff_variation_of_parameters_Integral",
    "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral",
    "Liouville_Integral",
    )
# Names of the heuristics used by the "lie_group" hint to search for
# infinitesimal symmetries of a first-order ODE.
lie_heuristics = (
    "abaco1_simple",
    "abaco1_product",
    "abaco2_similar",
    "abaco2_unique_unknown",
    "abaco2_unique_general",
    "linear",
    "function_sum",
    "bivariate",
    "chi"
    )
def sub_func_doit(eq, func, new):
    """Substitute ``new`` for ``func`` in ``eq``, rewriting derivatives.

    Derivatives of ``func`` are replaced by the corresponding (evaluated)
    derivatives of ``new``; in every other Derivative node the substitution
    is pushed inside and the node is re-evaluated one level deep.
    """
    reps= {func: new}
    for d in eq.atoms(Derivative):
        if d.expr == func:
            reps[d] = new.diff(*d.variable_count)
        else:
            reps[d] = d.xreplace({func: new}).doit(deep=False)
    return eq.xreplace(reps)
def get_numbered_constants(eq, num=1, start=1, prefix='C'):
    """Return ``num`` fresh constant symbols for ``eq``.

    The symbols are drawn from :func:`iter_numbered_constants`, so they
    never collide with names already appearing in ``eq``.  A single symbol
    is returned when ``num == 1``; otherwise a tuple of symbols.
    """
    gen = iter_numbered_constants(eq, start, prefix)
    consts = [next(gen) for _ in range(num)]
    if num == 1:
        return consts[0]
    return tuple(consts)
def iter_numbered_constants(eq, start=1, prefix='C'):
    """Return an iterator of constant symbols (C1, C2, ...) for ``eq``,
    skipping any names that already occur in the expression(s).
    """
    if isinstance(eq, Expr):
        eq = [eq]
    elif not iterable(eq):
        raise ValueError("Expected Expr or iterable but got %s" % eq)
    taken = set()
    applied = set()
    for expr in eq:
        taken |= expr.free_symbols
        applied |= expr.atoms(Function)
    # Applied-function names would shadow constants, so exclude them too.
    if applied:
        taken |= {Symbol(str(f.func)) for f in applied}
    return numbered_symbols(start=start, prefix=prefix, exclude=taken)
def dsolve(eq, func=None, hint="default", simplify=True,
    ics= None, xi=None, eta=None, x0=0, n=6, **kwargs):
    """Solve an ordinary differential equation or a system of them.

    An iterable ``eq`` is treated as a system and dispatched through
    ``classify_sysode``; a single expression/Equality goes through the
    hint machinery (``_desolve``).  ``ics`` are initial/boundary
    conditions applied to the solution constants.
    """
    if iterable(eq):
        # --- system of ODEs ---
        match = classify_sysode(eq, func)
        eq = match['eq']
        order = match['order']
        func = match['func']
        t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
        # Normalize signs so the highest-derivative coefficient is positive.
        for i in range(len(eq)):
            for func_ in func:
                if isinstance(func_, list):
                    pass
                else:
                    if eq[i].coeff(diff(func[i],t,ode_order(eq[i], func[i]))).is_negative:
                        eq[i] = -eq[i]
        match['eq'] = eq
        if len(set(order.values()))!=1:
            raise ValueError("It solves only those systems of equations whose orders are equal")
        match['order'] = list(order.values())[0]
        def recur_len(l):
            # Count functions, flattening one level of nested lists.
            return sum(recur_len(item) if isinstance(item,list) else 1 for item in l)
        if recur_len(func) != len(eq):
            raise ValueError("dsolve() and classify_sysode() work with "
            "number of functions being equal to number of equations")
        if match['type_of_equation'] is None:
            raise NotImplementedError
        else:
            # Pick the sysode_* solver by linearity, size and order.
            if match['is_linear'] == True:
                if match['no_of_equation'] > 3:
                    solvefunc = globals()['sysode_linear_neq_order%(order)s' % match]
                else:
                    solvefunc = globals()['sysode_linear_%(no_of_equation)seq_order%(order)s' % match]
            else:
                solvefunc = globals()['sysode_nonlinear_%(no_of_equation)seq_order%(order)s' % match]
            sols = solvefunc(match)
            if ics:
                # Constants are the free symbols introduced by the solver.
                constants = Tuple(*sols).free_symbols - Tuple(*eq).free_symbols
                solved_constants = solve_ics(sols, func, constants, ics)
                return [sol.subs(solved_constants) for sol in sols]
            return sols
    else:
        # --- single ODE ---
        given_hint = hint  # hint given by the user
        # See the docstring of _desolve for more details.
        hints = _desolve(eq, func=func,
            hint=hint, simplify=True, xi=xi, eta=eta, type='ode', ics=ics,
            x0=x0, n=n, **kwargs)
        eq = hints.pop('eq', eq)
        all_ = hints.pop('all', False)
        if all_:
            # Try every matched hint and collect results/failures.
            retdict = {}
            failed_hints = {}
            gethints = classify_ode(eq, dict=True)
            orderedhints = gethints['ordered_hints']
            for hint in hints:
                try:
                    rv = _helper_simplify(eq, hint, hints[hint], simplify)
                except NotImplementedError as detail:
                    failed_hints[hint] = detail
                else:
                    retdict[hint] = rv
            func = hints[hint]['func']
            retdict['best'] = min(list(retdict.values()), key=lambda x:
                ode_sol_simplicity(x, func, trysolving=not simplify))
            if given_hint == 'best':
                return retdict['best']
            for i in orderedhints:
                if retdict['best'] == retdict.get(i, None):
                    retdict['best_hint'] = i
                    break
            retdict['default'] = gethints['default']
            retdict['order'] = gethints['order']
            retdict.update(failed_hints)
            return retdict
        else:
            # The key 'hint' stores the hint needed to be solved for.
            hint = hints['hint']
            return _helper_simplify(eq, hint, hints, simplify, ics=ics)
def _helper_simplify(eq, hint, match, simplify=True, ics=None, **kwargs):
    """Solve ``eq`` with the ``ode_<hint>`` routine and post-process.

    With ``simplify=True`` the raw solution is passed through ``odesimp``;
    otherwise Integrals are merely evaluated by ``_handle_Integral``.
    Initial conditions (except for power-series hints, whose constants are
    handled elsewhere) are applied via ``solve_ics``.
    """
    r = match
    if hint.endswith('_Integral'):
        solvefunc = globals()['ode_' + hint[:-len('_Integral')]]
    else:
        solvefunc = globals()['ode_' + hint]
    func = r['func']
    order = r['order']
    match = r[hint]
    free = eq.free_symbols
    # Constants are whatever free symbols the solver introduced.
    cons = lambda s: s.free_symbols.difference(free)
    if simplify:
        # odesimp() will attempt to integrate, if necessary, apply constantsimp(),
        # attempt to solve for func, and apply any other hint specific
        # simplifications.
        sols = solvefunc(eq, func, order, match)
        if isinstance(sols, Expr):
            rv = odesimp(eq, sols, func, hint)
        else:
            rv = [odesimp(eq, s, func, hint) for s in sols]
    else:
        # We still want to integrate (possibly unevaluated Integrals).
        match['simplify'] = False
        rv = _handle_Integral(solvefunc(eq, func, order, match), func, hint)
    if ics and not 'power_series' in hint:
        if isinstance(rv, Expr):
            solved_constants = solve_ics([rv], [r['func']], cons(rv), ics)
            rv = rv.subs(solved_constants)
        else:
            # Keep only the branches whose constants can satisfy the ics.
            rv1 = []
            for s in rv:
                try:
                    solved_constants = solve_ics([s], [r['func']], cons(s), ics)
                except ValueError:
                    continue
                rv1.append(s.subs(solved_constants))
            if len(rv1) == 1:
                return rv1[0]
            rv = rv1
    return rv
def solve_ics(sols, funcs, constants, ics):
    """
    Solve ``constants`` (arbitrary integration constants) against the
    initial/boundary conditions ``ics``.

    ``sols`` is a list of ``Eq`` solutions, ``funcs`` the unknown
    functions, ``constants`` the symbols to solve for, and ``ics`` a dict
    mapping ``f(x0)`` / ``Subs``- or ``Derivative``-style conditions to
    values.  Returns a dict mapping each constant to its solved value;
    raises ``ValueError`` if the conditions cannot be satisfied and
    ``NotImplementedError`` if more than one solution set is found.
    """
    # Assume ics are of the form f(x0): value or
    # Subs(diff(f(x), x, n), (x, x0)): value
    x = funcs[0].args[0]
    diff_sols = []
    subs_sols = []
    diff_variables = set()
    for funcarg, value in ics.items():
        if isinstance(funcarg, AppliedUndef):
            # Plain condition f(x0) = value: substitute into the
            # solutions directly.
            x0 = funcarg.args[0]
            matching_func = [f for f in funcs if f.func == funcarg.func][0]
            S = sols
        elif isinstance(funcarg, (Subs, Derivative)):
            if isinstance(funcarg, Subs):
                # Make sure it stays a subs. Otherwise subs below will produce
                # a different looking term.
                funcarg = funcarg.doit()
            if isinstance(funcarg, Subs):
                deriv = funcarg.expr
                x0 = funcarg.point[0]
                variables = funcarg.expr.variables
                matching_func = deriv
            elif isinstance(funcarg, Derivative):
                deriv = funcarg
                x0 = funcarg.variables[0]
                variables = (x,)*len(funcarg.variables)
                matching_func = deriv.subs(x0, x)
            # Differentiate each solution once per distinct set of
            # derivative variables; cache in diff_sols.
            if variables not in diff_variables:
                for sol in sols:
                    if sol.has(deriv.expr.func):
                        diff_sols.append(Eq(sol.lhs.diff(*variables), sol.rhs.diff(*variables)))
                diff_variables.add(variables)
            S = diff_sols
        else:
            raise NotImplementedError("Unrecognized initial condition")

        for sol in S:
            if sol.has(matching_func):
                sol2 = sol
                sol2 = sol2.subs(x, x0)
                sol2 = sol2.subs(funcarg, value)
                # This check is necessary because of issue #15724
                if not isinstance(sol2, BooleanAtom) or not subs_sols:
                    subs_sols = [s for s in subs_sols if not isinstance(s, BooleanAtom)]
                    subs_sols.append(sol2)

    # TODO: Use solveset here
    try:
        solved_constants = solve(subs_sols, constants, dict=True)
    except NotImplementedError:
        solved_constants = []

    # XXX: We can't differentiate between the solution not existing because of
    # invalid initial conditions, and not existing because solve is not smart
    # enough. If we could use solveset, this might be improvable, but for now,
    # we use NotImplementedError in this case.
    if not solved_constants:
        raise ValueError("Couldn't solve for initial conditions")

    if solved_constants == True:
        raise ValueError("Initial conditions did not produce any solutions for constants. Perhaps they are degenerate.")

    if len(solved_constants) > 1:
        raise NotImplementedError("Initial conditions produced too many solutions for constants")

    return solved_constants[0]
def classify_ode(eq, func=None, dict=False, ics=None, **kwargs):
    r"""
    Return a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
    classifications (hint names) for an ODE, ordered from most to least
    preferred.

    With ``dict=True``, return instead a dictionary mapping each matched
    hint to its match data, plus ``'default'``, ``'order'`` and
    ``'ordered_hints'`` entries (used internally by ``dsolve``).

    ``ics`` may supply initial/boundary conditions (used by the power
    series hints); ``kwargs`` may carry ``xi``/``eta`` (lie_group),
    ``n`` (number of power series terms), ``x0`` (expansion point) and
    ``prep`` (skip preprocessing when False).

    Raises ``ValueError`` for functions of more than one variable or for
    malformed boundary conditions.
    """
    ics = sympify(ics)

    prep = kwargs.pop('prep', True)

    if func and len(func.args) != 1:
        raise ValueError("dsolve() and classify_ode() only "
        "work with functions of one variable, not %s" % func)
    if prep or func is None:
        eq, func_ = _preprocess(eq, func)
        if func is None:
            func = func_
    x = func.args[0]
    f = func.func
    y = Dummy('y')
    xi = kwargs.get('xi')
    eta = kwargs.get('eta')
    terms = kwargs.get('n')

    # Normalize Eq(lhs, rhs) into the expression lhs - rhs == 0.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return classify_ode(eq.lhs - eq.rhs, func, dict=dict, ics=ics, xi=xi,
                n=terms, eta=eta, prep=False)
        eq = eq.lhs
    order = ode_order(eq, f(x))
    # hint:matchdict or hint:(tuple of matchdicts)
    # Also will contain "default":<default hint> and "order":order items.
    matching_hints = {"order": order}

    if not order:
        if dict:
            matching_hints["default"] = None
            return matching_hints
        else:
            return ()

    df = f(x).diff(x)
    a = Wild('a', exclude=[f(x)])
    b = Wild('b', exclude=[f(x)])
    c = Wild('c', exclude=[f(x)])
    d = Wild('d', exclude=[df, f(x).diff(x, 2)])
    e = Wild('e', exclude=[df])
    k = Wild('k', exclude=[df])
    n = Wild('n', exclude=[x, f(x), df])
    c1 = Wild('c1', exclude=[x])
    a2 = Wild('a2', exclude=[x, f(x), df])
    b2 = Wild('b2', exclude=[x, f(x), df])
    c2 = Wild('c2', exclude=[x, f(x), df])
    d2 = Wild('d2', exclude=[x, f(x), df])
    a3 = Wild('a3', exclude=[f(x), df, f(x).diff(x, 2)])
    b3 = Wild('b3', exclude=[f(x), df, f(x).diff(x, 2)])
    c3 = Wild('c3', exclude=[f(x), df, f(x).diff(x, 2)])
    r3 = {'xi': xi, 'eta': eta}  # Used for the lie_group hint
    boundary = {}  # Used to extract initial conditions
    C1 = Symbol("C1")
    eq = expand(eq)

    # Preprocessing to get the initial conditions out
    if ics is not None:
        for funcarg in ics:
            # Separating derivatives
            if isinstance(funcarg, (Subs, Derivative)):
                # f(x).diff(x).subs(x, 0) is a Subs, but f(x).diff(x).subs(x,
                # y) is a Derivative
                if isinstance(funcarg, Subs):
                    deriv = funcarg.expr
                    old = funcarg.variables[0]
                    new = funcarg.point[0]
                elif isinstance(funcarg, Derivative):
                    deriv = funcarg
                    old = x
                    new = funcarg.variables[0]

                if (isinstance(deriv, Derivative) and isinstance(deriv.args[0],
                    AppliedUndef) and deriv.args[0].func == f and
                    len(deriv.args[0].args) == 1 and old == x and not
                    new.has(x) and all(i == deriv.variables[0] for i in
                    deriv.variables) and not ics[funcarg].has(f)):

                    dorder = ode_order(deriv, x)
                    temp = 'f' + str(dorder)
                    boundary.update({temp: new, temp + 'val': ics[funcarg]})
                else:
                    raise ValueError("Enter valid boundary conditions for Derivatives")

            # Separating functions
            elif isinstance(funcarg, AppliedUndef):
                if (funcarg.func == f and len(funcarg.args) == 1 and
                    not funcarg.args[0].has(x) and not ics[funcarg].has(f)):
                    boundary.update({'f0': funcarg.args[0], 'f0val': ics[funcarg]})
                else:
                    raise ValueError("Enter valid boundary conditions for Function")

            else:
                raise ValueError("Enter boundary conditions of the form ics={f(point}: value, f(x).diff(x, order).subs(x, point): value}")

    # Precondition to try remove f(x) from highest order derivative
    reduced_eq = None
    if eq.is_Add:
        deriv_coef = eq.coeff(f(x).diff(x, order))
        if deriv_coef not in (1, 0):
            r = deriv_coef.match(a*f(x)**c1)
            if r and r[c1]:
                den = f(x)**r[c1]
                reduced_eq = Add(*[arg/den for arg in eq.args])
    if not reduced_eq:
        reduced_eq = eq

    if order == 1:

        ## Linear case: a(x)*y'+b(x)*y+c(x) == 0
        # NOTE(review): this conditional was reconstructed from upstream
        # SymPy; the source text here was truncated.
        if eq.is_Add:
            ind, dep = reduced_eq.as_independent(f)
        else:
            u = Dummy('u')
            ind, dep = (reduced_eq + u).as_independent(f)
            ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
        r = {a: dep.coeff(df),
             b: dep.coeff(f(x)),
             c: ind}
        # double check f[a] since the preconditioning may have failed
        if not r[a].has(f) and not r[b].has(f) and (
                r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0:
            r['a'] = a
            r['b'] = b
            r['c'] = c
            matching_hints["1st_linear"] = r
            matching_hints["1st_linear_Integral"] = r

        ## Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
        r = collect(
            reduced_eq, f(x), exact=True).match(a*df + b*f(x) + c*f(x)**n)
        if r and r[c] != 0 and r[n] != 1:  # See issue 4676
            r['a'] = a
            r['b'] = b
            r['c'] = c
            r['n'] = n
            matching_hints["Bernoulli"] = r
            matching_hints["Bernoulli_Integral"] = r

        ## Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0
        r = collect(reduced_eq,
            f(x), exact=True).match(a2*df + b2*f(x)**2 + c2*f(x)/x + d2/x**2)
        if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0):
            r['a2'] = a2
            r['b2'] = b2
            r['c2'] = c2
            r['d2'] = d2
            matching_hints["Riccati_special_minus2"] = r

        # NON-REDUCED FORM OF EQUATION matches
        r = collect(eq, df, exact=True).match(d + e * df)
        if r:
            r['d'] = d
            r['e'] = e
            r['y'] = y
            r[d] = r[d].subs(f(x), y)
            r[e] = r[e].subs(f(x), y)

            # FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS
            # TODO: Hint first order series should match only if d/e is analytic.
            # For now, only d/e and (d/e).diff(arg) is checked for existence at
            # at a given point.
            # This is currently done internally in ode_1st_power_series.
            point = boundary.get('f0', 0)
            value = boundary.get('f0val', C1)
            check = cancel(r[d]/r[e])
            check1 = check.subs({x: point, y: value})
            if not check1.has(oo) and not check1.has(zoo) and \
                not check1.has(NaN) and not check1.has(-oo):
                check2 = (check1.diff(x)).subs({x: point, y: value})
                if not check2.has(oo) and not check2.has(zoo) and \
                    not check2.has(NaN) and not check2.has(-oo):
                    rseries = r.copy()
                    rseries.update({'terms': terms, 'f0': point, 'f0val': value})
                    matching_hints["1st_power_series"] = rseries
            r3.update(r)

            ## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where
            # dP/dy == dQ/dx
            try:
                if r[d] != 0:
                    numerator = simplify(r[d].diff(y) - r[e].diff(x))
                    # The following few conditions try to convert a non-exact
                    # differential equation into an exact one.
                    if numerator:
                        # If (dP/dy - dQ/dx) / Q = f(x)
                        # then exp(integral(f(x))*equation becomes exact
                        factor = simplify(numerator/r[e])
                        variables = factor.free_symbols
                        if len(variables) == 1 and x == variables.pop():
                            factor = exp(Integral(factor).doit())
                            r[d] *= factor
                            r[e] *= factor
                            matching_hints["1st_exact"] = r
                            matching_hints["1st_exact_Integral"] = r
                        else:
                            # If (dP/dy - dQ/dx) / -P = f(y)
                            # then exp(integral(f(y))*equation becomes exact
                            factor = simplify(-numerator/r[d])
                            variables = factor.free_symbols
                            if len(variables) == 1 and y == variables.pop():
                                factor = exp(Integral(factor).doit())
                                r[d] *= factor
                                r[e] *= factor
                                matching_hints["1st_exact"] = r
                                matching_hints["1st_exact_Integral"] = r
                    else:
                        matching_hints["1st_exact"] = r
                        matching_hints["1st_exact_Integral"] = r

            except NotImplementedError:
                # Differentiating the coefficients might fail because of things
                # like f(2*x).diff(x).  See issue 4624 and issue 4719.
                pass

        # Any first order ODE can be ideally solved by the Lie Group method
        matching_hints["lie_group"] = r3

        # This match is used for several cases below; we now collect on
        # f(x) so the matching works.
        r = collect(reduced_eq, df, exact=True).match(d + e*df)
        if r:
            # Using r[d] and r[e] without any modification for hints
            # linear-coefficients and separable-reduced.
            num, den = r[d], r[e]  # ODE = d+e*diff(f(x),x)
            r['d'] = d
            r['e'] = e
            r['y'] = y
            r[d] = num.subs(f(x), y)
            r[e] = den.subs(f(x), y)

            ## Separable Case: y' == P(y)*Q(x)
            # NOTE(review): the separatevars(r[d]) line was reconstructed
            # from upstream SymPy; the source text here was truncated.
            r[d] = separatevars(r[d])
            r[e] = separatevars(r[e])
            # m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
            m1 = separatevars(r[d], dict=True, symbols=(x, y))
            m2 = separatevars(r[e], dict=True, symbols=(x, y))
            if m1 and m2:
                r1 = {'m1': m1, 'm2': m2, 'y': y}
                matching_hints["separable"] = r1
                matching_hints["separable_Integral"] = r1

            ## First order equation with homogeneous coefficients:
            # dy/dx == F(y/x) or dy/dx == F(x/y)
            ordera = homogeneous_order(r[d], x, y)
            if ordera is not None:
                orderb = homogeneous_order(r[e], x, y)
                if ordera == orderb:
                    # u1=y/x and u2=x/y
                    u1 = Dummy('u1')
                    u2 = Dummy('u2')
                    s = "1st_homogeneous_coeff_subs"
                    s1 = s + "_dep_div_indep"
                    s2 = s + "_indep_div_dep"
                    if simplify((r[d] + u1*r[e]).subs({x: 1, y: u1})) != 0:
                        matching_hints[s1] = r
                        matching_hints[s1 + "_Integral"] = r
                    if simplify((r[e] + u2*r[d]).subs({x: u2, y: 1})) != 0:
                        matching_hints[s2] = r
                        matching_hints[s2 + "_Integral"] = r
                    if s1 in matching_hints and s2 in matching_hints:
                        matching_hints["1st_homogeneous_coeff_best"] = r

            ## Linear coefficients of the form
            # y'+ F((a*x + b*y + c)/(a'*x + b'*y + c')) = 0
            # that can be reduced to homogeneous form.
            F = num/den
            params = _linear_coeff_match(F, func)
            if params:
                xarg, yarg = params
                u = Dummy('u')
                t = Dummy('t')
                # Dummy substitution for df and f(x).
                dummy_eq = reduced_eq.subs(((df, t), (f(x), u)))
                reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x)))
                dummy_eq = simplify(dummy_eq.subs(reps))
                # get the re-cast values for e and d
                r2 = collect(expand(dummy_eq), [df, f(x)]).match(e*df + d)
                if r2:
                    orderd = homogeneous_order(r2[d], x, f(x))
                    if orderd is not None:
                        ordere = homogeneous_order(r2[e], x, f(x))
                        if orderd == ordere:
                            # Match arguments are passed in such a way that they
                            # are coherent with the already existing homogeneous
                            # functions.
                            r2[d] = r2[d].subs(f(x), y)
                            r2[e] = r2[e].subs(f(x), y)
                            r2.update({'xarg': xarg, 'yarg': yarg,
                                'd': d, 'e': e, 'y': y})
                            matching_hints["linear_coefficients"] = r2
                            matching_hints["linear_coefficients_Integral"] = r2

            ## Equation of the form y' + (y/x)*H(x^n*y) = 0
            ## that can be reduced to separable form
            factor = simplify(x/f(x)*num/den)

            # Try representing factor in terms of x^n*y
            # where n is lowest power of x in factor;
            # first remove terms like sqrt(2)*3 from factor.atoms(Mul)
            u = None
            for mul in ordered(factor.atoms(Mul)):
                if mul.has(x):
                    _, u = mul.as_independent(x, f(x))
                    break
            if u and u.has(f(x)):
                h = x**(degree(Poly(u.subs(f(x), y), gen=x)))*f(x)
                p = Wild('p')
                if (u/h == 1) or ((u/h).simplify().match(x**p)):
                    t = Dummy('t')
                    r2 = {'t': t}
                    xpart, ypart = u.as_independent(f(x))
                    test = factor.subs(((u, t), (1/u, 1/t)))
                    free = test.free_symbols
                    if len(free) == 1 and free.pop() == t:
                        r2.update({'power': xpart.as_base_exp()[1], 'u': test})
                        matching_hints["separable_reduced"] = r2
                        matching_hints["separable_reduced_Integral"] = r2

        ## Almost-linear equation of the form f(x)*g(y)*y' + k(x)*l(y) + m(x) = 0
        r = collect(eq, [df, f(x)]).match(e*df + d)
        if r:
            r2 = r.copy()
            r2[c] = S.Zero
            if r2[d].is_Add:
                # Separate the terms having f(x) to r[d] and
                # remaining to r[c]
                no_f, r2[d] = r2[d].as_independent(f(x))
                r2[c] += no_f
            factor = simplify(r2[d].diff(f(x))/r[e])
            if factor and not factor.has(f(x)):
                r2[d] = factor_terms(r2[d])
                u = r2[d].as_independent(f(x), as_Add=False)[1]
                r2.update({'a': e, 'b': d, 'c': c, 'u': u})
                r2[d] /= u
                r2[e] /= u.diff(f(x))
                matching_hints["almost_linear"] = r2
                matching_hints["almost_linear_Integral"] = r2

    elif order == 2:
        # Liouville ODE in the form
        # f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
        # See Goldstein and Braun, "Advanced Methods for the Solution of
        # Differential Equations", pg. 98

        s = d*f(x).diff(x, 2) + e*df**2 + k*df
        r = reduced_eq.match(s)
        if r and r[d] != 0:
            y = Dummy('y')
            g = simplify(r[e]/r[d]).subs(f(x), y)
            h = simplify(r[k]/r[d]).subs(f(x), y)
            if y in h.free_symbols or x in g.free_symbols:
                pass
            else:
                r = {'g': g, 'h': h, 'y': y}
                matching_hints["Liouville"] = r
                matching_hints["Liouville_Integral"] = r

        # Homogeneous second order differential equation of the form
        # a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3*f(x), where a3, b3
        # and c3 are polynomials.  It has a definite power series solution
        # at point x0 if b3/a3 and c3/a3 are analytic at x0.
        deq = a3*(f(x).diff(x, 2)) + b3*df + c3*f(x)
        r = collect(reduced_eq,
            [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
        ordinary = False
        if r and r[a3] != 0:
            if all([r[key].is_polynomial() for key in r]):
                p = cancel(r[b3]/r[a3])  # Used below
                q = cancel(r[c3]/r[a3])  # Used below
                point = kwargs.get('x0', 0)
                check = p.subs(x, point)
                if not check.has(oo) and not check.has(NaN) and \
                    not check.has(zoo) and not check.has(-oo):
                    check = q.subs(x, point)
                    if not check.has(oo) and not check.has(NaN) and \
                        not check.has(zoo) and not check.has(-oo):
                        ordinary = True
                        r.update({'a3': a3, 'b3': b3, 'c3': c3, 'x0': point, 'terms': terms})
                        matching_hints["2nd_power_series_ordinary"] = r

            # Checking if the differential equation has a regular singular point
            # at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0)
            # and (c3/a3)*((x - x0)**2) are analytic at x0.
            if not ordinary:
                p = cancel((x - point)*p)
                check = p.subs(x, point)
                if not check.has(oo) and not check.has(NaN) and \
                    not check.has(zoo) and not check.has(-oo):
                    q = cancel(((x - point)**2)*q)
                    check = q.subs(x, point)
                    if not check.has(oo) and not check.has(NaN) and \
                        not check.has(zoo) and not check.has(-oo):
                        coeff_dict = {'p': p, 'q': q, 'x0': point, 'terms': terms}
                        matching_hints["2nd_power_series_regular"] = coeff_dict

    if order > 0:
        # Any ODE that can be solved with a substitution and
        # repeated integration e.g.:
        # `d^2/dx^2(y) + x*d/dx(y) = constant
        r = _nth_order_reducible_match(reduced_eq, func)
        if r:
            matching_hints['nth_order_reducible'] = r

        # Any ODE that can be solved with a combination of algebra and
        # integrals e.g.:
        # d^3/dx^3(x y) = F(x)
        r = _nth_algebraic_match(reduced_eq, func)
        if r['solutions']:
            matching_hints['nth_algebraic'] = r
            matching_hints['nth_algebraic_Integral'] = r

        # nth order linear ODE
        # a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
        r = _nth_linear_match(reduced_eq, func, order)

        # Constant coefficient case (a_i is constant for all i)
        if r and not any(r[i].has(x) for i in r if i >= 0):
            # Inhomogeneous case: F(x) is not identically 0
            if r[-1]:
                undetcoeff = _undetermined_coefficients_match(r[-1], x)
                s = "nth_linear_constant_coeff_variation_of_parameters"
                matching_hints[s] = r
                matching_hints[s + "_Integral"] = r
                if undetcoeff['test']:
                    r['trialset'] = undetcoeff['trialset']
                    matching_hints[
                        "nth_linear_constant_coeff_undetermined_coefficients"
                            ] = r
            # Homogeneous case: F(x) is identically 0
            else:
                matching_hints["nth_linear_constant_coeff_homogeneous"] = r

        # nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x)
        #In case of Homogeneous euler equation F(x) = 0
        def _test_term(coeff, order):
            r"""
            Linear Euler ODEs have the form  K*x**order*diff(y(x),x,order) = F(x),
            where K is independent of x and y(x), order>= 0.
            So we need to check that for each term, coeff == K*x**order from
            some K.  We have a few cases, since coeff may have several
            different types.
            """
            if order < 0:
                raise ValueError("order should be greater than 0")
            if coeff == 0:
                return True
            if order == 0:
                if x in coeff.free_symbols:
                    return False
                return True
            if coeff.is_Mul:
                if coeff.has(f(x)):
                    return False
                return x**order in coeff.args
            elif coeff.is_Pow:
                return coeff.as_base_exp() == (x, order)
            elif order == 1:
                return x == coeff
            return False

        # Find coefficient for highest derivative, multiply coefficients to
        # bring the equation into Euler form if possible
        r_rescaled = None
        if r is not None:
            coeff = r[order]
            factor = x**order / coeff
            r_rescaled = {i: factor*r[i] for i in r}

        # XXX: Mixing up the trialset with the coefficients
        if r_rescaled and not any(not _test_term(r_rescaled[i], i) for i in
                r_rescaled if i != 'trialset' and i >= 0):
            if not r_rescaled[-1]:
                matching_hints["nth_linear_euler_eq_homogeneous"] = r_rescaled
            else:
                matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"] = r_rescaled
                matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"] = r_rescaled
                e, re = posify(r_rescaled[-1].subs(x, exp(x)))
                undetcoeff = _undetermined_coefficients_match(e.subs(re), x)
                if undetcoeff['test']:
                    r_rescaled['trialset'] = undetcoeff['trialset']
                    matching_hints["nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"] = r_rescaled

    # Order keys based on allhints.
    retlist = [i for i in allhints if i in matching_hints]
    if dict:
        # Dictionaries are ordered arbitrarily, so make note of which
        # hint would come first for dsolve().  Use an ordered dict in Py 3.
        matching_hints["default"] = retlist[0] if retlist else None
        matching_hints["ordered_hints"] = tuple(retlist)
        return matching_hints
    else:
        return tuple(retlist)
def classify_sysode(eq, funcs=None, **kwargs):
    r"""
    Classify a system of ordinary differential equations.

    Returns a dictionary with keys ``'no_of_equation'``, ``'eq'``,
    ``'func'``, ``'order'`` (dict func -> order), ``'func_coeff'``
    (dict (eq index, func, derivative order) -> coefficient),
    ``'is_linear'`` and ``'type_of_equation'`` (a string like
    ``'type1'`` identifying the solvable pattern, or None).

    Raises ``ValueError`` for a single equation, a function count that
    does not match the equation count, or functions of more than one
    variable.
    """
    # Sympify equations and convert iterables of equations into
    # a list of equations
    def _sympify(eq):
        return list(map(sympify, eq if iterable(eq) else [eq]))

    eq, funcs = (_sympify(w) for w in [eq, funcs])
    # Normalize Eq objects into expressions equal to zero.
    for i, fi in enumerate(eq):
        if isinstance(fi, Equality):
            eq[i] = fi.lhs - fi.rhs
    matching_hints = {"no_of_equation":i+1}
    matching_hints['eq'] = eq
    if i==0:
        raise ValueError("classify_sysode() works for systems of ODEs. "
        "For scalar ODEs, classify_ode should be used")
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]

    # find all the functions if not given
    order = dict()
    if funcs==[None]:
        funcs = []
        for eqs in eq:
            derivs = eqs.atoms(Derivative)
            func = set().union(*[d.atoms(AppliedUndef) for d in derivs])
            for func_ in  func:
                funcs.append(func_)
    funcs = list(set(funcs))
    if len(funcs) != len(eq):
        raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
    # Pair each function with the equation in which its highest-order
    # derivative occurs; several functions may share an equation.
    func_dict = dict()
    for func in funcs:
        if not order.get(func, False):
            max_order = 0
            for i, eqs_ in enumerate(eq):
                order_ = ode_order(eqs_,func)
                if max_order < order_:
                    max_order = order_
                    eq_no = i
            if eq_no in func_dict:
                list_func = []
                list_func.append(func_dict[eq_no])
                list_func.append(func)
                func_dict[eq_no] = list_func
            else:
                func_dict[eq_no] = func
            order[func] = max_order
    funcs = [func_dict[i] for i in range(len(func_dict))]
    matching_hints['func'] = funcs
    for func in funcs:
        if isinstance(func, list):
            for func_elem in func:
                if len(func_elem.args) != 1:
                    raise ValueError("dsolve() and classify_sysode() work with "
                    "functions of one variable only, not %s" % func)
        else:
            if func and len(func.args) != 1:
                raise ValueError("dsolve() and classify_sysode() work with "
                "functions of one variable only, not %s" % func)

    # find the order of all equation in system of odes
    matching_hints["order"] = order

    # find coefficients of terms f(t), diff(f(t),t) and higher derivatives
    # and similarly for other functions g(t), diff(g(t),t) in all equations.
    # Here j denotes the equation number, funcs[l] denotes the function about
    # which we are talking about and k denotes the order of function funcs[l]
    # whose coefficient we are calculating.
    def linearity_check(eqs, j, func, is_linear_):
        for k in range(order[func] + 1):
            func_coef[j, func, k] = collect(eqs.expand(), [diff(func, t, k)]).coeff(diff(func, t, k))
            if is_linear_ == True:
                if func_coef[j, func, k] == 0:
                    # A zero collected coefficient can still hide nonlinear
                    # occurrences (e.g. f(t)**2); inspect the residue.
                    if k == 0:
                        coef = eqs.as_independent(func, as_Add=True)[1]
                        for xr in range(1, ode_order(eqs,func) + 1):
                            coef -= eqs.as_independent(diff(func, t, xr), as_Add=True)[1]
                        if coef != 0:
                            is_linear_ = False
                    else:
                        if eqs.as_independent(diff(func, t, k), as_Add=True)[1]:
                            is_linear_ = False
                else:
                    # Linear systems must have coefficients free of every
                    # unknown function.
                    for func_ in funcs:
                        if isinstance(func_, list):
                            for elem_func_ in func_:
                                dep = func_coef[j, func, k].as_independent(elem_func_, as_Add=True)[1]
                                if dep != 0:
                                    is_linear_ = False
                        else:
                            dep = func_coef[j, func, k].as_independent(func_, as_Add=True)[1]
                            if dep != 0:
                                is_linear_ = False
        return is_linear_

    func_coef = {}
    is_linear = True
    for j, eqs in enumerate(eq):
        for func in funcs:
            if isinstance(func, list):
                for func_elem in func:
                    is_linear = linearity_check(eqs, j, func_elem, is_linear)
            else:
                is_linear = linearity_check(eqs, j, func, is_linear)
    matching_hints['func_coeff'] = func_coef
    matching_hints['is_linear'] = is_linear

    # Dispatch to the specialized checkers only when every equation in
    # the system has the same order.
    if len(set(order.values())) == 1:
        order_eq = list(matching_hints['order'].values())[0]
        if matching_hints['is_linear'] == True:
            if matching_hints['no_of_equation'] == 2:
                if order_eq == 1:
                    type_of_equation = check_linear_2eq_order1(eq, funcs, func_coef)
                elif order_eq == 2:
                    type_of_equation = check_linear_2eq_order2(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            elif matching_hints['no_of_equation'] == 3:
                if order_eq == 1:
                    type_of_equation = check_linear_3eq_order1(eq, funcs, func_coef)
                    if type_of_equation is None:
                        type_of_equation = check_linear_neq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            else:
                if order_eq == 1:
                    type_of_equation = check_linear_neq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
        else:
            if matching_hints['no_of_equation'] == 2:
                if order_eq == 1:
                    type_of_equation = check_nonlinear_2eq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            elif matching_hints['no_of_equation'] == 3:
                if order_eq == 1:
                    type_of_equation = check_nonlinear_3eq_order1(eq, funcs, func_coef)
                else:
                    type_of_equation = None
            else:
                type_of_equation = None
    else:
        type_of_equation = None

    matching_hints['type_of_equation'] = type_of_equation

    return matching_hints
def check_linear_2eq_order1(eq, func, func_coef):
    """
    Identify the solvable pattern ('type1' .. 'type7') of a linear
    first-order system of two ODEs, or return None if unsupported
    (e.g. time-dependent nonhomogeneous forcing; see issue #9244).
    """
    x = func[0].func
    y = func[1].func
    fc = func_coef
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    # for equations Eq(a1*diff(x(t),t), b1*x(t) + c1*y(t) + d1)
    # and Eq(a2*diff(y(t),t), b2*x(t) + c2*y(t) + d2)
    r['a1'] = fc[0,x(t),1] ; r['a2'] = fc[1,y(t),1]
    r['b1'] = -fc[0,x(t),0]/fc[0,x(t),1] ; r['b2'] = -fc[1,x(t),0]/fc[1,y(t),1]
    r['c1'] = -fc[0,y(t),0]/fc[0,x(t),1] ; r['c2'] = -fc[1,y(t),0]/fc[1,y(t),1]
    # Collect the forcing terms (parts free of x(t) and y(t)).
    forcing = [S(0),S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t)):
                forcing[i] += j
    if not (forcing[0].has(t) or forcing[1].has(t)):
        # We can handle homogeneous case and simple constant forcings
        r['d1'] = forcing[0]
        r['d2'] = forcing[1]
    else:
        # Issue #9244: nonhomogeneous linear systems are not supported
        return None

    # Conditions to check for type 6 whose equations are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and
    # Eq(diff(y(t),t), a*[f(t) + a*h(t)]x(t) + a*[g(t) - h(t)]*y(t))
    p = 0
    q = 0
    p1 = cancel(r['b2']/(cancel(r['b2']/r['c2']).as_numer_denom()[0]))
    p2 = cancel(r['b1']/(cancel(r['b1']/r['c1']).as_numer_denom()[0]))
    for n, i in enumerate([p1, p2]):
        for j in Mul.make_args(collect_const(i)):
            if not j.has(t):
                q = j
            if q and n==0:
                if ((r['b2']/j - r['b1'])/(r['c1'] - r['c2']/j)) == j:
                    p = 1
            elif q and n==1:
                if ((r['b1']/j - r['b2'])/(r['c2'] - r['c1']/j)) == j:
                    p = 2
    # End of condition for type 6

    if r['d1']!=0 or r['d2']!=0:
        if not r['d1'].has(t) and not r['d2'].has(t):
            if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()):
                # Equations for type 2 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)+d1) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t)+d2)
                return "type2"
        else:
            return None
    else:
        if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()):
                # Equations for type 1 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t))
            return "type1"
        else:
            # Normalize by the leading (derivative) coefficients before
            # comparing the variable-coefficient patterns below.
            r['b1'] = r['b1']/r['a1'] ; r['b2'] = r['b2']/r['a2']
            r['c1'] = r['c1']/r['a1'] ; r['c2'] = r['c2']/r['a2']
            if (r['b1'] == r['c2']) and (r['c1'] == r['b2']):
                # Equation for type 3 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), g(t)*x(t) + f(t)*y(t))
                return "type3"
            elif (r['b1'] == r['c2']) and (r['c1'] == -r['b2']) or (r['b1'] == -r['c2']) and (r['c1'] == r['b2']):
                # Equation for type 4 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), -g(t)*x(t) + f(t)*y(t))
                return "type4"
            elif (not cancel(r['b2']/r['c1']).has(t) and not cancel((r['c2']-r['b1'])/r['c1']).has(t)) \
            or (not cancel(r['b1']/r['c2']).has(t) and not cancel((r['c1']-r['b2'])/r['c2']).has(t)):
                # Equations for type 5 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), a*g(t)*x(t) + [f(t) + b*g(t)]*y(t)
                return "type5"
            elif p:
                return "type6"
            else:
                # Equations for type 7 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), h(t)*x(t) + p(t)*y(t))
                return "type7"
def check_linear_2eq_order2(eq, func, func_coef):
    """
    Identify the solvable pattern ('type1' .. 'type11') of a linear
    second-order system of two ODEs, or return None if unsupported.
    """
    x = func[0].func
    y = func[1].func
    fc = func_coef
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    a = Wild('a', exclude=[1/t])
    b = Wild('b', exclude=[1/t**2])
    u = Wild('u', exclude=[t, t**2])
    v = Wild('v', exclude=[t, t**2])
    w = Wild('w', exclude=[t, t**2])
    p = Wild('p', exclude=[t, t**2])
    # Coefficients: a* second derivative, b*/c* first derivatives,
    # d*/e* function terms, f* constants (filled below).
    r['a1'] = fc[0,x(t),2] ; r['a2'] = fc[1,y(t),2]
    r['b1'] = fc[0,x(t),1] ; r['b2'] = fc[1,x(t),1]
    r['c1'] = fc[0,y(t),1] ; r['c2'] = fc[1,y(t),1]
    r['d1'] = fc[0,x(t),0] ; r['d2'] = fc[1,x(t),0]
    r['e1'] = fc[0,y(t),0] ; r['e2'] = fc[1,y(t),0]
    const = [S(0), S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not (j.has(x(t)) or j.has(y(t))):
                const[i] += j
    r['f1'] = const[0]
    r['f2'] = const[1]
    if r['f1']!=0 or r['f2']!=0:
        if all(not r[k].has(t) for k in 'a1 a2 d1 d2 e1 e2 f1 f2'.split()) \
        and r['b1']==r['c1']==r['b2']==r['c2']==0:
            return "type2"
        # NOTE(review): 'e1 e1' below looks like a typo for 'e1 e2'
        # (e2 is never checked) -- confirm against upstream before fixing.
        elif all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2 d1 d2 e1 e1'.split()):
            # type4 requires purely oscillatory (exp with imaginary
            # coefficient of t) forcing in both equations.
            p = [S(0), S(0)] ; q = [S(0), S(0)]
            for n, e in enumerate([r['f1'], r['f2']]):
                if e.has(t):
                    tpart = e.as_independent(t, Mul)[1]
                    for i in Mul.make_args(tpart):
                        if i.has(exp):
                            b, e = i.as_base_exp()
                            co = e.coeff(t)
                            if co and not co.has(t) and co.has(I):
                                p[n] = 1
                            else:
                                q[n] = 1
                        else:
                            q[n] = 1
                else:
                    q[n] = 1
            if p[0]==1 and p[1]==1 and q[0]==0 and q[1]==0:
                    return "type4"
            else:
                return None
        else:
            return None
    else:
        if r['b1']==r['b2']==r['c1']==r['c2']==0 and all(not r[k].has(t) \
        for k in 'a1 a2 d1 d2 e1 e2'.split()):
            return "type1"
        elif r['b1']==r['e1']==r['c2']==r['d2']==0 and all(not r[k].has(t) \
        for k in 'a1 a2 b2 c1 d1 e2'.split()) and r['c1'] == -r['b2'] and \
        r['d1'] == r['e2']:
            return "type3"
        elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \
        (r['d2']/r['a2']).has(t) and not (r['e1']/r['a1']).has(t) and \
        r['b1']==r['d1']==r['c2']==r['e2']==0:
            return "type5"
        elif ((r['a1']/r['d1']).expand()).match((p*(u*t**2+v*t+w)**2).expand()) and not \
        (cancel(r['a1']*r['d2']/(r['a2']*r['d1']))).has(t) and not (r['d1']/r['e1']).has(t) and not \
        (r['d2']/r['e2']).has(t) and r['b1'] == r['b2'] == r['c1'] == r['c2'] == 0:
            return "type10"
        elif not cancel(r['d1']/r['e1']).has(t) and not cancel(r['d2']/r['e2']).has(t) and not \
        cancel(r['d1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['b1']==r['b2']==r['c1']==r['c2']==0:
            return "type6"
        elif not cancel(r['b1']/r['c1']).has(t) and not cancel(r['b2']/r['c2']).has(t) and not \
        cancel(r['b1']*r['a2']/(r['b2']*r['a1'])).has(t) and r['d1']==r['d2']==r['e1']==r['e2']==0:
            return "type7"
        elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \
        cancel(r['e1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['e1'].has(t) \
        and r['b1']==r['d1']==r['c2']==r['e2']==0:
            return "type8"
        elif (r['b1']/r['a1']).match(a/t) and (r['b2']/r['a2']).match(a/t) and not \
        (r['b1']/r['c1']).has(t) and not (r['b2']/r['c2']).has(t) and \
        (r['d1']/r['a1']).match(b/t**2) and (r['d2']/r['a2']).match(b/t**2) \
        and not (r['d1']/r['e1']).has(t) and not (r['d2']/r['e2']).has(t):
            return "type9"
        elif -r['b1']/r['d1']==-r['c1']/r['e1']==-r['b2']/r['d2']==-r['c2']/r['e2']==t:
            return "type11"
        else:
            return None
def check_linear_3eq_order1(eq, func, func_coef):
    """
    Identify the solvable pattern ('type1' .. 'type4') of a linear
    first-order system of three ODEs, or return None if unsupported
    (including any time-dependent forcing; see issue #9244).
    """
    x = func[0].func
    y = func[1].func
    z = func[2].func
    fc = func_coef
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    r = dict()
    # a* = leading derivative coefficients; b*/c*/d* = coefficients of
    # x(t), y(t), z(t) in each equation.
    r['a1'] = fc[0,x(t),1]; r['a2'] = fc[1,y(t),1]; r['a3'] = fc[2,z(t),1]
    r['b1'] = fc[0,x(t),0]; r['b2'] = fc[1,x(t),0]; r['b3'] = fc[2,x(t),0]
    r['c1'] = fc[0,y(t),0]; r['c2'] = fc[1,y(t),0]; r['c3'] = fc[2,y(t),0]
    r['d1'] = fc[0,z(t),0]; r['d2'] = fc[1,z(t),0]; r['d3'] = fc[2,z(t),0]
    # Collect the forcing terms (parts free of x(t), y(t), z(t)).
    forcing = [S(0), S(0), S(0)]
    for i in range(3):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t), z(t)):
                forcing[i] += j
    if forcing[0].has(t) or forcing[1].has(t) or forcing[2].has(t):
        # We can handle homogeneous case and simple constant forcings.
        # Issue #9244: nonhomogeneous linear systems are not supported
        return None

    if all(not r[k].has(t) for k in 'a1 a2 a3 b1 b2 b3 c1 c2 c3 d1 d2 d3'.split()):
        if r['c1']==r['d1']==r['d2']==0:
            return 'type1'
        elif r['c1'] == -r['b2'] and r['d1'] == -r['b3'] and r['d2'] == -r['c3'] \
        and r['b1'] == r['c2'] == r['d3'] == 0:
            return 'type2'
        elif r['b1'] == r['c2'] == r['d3'] == 0 and r['c1']/r['a1'] == -r['d1']/r['a1'] \
        and r['d2']/r['a2'] == -r['b2']/r['a2'] and r['b3']/r['a3'] == -r['c3']/r['a3']:
            return 'type3'
        else:
            return None
    else:
        # Variable coefficients: type4 requires all cross coefficients to
        # be proportional with t-free ratios.
        for k1 in 'c1 d1 b2 d2 b3 c3'.split():
            if r[k1] == 0:
                continue
            else:
                if all(not cancel(r[k1]/r[k]).has(t) for k in 'd1 b2 d2 b3 c3'.split() if r[k]!=0) \
                and all(not cancel(r[k1]/(r['b1'] - r[k])).has(t) for k in 'b1 c2 d3'.split() if r['b1']!=r[k]):
                    return 'type4'
                else:
                    break
    return None
def check_linear_neq_order1(eq, func, func_coef):
    """
    Classify a linear first-order system of n ODEs.

    Returns ``'type1'`` (or ``'type6'`` for three equations) when every
    ratio of a function coefficient to the corresponding leading
    derivative coefficient is free of the independent variable;
    otherwise returns None.
    """
    coeffs = func_coef
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    size = len(eq)
    if any((coeffs[row, func[col], 0]/coeffs[row, func[row], 1]).has(t)
           for row in range(size) for col in range(size)):
        return None
    return 'type6' if size == 3 else 'type1'
def check_nonlinear_2eq_order1(eq, func, func_coef):
    """
    Identify the solvable pattern ('type1' .. 'type5') of a nonlinear
    first-order system of two ODEs, or return None if unsupported.
    """
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    f = Wild('f')
    g = Wild('g')
    u, v = symbols('u, v', cls=Dummy)

    def check_type(x, y):
        # type5: t*x' = x + F(x', y') (or the x/t normalized variant),
        # tried with both sign conventions.
        r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
        r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
            r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
        if not (r1 and r2):
            r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
            r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
            r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
        if r1 and r2 and not (r1[f].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t) \
        or r2[g].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t)):
            return 'type5'
        else:
            return None

    for func_ in func:
        if isinstance(func_, list):
            x = func[0][0].func
            y = func[0][1].func
            eq_type = check_type(x, y)
            if not eq_type:
                eq_type = check_type(y, x)
            return eq_type

    x = func[0].func
    y = func[1].func
    fc = func_coef
    n = Wild('n', exclude=[x(t),y(t)])
    f1 = Wild('f1', exclude=[v,t])
    f2 = Wild('f2', exclude=[v,t])
    g1 = Wild('g1', exclude=[u,t])
    g2 = Wild('g2', exclude=[u,t])
    # Normalize each equation by its leading derivative coefficient.
    for i in range(2):
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # type1: x' = x**n * F(x, y), with matching structure in eq[1].
    r = eq[0].match(diff(x(t),t) - x(t)**n*f)
    if r:
        g = (diff(y(t),t) - eq[1])/r[f]
    if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
        return 'type1'
    # type2: x' = exp(n*x) * F(x, y).
    r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
    if r:
        g = (diff(y(t),t) - eq[1])/r[f]
    if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
        return 'type2'
    g = Wild('g')
    # type3: autonomous system x' = F(x, y), y' = G(x, y).
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    if r1 and r2 and not (r1[f].subs(x(t),u).subs(y(t),v).has(t) or \
    r2[g].subs(x(t),u).subs(y(t),v).has(t)):
        return 'type3'
    # type4: ratio F/G separates as f1(u)*g1(v) / (f2(u)*g2(v)).
    # NOTE(review): r1/r2 are indexed below without a None guard; if the
    # matches fail this raises TypeError -- confirm against upstream.
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    num, den = (
        (r1[f].subs(x(t),u).subs(y(t),v))/
        (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
    R1 = num.match(f1*g1)
    R2 = den.match(f2*g2)
    # phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
    if R1 and R2:
        return 'type4'
    return None
def check_nonlinear_2eq_order2(eq, func, func_coef):
    # Classification of second-order nonlinear two-equation systems is
    # not implemented; None signals "no known solvable type".
    return None
def check_nonlinear_3eq_order1(eq, func, func_coef):
    """
    Identify the solvable pattern ('type1' .. 'type5') of a nonlinear
    first-order system of three ODEs, or return None if unsupported.
    """
    x = func[0].func
    y = func[1].func
    z = func[2].func
    fc = func_coef
    # Independent variable, extracted from the first derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    u, v, w = symbols('u, v, w', cls=Dummy)
    a = Wild('a', exclude=[x(t), y(t), z(t), t])
    b = Wild('b', exclude=[x(t), y(t), z(t), t])
    c = Wild('c', exclude=[x(t), y(t), z(t), t])
    f = Wild('f')
    F1 = Wild('F1')
    F2 = Wild('F2')
    F3 = Wild('F3')
    # Normalize each equation by its leading derivative coefficient.
    for i in range(3):
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # type1: x' = a*y*z, y' = b*z*x, z' = c*x*y with constants a, b, c.
    r1 = eq[0].match(diff(x(t),t) - a*y(t)*z(t))
    r2 = eq[1].match(diff(y(t),t) - b*z(t)*x(t))
    r3 = eq[2].match(diff(z(t),t) - c*x(t)*y(t))
    if r1 and r2 and r3:
        num1, den1 = r1[a].as_numer_denom()
        num2, den2 = r2[b].as_numer_denom()
        num3, den3 = r3[c].as_numer_denom()
        if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
            return 'type1'
    # type2: same as type1 but scaled by a common factor f.
    # NOTE(review): r1/r2 from the inner matches are used below without a
    # None guard; a failed match raises here -- confirm against upstream.
    r = eq[0].match(diff(x(t),t) - y(t)*z(t)*f)
    if r:
        r1 = collect_const(r[f]).match(a*f)
        r2 = ((diff(y(t),t) - eq[1])/r1[f]).match(b*z(t)*x(t))
        r3 = ((diff(z(t),t) - eq[2])/r1[f]).match(c*x(t)*y(t))
    if r1 and r2 and r3:
        num1, den1 = r1[a].as_numer_denom()
        num2, den2 = r2[b].as_numer_denom()
        num3, den3 = r3[c].as_numer_denom()
        if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
            return 'type2'
    # type3: x' = c*F2 - b*F3 (cyclic), with F1, F2, F3 arbitrary.
    r = eq[0].match(diff(x(t),t) - (F2-F3))
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            # Swap F2/F3 (and negate b, c) if the roles came out reversed.
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = eq[1].match(diff(y(t),t) - a*r1[F3] + r1[c]*F1)
        if r2:
            r3 = (eq[2] == diff(z(t),t) - r1[b]*r2[F1] + r2[a]*r1[F2])
        if r1 and r2 and r3:
            return 'type3'
    # type4: x' = c*z*F2 - b*y*F3 (cyclic).
    r = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = (diff(y(t),t) - eq[1]).match(a*x(t)*r1[F3] - r1[c]*z(t)*F1)
        if r2:
            r3 = (diff(z(t),t) - eq[2] == r1[b]*y(t)*r2[F1] - r2[a]*x(t)*r1[F2])
        if r1 and r2 and r3:
            return 'type4'
    # type5: x' = x*(c*F2 - b*F3) (cyclic).
    r = (diff(x(t),t) - eq[0]).match(x(t)*(F2 - F3))
    if r:
        r1 = collect_const(r[F2]).match(c*F2)
        r1.update(collect_const(r[F3]).match(b*F3))
        if r1:
            if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
                r1[F2], r1[F3] = r1[F3], r1[F2]
                r1[c], r1[b] = -r1[b], -r1[c]
            r2 = (diff(y(t),t) - eq[1]).match(y(t)*(a*r1[F3] - r1[c]*F1))
        if r2:
            r3 = (diff(z(t),t) - eq[2] == z(t)*(r1[b]*r2[F1] - r2[a]*r1[F2]))
        if r1 and r2 and r3:
            return 'type5'
    return None
def check_nonlinear_3eq_order2(eq, func, func_coef):
    # Classification of second-order nonlinear three-equation systems is
    # not implemented; None signals "no known solvable type".
    return None
def checksysodesol(eqs, sols, func=None):
    r"""
    Substitute the candidate solutions ``sols`` into the system of ODEs
    ``eqs`` and check whether every equation reduces to zero.

    Returns ``(True, [0, ...])`` on success, or ``(False, residues)``
    where residues are the non-zero remainders.  Raises ``ValueError``
    for malformed solutions and ``NotImplementedError`` when a solution
    cannot be solved for its function.
    """
    def _sympify(eq):
        return list(map(sympify, eq if iterable(eq) else [eq]))
    eqs = _sympify(eqs)
    # Normalize Eq objects into expressions equal to zero.
    for i in range(len(eqs)):
        if isinstance(eqs[i], Equality):
            eqs[i] = eqs[i].lhs - eqs[i].rhs
    if func is None:
        funcs = []
        for eq in eqs:
            derivs = eq.atoms(Derivative)
            func = set().union(*[d.atoms(AppliedUndef) for d in derivs])
            for func_ in func:
                funcs.append(func_)
        funcs = list(set(funcs))
    if not all(isinstance(func, AppliedUndef) and len(func.args) == 1 for func in funcs)\
    and len({func.args for func in funcs})!=1:
        raise ValueError("func must be a function of one variable, not %s" % func)
    for sol in sols:
        if len(sol.atoms(AppliedUndef)) != 1:
            raise ValueError("solutions should have one function only")
    if len(funcs) != len({sol.lhs for sol in sols}):
        raise ValueError("number of solutions provided does not match the number of equations")
    # Build func -> explicit rhs, solving for the function if necessary.
    dictsol = dict()
    for sol in sols:
        func = list(sol.atoms(AppliedUndef))[0]
        if sol.rhs == func:
            sol = sol.reversed
        solved = sol.lhs == func and not sol.rhs.has(func)
        if not solved:
            rhs = solve(sol, func)
            if not rhs:
                raise NotImplementedError
        else:
            rhs = sol.rhs
        dictsol[func] = rhs
    checkeq = []
    for eq in eqs:
        for func in funcs:
            eq = sub_func_doit(eq, func, dictsol[func])
        ss = simplify(eq)
        if ss != 0:
            eq = ss.expand(force=True)
        else:
            eq = 0
        checkeq.append(eq)
    if len(set(checkeq)) == 1 and list(set(checkeq))[0] == 0:
        return (True, checkeq)
    else:
        return (False, checkeq)
@vectorize(0)
def odesimp(ode, eq, func, hint):
    """Simplify the solution ``eq`` of ``ode`` produced by ``hint``.

    Performs, in order: evaluation of pending Integrals (when the hint
    allows it), simplification of arbitrary constants via
    ``constantsimp``, an attempt to solve explicitly for ``func``,
    hint-specific cleanup of the result, and a final constant
    renumbering.  Returns one ``Eq`` or a list of them.
    """
    x = func.args[0]
    f = func.func
    C1 = get_numbered_constants(eq, num=1)
    # Arbitrary constants are exactly the symbols the solution has
    # introduced beyond those already in the ODE.
    constants = eq.free_symbols - ode.free_symbols
    # First, integrate if the hint allows it.
    eq = _handle_Integral(eq, func, hint)
    if hint.startswith("nth_linear_euler_eq_nonhomogeneous"):
        eq = simplify(eq)
    if not isinstance(eq, Equality):
        raise TypeError("eq should be an instance of Equality")
    # Second, clean up the arbitrary constants.
    # Right now, nth linear hints can put as many as 2*order constants in an
    # expression. If that number grows with another hint, the third argument
    # here should be raised accordingly, or constantsimp() rewritten to handle
    # an arbitrary number of constants.
    eq = constantsimp(eq, constants)
    # Lastly, now that we have cleaned up the expression, try solving for func.
    # When CRootOf is implemented in solve(), we will want to return a CRootOf
    # every time instead of an Equality.
    # Get the f(x) on the left if possible.
    if eq.rhs == func and not eq.lhs.has(func):
        eq = [Eq(eq.rhs, eq.lhs)]
    # make sure we are working with lists of solutions in simplified form.
    if eq.lhs == func and not eq.rhs.has(func):
        # The solution is already solved
        eq = [eq]
        # special simplification of the rhs
        if hint.startswith("nth_linear_constant_coeff"):
            # Collect terms to make the solution look nice.
            # This is also necessary for constantsimp to remove unnecessary
            # terms from the particular solution from variation of parameters
            #
            # Collect is not behaving reliably here. The results for
            # some linear constant-coefficient equations with repeated
            # roots do not properly simplify all constants sometimes.
            # 'collectterms' gives different orders sometimes, and results
            # differ in collect based on that order. The
            # sort-reverse trick fixes things, but may fail in the
            # future. In addition, collect is splitting exponentials with
            # rational powers for no reason. We have to do a match
            # to fix this using Wilds.
            global collectterms
            try:
                collectterms.sort(key=default_sort_key)
                collectterms.reverse()
            except Exception:
                pass
            assert len(eq) == 1 and eq[0].lhs == f(x)
            sol = eq[0].rhs
            sol = expand_mul(sol)
            # Collect sin/cos pairs sharing the same exponential first,
            # then the bare exponentials.
            for i, reroot, imroot in collectterms:
                sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
                sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
            for i, reroot, imroot in collectterms:
                sol = collect(sol, x**i*exp(reroot*x))
            del collectterms
            # Collect is splitting exponentials with rational powers for
            # no reason. We call powsimp to fix.
            sol = powsimp(sol)
            eq[0] = Eq(f(x), sol)
    else:
        # The solution is not solved, so try to solve it
        try:
            floats = any(i.is_Float for i in eq.atoms(Number))
            eqsol = solve(eq, func, force=True, rational=False if floats else None)
            if not eqsol:
                raise NotImplementedError
        except (NotImplementedError, PolynomialError):
            eq = [eq]
        else:
            def _expand(expr):
                numer, denom = expr.as_numer_denom()
                if denom.is_Add:
                    return expr
                else:
                    return powsimp(expr.expand(), combine='exp', deep=True)
            # XXX: the rest of odesimp() expects each ``t`` to be in a
            # specific normal form: rational expression with numerator
            # expanded, but with combined exponential functions (at
            # least in this setup all tests pass).
            eq = [Eq(f(x), _expand(t)) for t in eqsol]
        # special simplification of the lhs.
        if hint.startswith("1st_homogeneous_coeff"):
            for j, eqi in enumerate(eq):
                newi = logcombine(eqi, force=True)
                if isinstance(newi.lhs, log) and newi.rhs == 0:
                    newi = Eq(newi.lhs.args[0]/C1, C1)
                eq[j] = newi
    # We cleaned up the constants before solving to help the solve engine with
    # a simpler expression, but the solved expression could have introduced
    # things like -C1, so rerun constantsimp() one last time before returning.
    for i, eqi in enumerate(eq):
        eq[i] = constantsimp(eqi, constants)
        eq[i] = constant_renumber(eq[i], ode.free_symbols)
    # If there is only 1 solution, return it;
    # otherwise return the list of solutions.
    if len(eq) == 1:
        eq = eq[0]
    return eq
def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True):
    """Substitute ``sol`` into ``ode`` and check that the result is zero.

    Tries, in order: direct substitution of a solved solution; equality
    of nth derivatives (works for exact ODEs); solving for the
    derivatives and back-substituting them in decreasing order.

    Returns ``(True, residual)`` when the residual simplifies to zero,
    ``(False, residual)`` otherwise, and raises ``NotImplementedError``
    when none of the strategies can decide.  ``sol`` may also be a
    set/list/tuple of solutions, in which case a container of results
    of the same type is returned.
    """
    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if func is None:
        try:
            _, func = _preprocess(ode.lhs)
        except ValueError:
            # Fall back to extracting the single applied function from
            # the solution(s).
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkodesol for this case.')
            func = funcs.pop()
    if not isinstance(func, AppliedUndef) or len(func.args) != 1:
        raise ValueError(
            "func must be a function of one variable, not %s" % func)
    if is_sequence(sol, set):
        # Check each solution independently, preserving the container type.
        return type(sol)([checkodesol(ode, i, order=order, solve_for_func=solve_for_func) for i in sol])
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed
    if order == 'auto':
        order = ode_order(ode, func)
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        # Try once to obtain explicit solutions and recheck those.
        rhs = solve(sol, func)
        if rhs:
            eqs = [Eq(func, t) for t in rhs]
            if len(rhs) == 1:
                eqs = eqs[0]
            return checkodesol(ode, eqs, order=order,
                solve_for_func=False)
    s = True
    testnum = 0
    x = func.args[0]
    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ODE. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs
            if sol.lhs == func:
                s = sub_func_doit(ode_diff, func, sol.rhs)
            else:
                testnum += 1
                continue
            ss = simplify(s)
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand(force=True)
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(
                trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) -
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ODE. The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ODE in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0: sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0: sol.lhs}
            else:
                diffsols = {}
            sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once. After that, we automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds, func.diff(x, i))
                        if not sdf:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)
            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
                    rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs, rhs)
                    ode_or_bool = simplify(ode_or_bool)
                    if isinstance(ode_or_bool, (bool, BooleanAtom)):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify -- just prove that the
                # numerator goes to zero.
                num = trigsimp((lhs - rhs).as_numer_denom()[0])
                # BUGFIX: ``_func`` and ``reps`` were previously undefined
                # here (NameError) and posify's (expr, reps) pair was
                # discarded.  Replace the function with a Dummy so the
                # positivity assumptions introduced by posify can take
                # effect, then undo both replacements after simplifying.
                _func = Dummy('func')
                num = num.subs(func, _func)
                num, reps = posify(num)
                s = simplify(num).xreplace(reps).xreplace({_func: func})
                testnum += 1
        else:
            break
    if not s:
        return (True, s)
    elif s is True:
        raise NotImplementedError("Unable to test if " + str(sol) +
            " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
def ode_sol_simplicity(sol, func, trysolving=True):
    """Return a heuristic complexity score for an ODE solution.

    Lower is simpler: ``-2`` means *sol* is already solved for *func*,
    ``-1`` means it can be solved for *func* (only attempted when
    *trysolving* is True), ``oo`` flags unevaluated Integrals, and
    otherwise the length of the string form is used.  For an iterable
    of solutions, ``oo`` propagates if any member scores ``oo``;
    otherwise the combined string length is returned.
    """
    # An iterable scores oo as soon as one member does.
    if iterable(sol):
        if any(ode_sol_simplicity(member, func, trysolving=trysolving) == oo
                for member in sol):
            return oo
        return len(str(sol))

    # Unevaluated integrals are considered maximally complex.
    if sol.has(Integral):
        return oo

    # Already in explicit form on either side?
    explicit_lhs = sol.lhs == func and not sol.rhs.has(func)
    explicit_rhs = sol.rhs == func and not sol.lhs.has(func)
    if explicit_lhs or explicit_rhs:
        return -2

    if trysolving:
        # Solvable for func (nonempty result, no NotImplementedError)
        # scores -1.
        try:
            candidates = solve(sol, func)
        except NotImplementedError:
            candidates = None
        if candidates:
            return -1

    # Fallback: string length as a crude complexity measure.
    return len(str(sol))
def _get_constant_subexpressions(expr, Cs):
    """Return subexpressions of *expr* that are built entirely from the
    arbitrary constants *Cs* (helper for ``constantsimp``).
    """
    Cs = set(Cs)
    Ces = []
    def _recursive_walk(expr):
        expr_syms = expr.free_symbols
        if expr_syms and expr_syms.issubset(Cs):
            # The whole subtree depends only on constants.
            Ces.append(expr)
        else:
            if expr.func == exp:
                expr = expr.expand(mul=True)
            if expr.func in (Add, Mul):
                # Partial sums/products whose arguments are all constants.
                d = sift(expr.args, lambda i : i.free_symbols.issubset(Cs))
                if len(d[True]) > 1:
                    x = expr.func(*d[True])
                    if not x.is_number:
                        Ces.append(x)
            elif isinstance(expr, Integral):
                # Definite integrals over constants count as constant.
                if expr.free_symbols.issubset(Cs) and \
                all(len(x) == 3 for x in expr.limits):
                    Ces.append(expr)
            for i in expr.args:
                _recursive_walk(i)
        return
    _recursive_walk(expr)
    return Ces
def __remove_linear_redundancies(expr, Cs):
    """Collapse arbitrary constants that enter *expr* only linearly and
    redundantly (helper for ``constantsimp``).
    """
    # Total occurrence count of each constant in the whole expression.
    cnts = {i: expr.count(i) for i in Cs}
    Cs = [i for i in Cs if cnts[i] > 0]
    def _linear(expr):
        # In a sum, constants that occur nowhere else and enter linearly
        # with the same coefficient collapse onto one representative.
        if isinstance(expr, Add):
            xs = [i for i in Cs if expr.count(i)==cnts[i] \
                and 0 == expr.diff(i, 2)]
            d = {}
            for x in xs:
                y = expr.diff(x)
                if y not in d:
                    d[y]=[]
                d[y].append(x)
            for y in d:
                if len(d[y]) > 1:
                    d[y].sort(key=str)
                    for x in d[y][1:]:
                        expr = expr.subs(x, 0)
        return expr
    def _recursive_walk(expr):
        # Apply _linear bottom-up across the expression tree.
        if len(expr.args) != 0:
            expr = expr.func(*[_recursive_walk(i) for i in expr.args])
        expr = _linear(expr)
        return expr
    if isinstance(expr, Equality):
        lhs, rhs = [_recursive_walk(i) for i in expr.args]
        f = lambda i: isinstance(i, Number) or i in Cs
        if isinstance(lhs, Symbol) and lhs in Cs:
            rhs, lhs = lhs, rhs
        if lhs.func in (Add, Symbol) and rhs.func in (Add, Symbol):
            # Move constant addends to the rhs and the rest to the lhs.
            dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
            drhs = sift([rhs] if isinstance(rhs, AtomicExpr) else rhs.args, f)
            for i in [True, False]:
                for hs in [dlhs, drhs]:
                    if i not in hs:
                        hs[i] = [0]
            lhs = Add(*dlhs[False]) - Add(*drhs[False])
            rhs = Add(*drhs[True]) - Add(*dlhs[True])
        elif lhs.func in (Mul, Symbol) and rhs.func in (Mul, Symbol):
            # Divide constant factors of the lhs through to the rhs.
            dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
            if True in dlhs:
                if False not in dlhs:
                    dlhs[False] = [1]
                lhs = Mul(*dlhs[False])
                rhs = rhs/Mul(*dlhs[True])
        return Eq(lhs, rhs)
    else:
        return _recursive_walk(expr)
@vectorize(0)
def constantsimp(expr, constants):
    """Simplify an expression with arbitrary constants in it.

    Absorbs constant-only subexpressions into a single constant, uses
    common-subexpression elimination to fold constants, removes linear
    redundancies, and factors terms (avoiding exponentials), recursing
    until the expression stops changing.
    """
    Cs = constants
    orig_expr = expr
    constant_subexprs = _get_constant_subexpressions(expr, Cs)
    for xe in constant_subexprs:
        xes = list(xe.free_symbols)
        if not xes:
            continue
        # Only absorb when the constants occur nowhere outside xe.
        if all([expr.count(c) == xe.count(c) for c in xes]):
            xes.sort(key=str)
            expr = expr.subs(xe, xes[0])
    # try to perform common sub-expression elimination of constant terms
    try:
        commons, rexpr = cse(expr)
        commons.reverse()
        rexpr = rexpr[0]
        for s in commons:
            cs = list(s[1].atoms(Symbol))
            if len(cs) == 1 and cs[0] in Cs and \
                cs[0] not in rexpr.atoms(Symbol) and \
                not any(cs[0] in ex for ex in commons if ex != s):
                rexpr = rexpr.subs(s[0], cs[0])
            else:
                rexpr = rexpr.subs(*s)
        expr = rexpr
    except Exception:
        pass
    expr = __remove_linear_redundancies(expr, Cs)
    def _conditional_term_factoring(expr):
        new_expr = terms_gcd(expr, clear=False, deep=True, expand=False)
        # we do not want to factor exponentials, so handle this case
        if new_expr.is_Mul:
            infac = False
            asfac = False
            for m in new_expr.args:
                if isinstance(m, exp):
                    asfac = True
                elif m.is_Add:
                    infac = any(isinstance(fi, exp) for t in m.args
                        for fi in Mul.make_args(t))
                if asfac and infac:
                    new_expr = expr
                    break
        return new_expr
    expr = _conditional_term_factoring(expr)
    # Recurse until a fixed point is reached.
    if orig_expr != expr:
        return constantsimp(expr, Cs)
    return expr
def constant_renumber(expr, variables=None, newconstants=None):
    """Renumber the arbitrary constants in *expr* to ``C1, C2, ...`` in
    the order they appear under a canonical traversal.

    Symbols listed in *variables* are never treated as constants;
    *newconstants* optionally supplies the replacement symbols instead
    of freshly numbered ``C`` symbols.  Containers are mapped
    elementwise, preserving their type.
    """
    if type(expr) in (set, list, tuple):
        renumbered = [constant_renumber(e, variables, newconstants) for e in expr]
        return type(expr)(renumbered)
    if variables is not None:
        variables = set(variables)
        constantsymbols = list(expr.free_symbols - variables)
    else:
        variables = set()
        # Default: any symbol named C<digits> is a constant.
        isconstant = lambda s: s.startswith('C') and s[1:].isdigit()
        constantsymbols = [sym for sym in expr.free_symbols if isconstant(sym.name)]
    if newconstants is None:
        iter_constants = numbered_symbols(start=1, prefix='C', exclude=variables)
    else:
        iter_constants = (sym for sym in newconstants if sym not in variables)
    global newstartnumber
    newstartnumber = 1
    endnumber = len(constantsymbols)
    constants_found = [None]*(endnumber + 2)
    # make a mapping to send all constantsymbols to S.One and use
    # that to make sure that term ordering is not dependent on
    # the indexed value of C
    C_1 = [(ci, S.One) for ci in constantsymbols]
    sort_key=lambda arg: default_sort_key(arg.subs(C_1))
    def _constant_renumber(expr):
        # FIXME: Use nonlocal here when support for Py2 is dropped:
        global newstartnumber
        if isinstance(expr, Equality):
            return Eq(
                _constant_renumber(expr.lhs),
                _constant_renumber(expr.rhs))
        if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and \
                not expr.has(*constantsymbols):
            # Base case, as above. Hope there aren't constants inside
            return expr
        elif expr.is_Piecewise:
            return expr
        elif expr in constantsymbols:
            # Record first-appearance order of each constant.
            if expr not in constants_found:
                constants_found[newstartnumber] = expr
                newstartnumber += 1
            return expr
        elif expr.is_Function or expr.is_Pow or isinstance(expr, Tuple):
            return expr.func(
                *[_constant_renumber(x) for x in expr.args])
        else:
            # Commutative containers: sort args so the traversal order
            # does not depend on the constants' existing indices.
            sortedargs = list(expr.args)
            sortedargs.sort(key=sort_key)
            return expr.func(*[_constant_renumber(x) for x in sortedargs])
    expr = _constant_renumber(expr)
    # Don't renumber symbols present in the ODE.
    constants_found = [c for c in constants_found if c not in variables]
    expr = expr.subs(zip(constants_found[1:], iter_constants), simultaneous=True)
    return expr
def _handle_Integral(expr, func, hint):
    """Convert pending Integrals in a solution according to *hint*.

    For the exact-equation hints the module-global dummy ``y`` is
    substituted back to ``f(x)`` (and the global is deleted); the
    ``_Integral`` hints leave the expression unevaluated; everything
    else gets ``doit()`` applied.
    """
    global y
    x = func.args[0]
    f = func.func
    if hint == "1st_exact":
        result = expr.doit().subs(y, f(x))
        del y
    elif hint == "1st_exact_Integral":
        result = Eq(Subs(expr.lhs, y, f(x)), expr.rhs)
        del y
    elif hint == "nth_linear_constant_coeff_homogeneous":
        result = expr
    elif hint.endswith("_Integral"):
        # Caller explicitly asked for the unevaluated form.
        result = expr
    else:
        result = expr.doit()
    return result
def ode_1st_exact(eq, func, order, match):
    """Solve a first-order exact ODE ``P(x, y) + Q(x, y) y' = 0``.

    Builds the potential function as ``Integral(P, x)`` plus the part
    of the ``y`` integral of ``Q`` not already accounted for, returning
    the implicit solution ``F(x, y) = C1``.  The dummy ``y`` is stored
    in the module-global ``y`` for ``_handle_Integral`` to undo.
    """
    global y
    x = func.args[0]
    r = match
    d = r[r['d']]
    e = r[r['e']]
    y = r['y']
    C1 = get_numbered_constants(eq, num=1)
    partial_x = Integral(d, x)
    potential = partial_x + Integral(e - partial_x.diff(y), y)
    return Eq(potential, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
    """Return the simpler of the two homogeneous-coefficient solutions.

    Solves the ODE with both substitutions (independent/dependent and
    dependent/independent), optionally simplifies each with odesimp,
    and picks the one scoring lowest under ode_sol_simplicity.
    """
    indep_sol = ode_1st_homogeneous_coeff_subs_indep_div_dep(
        eq, func, order, match)
    dep_sol = ode_1st_homogeneous_coeff_subs_dep_div_indep(
        eq, func, order, match)
    simplify = match.get('simplify', True)
    if simplify:
        # odesimp also runs constantsimp/constant_renumber on each.
        indep_sol = odesimp(
            eq, indep_sol, func, "1st_homogeneous_coeff_subs_indep_div_dep")
        dep_sol = odesimp(
            eq, dep_sol, func, "1st_homogeneous_coeff_subs_dep_div_indep")
    score = lambda s: ode_sol_simplicity(s, func, trysolving=not simplify)
    return min([indep_sol, dep_sol], key=score)
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
    """Solve a 1st-order homogeneous ODE with the substitution
    ``u1 = f(x)/x``.

    The substitution turns the matched equation into a separable
    integral in ``u1``; the implicit solution is returned in the
    log-combined form ``log(x) = ... + log(C1)``, shifted back if the
    match carries ``xarg``/``yarg`` translations.
    """
    x = func.args[0]
    f = func.func
    r = match
    u = Dummy('u')
    u1 = Dummy('u1')
    C1 = get_numbered_constants(eq, num=1)
    xarg = match.get('xarg', 0)
    yarg = match.get('yarg', 0)
    # Separable integrand in u1, integrated up to u1 = f(x)/x.
    integrand = (-r[r['e']]/(r[r['d']] + u1*r[r['e']])).subs(
        {x: 1, r['y']: u1})
    integral = Integral(integrand, (u1, None, f(x)/x))
    sol = logcombine(Eq(log(x), integral + log(C1)), force=True)
    # Undo any translation of the variables applied during matching.
    sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
    return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
    """Solve a 1st-order homogeneous ODE with the substitution
    ``u2 = x/f(x)``.

    Mirror of the dep/indep variant: the equation becomes a separable
    integral in ``u2`` and the implicit solution is returned as
    ``log(f(x)) = ... + log(C1)``, shifted back if the match carries
    ``xarg``/``yarg`` translations.
    """
    x = func.args[0]
    f = func.func
    r = match
    u = Dummy('u')
    u2 = Dummy('u2')
    C1 = get_numbered_constants(eq, num=1)
    xarg = match.get('xarg', 0)
    yarg = match.get('yarg', 0)
    # Separable integrand in u2, integrated up to u2 = x/f(x).
    integral = Integral(
        simplify(
            (-r[r['d']]/(r[r['e']] + u2*r[r['d']])).subs({x: u2, r['y']: 1})),
        (u2, None, x/f(x)))
    sol = logcombine(Eq(log(f(x)), integral + log(C1)), force=True)
    # Undo any translation of the variables applied during matching.
    sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
    return sol
def homogeneous_order(eq, *symbols):
    """Return the order ``n`` if ``eq`` is homogeneous of order ``n``
    in *symbols*, i.e. ``eq(t*x, t*y, ...) == t**n * eq(x, y, ...)``;
    otherwise ``None`` (including the implicit fall-through when no
    single power of ``t`` factors out).
    """
    if not symbols:
        raise ValueError("homogeneous_order: no symbols were given.")
    symset = set(symbols)
    eq = sympify(eq)
    # The following are not supported
    if eq.has(Order, Derivative):
        return None
    # These are all constants
    if (eq.is_Number or
        eq.is_NumberSymbol or
        eq.is_number
    ):
        return S.Zero
    # Replace all functions with dummy variables
    dum = numbered_symbols(prefix='d', cls=Dummy)
    newsyms = set()
    for i in [j for j in symset if getattr(j, 'is_Function')]:
        iargs = set(i.args)
        if iargs.difference(symset):
            # A function of something outside symset cannot be scaled.
            return None
        else:
            dummyvar = next(dum)
            eq = eq.subs(i, dummyvar)
            symset.remove(i)
            newsyms.add(dummyvar)
    symset.update(newsyms)
    if not eq.free_symbols & symset:
        return None
    # A nested function can only be homogeneous of order zero.
    if isinstance(eq, Function):
        return None if homogeneous_order(
            eq.args[0], *tuple(symset)) != 0 else S.Zero
    # Substitute x -> t*x and try to factor t out; t > 0 suffices.
    t = Dummy('t', positive=True)
    eqs = separatevars(eq.subs([(i, t*i) for i in symset]), [t], dict=True)[t]
    if eqs is S.One:
        return S.Zero
    i, d = eqs.as_independent(t, as_Add=False)
    b, e = d.as_base_exp()
    if b == t:
        return e
def ode_1st_linear(eq, func, order, match):
    """Solve a first-order linear ODE ``a(x) f' + b(x) f = c(x)``.

    Uses the integrating factor ``mu = exp(Integral(b/a, x))`` and
    returns ``lhs = (Integral(mu*(-c/a), x) + C1)/mu``, where ``lhs``
    is either ``f(x)`` or the substituted variable stored under 'u' by
    the almost-linear hint.
    """
    x = func.args[0]
    f = func.func
    r = match
    C1 = get_numbered_constants(eq, num=1)
    # Integrating factor.
    mu = exp(Integral(r[r['b']]/r[r['a']], x))
    rhs_integral = Integral(mu*(-r[r['c']]/r[r['a']]), x)
    lhs = match.get('u', f(x))
    return Eq(lhs, (rhs_integral + C1)/mu)
def ode_Bernoulli(eq, func, order, match):
    """Solve a Bernoulli ODE ``a(x) f' + b(x) f = c(x) f**n``.

    The classical ``u = f**(1 - n)`` linearization is applied; the
    result is returned already solved for ``f(x)``.
    """
    x = func.args[0]
    f = func.func
    r = match
    C1 = get_numbered_constants(eq, num=1)
    n = r[r['n']]
    # Integrating factor of the linearized equation.
    factor = exp((1 - n)*Integral(r[r['b']]/r[r['a']], x))
    particular = (n - 1)*Integral(factor*r[r['c']]/r[r['a']], x)
    return Eq(f(x), ((particular + C1)/factor)**(1/(1 - n)))
def ode_Riccati_special_minus2(eq, func, order, match):
    """Solve the special Riccati equation
    ``a2 f' = b2 f**2 + c2 f/x + d2/x**2`` in closed form.
    """
    x = func.args[0]
    f = func.func
    r = match
    C1 = get_numbered_constants(eq, num=1)
    a2 = r[r['a2']]
    b2 = r[r['b2']]
    c2 = r[r['c2']]
    d2 = r[r['d2']]
    mu = sqrt(4*d2*b2 - (a2 - c2)**2)
    general = (a2 - c2 - mu*tan(mu/(2*a2)*log(x) + C1))/(2*b2*x)
    return Eq(f(x), general)
def ode_Liouville(eq, func, order, match):
    """Solve a Liouville ODE ``f'' + g(f) f'**2 + h(x) f' = 0``.

    Returns the implicit solution built from the two quadratures
    ``Integral(exp(Integral(g, y)), y)`` and
    ``Integral(exp(-Integral(h, x)), x)``.
    """
    x = func.args[0]
    f = func.func
    r = match
    y = r['y']
    C1, C2 = get_numbered_constants(eq, num=2)
    inner = Integral(exp(Integral(r['g'], y)), (y, None, f(x)))
    outer = C1*Integral(exp(-Integral(r['h'], x)), x)
    return Eq(inner + outer + C2, 0)
def ode_2nd_power_series_ordinary(eq, func, order, match):
    """Power-series solution of a second-order linear ODE about an
    ordinary point ``x0``.

    Builds the recurrence satisfied by the series coefficients from the
    matched coefficients of ``f''``, ``f'``, ``f``, iterates it, and
    returns ``f(x)`` as a truncated series in ``(x - x0)`` with the two
    free constants ``C0``, ``C1`` plus an ``Order`` term.
    """
    x = func.args[0]
    f = func.func
    C0, C1 = get_numbered_constants(eq, num=2)
    n = Dummy("n", integer=True)
    s = Wild("s")
    k = Wild("k", exclude=[x])
    x0 = match.get('x0')
    terms = match.get('terms', 5)
    p = match[match['a3']]
    q = match[match['b3']]
    r = match[match['c3']]
    seriesdict = {}
    recurr = Function("r")
    # Each pair is (coefficient-of-a_n pattern, matched ODE coefficient);
    # the derivative of order `index` shifts the power of (x - x0) by -index.
    coefflist = [(recurr(n), r), (n*recurr(n), q), (n*(n - 1)*recurr(n), p)]
    for index, coeff in enumerate(coefflist):
        if coeff[1]:
            f2 = powsimp(expand((coeff[1]*(x - x0)**(n - index)).subs(x, x + x0)))
            if f2.is_Add:
                addargs = f2.args
            else:
                addargs = [f2]
            for arg in addargs:
                # Normalize each term to the exponent n and record the
                # index at which its summation effectively starts.
                powm = arg.match(s*x**k)
                term = coeff[0]*powm[s]
                if not powm[k].is_Symbol:
                    term = term.subs(n, n - powm[k].as_independent(n)[0])
                startind = powm[k].subs(n, index)
                if startind:
                    for i in reversed(range(startind)):
                        if not term.subs(n, i):
                            seriesdict[term] = i
                        else:
                            seriesdict[term] = i + 1
                            break
                else:
                    seriesdict[term] = S(0)
    # Strip leading terms so that every sum starts at the same index;
    # teq collects the stripped-off pieces.
    teq = S(0)
    suminit = seriesdict.values()
    rkeys = seriesdict.keys()
    req = Add(*rkeys)
    if any(suminit):
        maxval = max(suminit)
        for term in seriesdict:
            val = seriesdict[term]
            if val != maxval:
                for i in range(val, maxval):
                    teq += term.subs(n, val)
    finaldict = {}
    if teq:
        # Solve the stripped-off piece for its highest coefficient.
        fargs = teq.atoms(AppliedUndef)
        if len(fargs) == 1:
            finaldict[fargs.pop()] = 0
        else:
            maxf = max(fargs, key = lambda x: x.args[0])
            sol = solve(teq, maxf)
            if isinstance(sol, list):
                sol = sol[0]
            finaldict[maxf] = sol
    # Express the recurrence for the largest coefficient in terms of
    # the smaller ones.
    fargs = req.atoms(AppliedUndef)
    maxf = max(fargs, key = lambda x: x.args[0])
    minf = min(fargs, key = lambda x: x.args[0])
    if minf.args[0].is_Symbol:
        startiter = 0
    else:
        startiter = -minf.args[0].as_independent(n)[0]
    lhs = maxf
    rhs = solve(req, maxf)
    if isinstance(rhs, list):
        rhs = rhs[0]
    # Iterate the recurrence; c0 and c1 stay arbitrary.
    tcounter = len([t for t in finaldict.values() if t])
    for _ in range(tcounter, terms - 3):
        check = rhs.subs(n, startiter)
        nlhs = lhs.subs(n, startiter)
        nrhs = check.subs(finaldict)
        finaldict[nlhs] = nrhs
        startiter += 1
    # Assemble the truncated series.
    series = C0 + C1*(x - x0)
    for term in finaldict:
        if finaldict[term]:
            fact = term.args[0]
            series += (finaldict[term].subs([(recurr(0), C0), (recurr(1), C1)])*(
                x - x0)**fact)
    series = collect(expand_mul(series), [C0, C1]) + Order(x**terms)
    return Eq(f(x), series)
def ode_2nd_power_series_regular(eq, func, order, match):
    """Frobenius power-series solution of a second-order linear ODE
    about a regular singular point ``x0``.

    Determines the indicial roots ``m1, m2`` from the leading behavior
    ``p0``, ``q0`` of the matched coefficients, builds one or two
    Frobenius series with ``_frobenius``, and returns the truncated
    solution plus an ``Order`` term.  Implicitly returns ``None`` when
    the indicial roots are not all real.
    """
    x = func.args[0]
    f = func.func
    C0, C1 = get_numbered_constants(eq, num=2)
    m = Dummy("m")
    x0 = match.get('x0')
    terms = match.get('terms', 5)
    p = match['p']
    q = match['q']
    # Leading (constant) parts of p and q give the indicial equation.
    indicial = []
    for term in [p, q]:
        if not term.has(x):
            indicial.append(term)
        else:
            term = series(term, n=1, x0=x0)
            if isinstance(term, Order):
                indicial.append(S(0))
            else:
                for arg in term.args:
                    if not arg.has(x):
                        indicial.append(arg)
                        break
    p0, q0 = indicial
    sollist = solve(m*(m - 1) + m*p0 + q0, m)
    if sollist and isinstance(sollist, list) and all(
        [sol.is_real for sol in sollist]):
        serdict1 = {}
        serdict2 = {}
        if len(sollist) == 1:
            # Repeated indicial root: only one series solution here.
            m1 = m2 = sollist.pop()
            if terms-m1-1 <= 0:
                return Eq(f(x), Order(terms))
            serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
        else:
            m1 = sollist[0]
            m2 = sollist[1]
            if m1 < m2:
                # Use the larger root for the guaranteed solution.
                m1, m2 = m2, m1
            serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
            if not (m1 - m2).is_integer:
                # Roots not differing by an integer: second series exists.
                serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1)
            else:
                # Integer difference: second series only if consistent.
                serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1, check=m1)
        if serdict1:
            finalseries1 = C0
            for key in serdict1:
                power = int(key.name[1:])
                finalseries1 += serdict1[key]*(x - x0)**power
            finalseries1 = (x - x0)**m1*finalseries1
            finalseries2 = S(0)
            if serdict2:
                for key in serdict2:
                    power = int(key.name[1:])
                    finalseries2 += serdict2[key]*(x - x0)**power
                finalseries2 += C1
                finalseries2 = (x - x0)**m2*finalseries2
            return Eq(f(x), collect(finalseries1 + finalseries2,
                [C0, C1]) + Order(x**terms))
def _frobenius(n, m, p0, q0, p, q, x0, x, c, check=None):
    """Compute the first *n* Frobenius-series coefficients for the
    indicial root *m*, returned as ``{Ck: value}`` with ``C0 = c``.

    When *check* is the other indicial root (roots differ by an
    integer), ``False`` is returned if the recurrence becomes
    inconsistent at the critical index ``i == check - m``.
    """
    n = int(n)
    m2 = check
    d = Dummy("d")
    numsyms = numbered_symbols("C", start=0)
    numsyms = [next(numsyms) for i in range(n + 1)]
    serlist = []
    # Expand p and q to degree n about x0, as {(i,): coefficient} dicts.
    for ser in [p, q]:
        if ser.is_polynomial(x) and Poly(ser, x).degree() <= n:
            if x0:
                ser = ser.subs(x, x + x0)
            dict_ = Poly(ser, x).as_dict()
        else:
            tseries = series(ser, x=x0, n=n+1)
            # Drop the Order term before converting to a Poly.
            dict_ = Poly(list(ordered(tseries.args))[: -1], x).as_dict()
        # Fill in missing coefficients with zeros.
        for i in range(n + 1):
            if (i,) not in dict_:
                dict_[(i,)] = S(0)
        serlist.append(dict_)
    pseries = serlist[0]
    qseries = serlist[1]
    indicial = d*(d - 1) + d*p0 + q0
    frobdict = {}
    # Standard Frobenius recurrence: each coefficient is determined by
    # the earlier ones divided by the indicial polynomial at m + i.
    for i in range(1, n + 1):
        num = c*(m*pseries[(i,)] + qseries[(i,)])
        for j in range(1, i):
            sym = Symbol("C" + str(j))
            num += frobdict[sym]*((m + j)*pseries[(i - j,)] + qseries[(i - j,)])
        if m2 is not None and i == m2 - m:
            # Critical index: a second solution exists only if num == 0.
            if num:
                return False
            else:
                frobdict[numsyms[i]] = S(0)
        else:
            frobdict[numsyms[i]] = -num/(indicial.subs(d, m+i))
    return frobdict
def _nth_order_reducible_match(eq, func):
    """Match an ODE whose every use of *func* sits inside a derivative
    of order at least ``n``.

    If at least two distinct derivative orders w.r.t. the independent
    variable occur and *func* never appears outside ``f^(n)`` (with
    ``n`` the smallest order), the equation can be reduced by the
    substitution ``g(x) = f^(n)(x)``.  Returns ``{'n': n}`` on success,
    otherwise ``None``.
    """
    assert len(func.args) == 1
    x = func.args[0]
    # Orders of all pure single-variable derivatives of func in eq.
    var_counts = [d.variable_count[0] for d in eq.atoms(Derivative)
                  if d.expr == func and len(d.variable_count) == 1]
    orders = [count for var, count in var_counts if var == x]
    if len(orders) < 2:
        return
    n = min(orders)
    # Fail if func still appears after hiding d^n f/dx^n.
    placeholder = Dummy()
    if eq.subs(func.diff(x, n), placeholder).has(func):
        return
    return {'n': n}
def ode_nth_order_reducible(eq, func, order, match):
    """Solve an ODE via the order-reducing substitution
    ``g(x) = f^(n)(x)``.

    First solves the reduced equation for ``g``, then solves
    ``f^(n)(x) = g`` for each branch of the result.  Returns a single
    solution or a list of them, following dsolve's conventions.
    """
    x = func.args[0]
    f = func.func
    n = match['n']
    # Pick a fresh function name not already present in the equation.
    used_names = [a.name for a in eq.atoms(AppliedUndef)]
    while True:
        candidate = Dummy().name
        if candidate not in used_names:
            g = Function(candidate)
            break
    deriv = f(x).diff(x, n)
    reduced_sols = dsolve(eq.subs(deriv, g(x)), g(x))
    if not isinstance(reduced_sols, list):
        reduced_sols = [reduced_sols]
    # Integrate each branch back up to f(x).
    fsol = [dsolve(gsol.subs(g(x), deriv), f(x)) for gsol in reduced_sols]
    if len(fsol) == 1:
        fsol = fsol[0]
    return fsol
# Cache of diffx classes keyed by independent variable so that repeated
# calls for the same variable reuse a single class.
_nth_algebraic_diffx_stored = {}
def _nth_algebraic_diffx(var):
    """Return a Function subclass standing in for ``d/d(var)``.

    solve() can invert the returned operator: its inverse integrates
    w.r.t. *var* and adds an arbitrary constant.
    """
    cls = _nth_algebraic_diffx_stored.get(var, None)
    if cls is None:
        class diffx(Function):
            def inverse(self):
                # This shows up as a Dummy symbol instead of a constant
                # in the equation; integrals will not be correct while solve
                # is at work.
                return lambda expr: Integral(expr, var) + Dummy('C')
        cls = _nth_algebraic_diffx_stored.setdefault(var, diffx)
    return cls
def _nth_algebraic_match(eq, func):
    """Match any ODE that solve() can handle algebraically for *func*.

    Derivatives w.r.t. the independent variable are temporarily wrapped
    in an invertible ``diffx`` operator so solve() can undo them by
    integration; the solutions, with real derivatives restored, are
    stored under 'solutions' in the returned match dict.
    """
    # The independent variable
    var = func.args[0]
    # Derivative that solve can handle:
    diffx = _nth_algebraic_diffx(var)
    # Replace derivatives wrt the independent variable with diffx
    def replace(eq, var):
        def expand_diffx(*args):
            differand, diffs = args[0], args[1:]
            toreplace = differand
            for v, n in diffs:
                for _ in range(n):
                    if v == var:
                        toreplace = diffx(toreplace)
                    else:
                        toreplace = Derivative(toreplace, v)
            return toreplace
        return eq.replace(Derivative, expand_diffx)
    # Restore derivatives in solution afterwards
    def unreplace(eq, var):
        return eq.replace(diffx, lambda e: Derivative(e, var))
    subs_eqn = replace(eq, var)
    try:
        # turn off simplification to protect Integrals that have
        # _t instead of fx in them and would otherwise factor
        # as t_*Integral(1, x)
        solns = solve(subs_eqn, func, simplify=False)
    except NotImplementedError:
        solns = []
    solns = [simplify(unreplace(soln, var)) for soln in solns]
    solns = [Equality(func, soln) for soln in solns]
    return {'var':var, 'solutions':solns}
def ode_nth_algebraic(eq, func, order, match):
    """Return the solutions found algebraically by solve().

    Solutions that are special cases of another solution are pruned;
    a lone remaining solution is returned bare rather than in a list.
    """
    candidates = _nth_algebraic_remove_redundant_solutions(
        eq, match['solutions'], order, match['var'])
    return candidates[0] if len(candidates) == 1 else candidates
# FIXME: Maybe something like this function should be applied to the solutions
# returned by dsolve in general rather than just for nth_algebraic...
def _nth_algebraic_remove_redundant_solutions(eq, solns, order, var):
    """Remove from *solns* any solution that is a special case of
    another (i.e. obtainable from it by fixing integration constants).

    Returns the surviving solutions in their original order.
    """
    def is_special_case_of(soln1, soln2):
        return _nth_algebraic_is_special_case_of(soln1, soln2, eq, order, var)
    unique_solns = []
    for soln1 in solns:
        for soln2 in unique_solns[:]:
            if is_special_case_of(soln1, soln2):
                # soln1 adds nothing new; discard it.
                break
            elif is_special_case_of(soln2, soln1):
                # soln1 generalizes soln2; drop the older one.
                unique_solns.remove(soln2)
        else:
            # BUGFIX: this else must belong to the *for* loop (for-else),
            # so that soln1 is appended exactly once, and appended when
            # unique_solns is empty.  Previously it was bound to the
            # if/elif, which never appended anything when unique_solns
            # started empty (the function always returned []) and
            # otherwise appended duplicates.
            unique_solns.append(soln1)
    return unique_solns
def _nth_algebraic_is_special_case_of(soln1, soln2, eq, order, var):
    """Return True if *soln1* can be obtained from *soln2* by some
    choice of soln2's integration constants.
    """
    # The solutions returned by nth_algebraic should be given explicitly as in
    # Eq(f(x), expr). We will equate the RHSs of the two solutions giving an
    # equation f1(x) = f2(x).
    #
    # Since this is supposed to hold for all x it also holds for derivatives
    # f1'(x) and f2'(x). For an order n ode we should be able to differentiate
    # each solution n times to get n+1 equations.
    #
    # We then try to solve those n+1 equations for the integrations constants
    # in f2(x). If we can find a solution that doesn't depend on x then it
    # means that soln1 is a special case of soln2.
    constants1 = soln1.free_symbols.difference(eq.free_symbols)
    constants2 = soln2.free_symbols.difference(eq.free_symbols)
    # Rename soln1's constants so they cannot clash with soln2's.
    constants1_new = get_numbered_constants(soln1.rhs - soln2.rhs, len(constants1))
    if len(constants1) == 1:
        constants1_new = {constants1_new}
    for c_old, c_new in zip(constants1, constants1_new):
        soln1 = soln1.subs(c_old, c_new)
    lhs = soln1.rhs.doit()
    rhs = soln2.rhs.doit()
    eqns = [Eq(lhs, rhs)]
    # Differentiate up to order-1 times to pin down the constants.
    for n in range(1, order):
        lhs = lhs.diff(var)
        rhs = rhs.diff(var)
        eq = Eq(lhs, rhs)
        eqns.append(eq)
    # Structurally contradictory equations mean: not a special case.
    if any(isinstance(eq, BooleanFalse) for eq in eqns):
        return False
    eqns = [eq for eq in eqns if not isinstance(eq, BooleanTrue)]
    constant_solns = solve(eqns, constants2)
    if isinstance(constant_solns, dict):
        constant_solns = [constant_solns]
    # If any choice of constants is independent of var, soln1 is a
    # special case of soln2.
    for constant_soln in constant_solns:
        if not any(c.has(var) for c in constant_soln.values()):
            return True
    return False
def _nth_linear_match(eq, func, order):
    """Match a linear ODE, returning ``{order: coefficient}``.

    Key ``-1`` holds the inhomogeneous part (terms free of *func*);
    keys ``0..order`` hold the coefficient of the corresponding
    derivative of *func*.  Returns ``None`` if *eq* is not linear in
    *func*.
    """
    x = func.args[0]
    wrt_x = {x}
    coeffs = {n: S.Zero for n in range(-1, order + 1)}
    for term in Add.make_args(eq):
        if not term.has(func):
            coeffs[-1] += term
            continue
        c, dep = term.as_independent(func)
        is_pure_deriv = (isinstance(dep, Derivative)
                         and set(dep.variables) == wrt_x
                         and dep.args[0] == func)
        if is_pure_deriv:
            coeffs[dep.derivative_count] += c
        elif dep == func:
            coeffs[len(dep.args[1:])] += c
        else:
            # Nonlinear dependence on func: not a linear ODE.
            return None
    return coeffs
def ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='sol'):
    """Solve an nth-order linear homogeneous Cauchy-Euler equation.

    The trial solution ``x**m`` yields a polynomial characteristic
    equation in ``m``; real, complex and CRootOf roots (with
    multiplicity) each contribute ``log(x)**i * x**root`` style terms.

    ``returns`` selects the output: 'sol' gives ``Eq(f(x), gsol)``,
    'list' gives the list of fundamental solutions, 'both' gives a dict
    with both.  The module-global ``collectterms`` is populated for
    odesimp's later collect() pass.
    """
    global collectterms
    collectterms = []
    x = func.args[0]
    f = func.func
    r = match
    # First, set up characteristic equation.
    chareq, symbol = S.Zero, Dummy('x')
    for i in r.keys():
        if not isinstance(i, string_types) and i >= 0:
            chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
    chareq = Poly(chareq, symbol)
    chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
    # A generator of constants
    constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
    constants.reverse()
    # Create a dict root: multiplicity or charroots
    charroots = defaultdict(int)
    for root in chareqroots:
        charroots[root] += 1
    gsol = S(0)
    # We need keep track of terms so we can run collect() at the end.
    # This is necessary for constantsimp to work properly.
    ln = log
    for root, multiplicity in charroots.items():
        for i in range(multiplicity):
            if isinstance(root, RootOf):
                gsol += (x**root) * constants.pop()
                if multiplicity != 1:
                    raise ValueError("Value should be 1")
                collectterms = [(0, root, 0)] + collectterms
            elif root.is_real:
                gsol += ln(x)**i*(x**root) * constants.pop()
                collectterms = [(i, root, 0)] + collectterms
            else:
                reroot = re(root)
                imroot = im(root)
                gsol += ln(x)**i * (x**reroot) * (
                    constants.pop() * sin(abs(imroot)*ln(x))
                    + constants.pop() * cos(imroot*ln(x)))
                # Preserve ordering (multiplicity, real part, imaginary part)
                # It will be assumed implicitly when constructing
                # fundamental solution sets.
                collectterms = [(i, reroot, imroot)] + collectterms
    if returns == 'sol':
        return Eq(f(x), gsol)
    elif returns in ('list', 'both'):
        # BUGFIX: previously ('list' 'both') -- implicit string
        # concatenation to 'listboth', turning this into a substring
        # test that also accepted junk values like 'istb'.
        # Create a list of (hopefully) linearly independent solutions
        gensols = []
        # Keep track of when to use sin or cos for nonzero imroot
        for i, reroot, imroot in collectterms:
            if imroot == 0:
                gensols.append(ln(x)**i*x**reroot)
            else:
                sin_form = ln(x)**i*x**reroot*sin(abs(imroot)*ln(x))
                if sin_form in gensols:
                    cos_form = ln(x)**i*x**reroot*cos(imroot*ln(x))
                    gensols.append(cos_form)
                else:
                    gensols.append(sin_form)
        if returns == 'list':
            return gensols
        else:
            return {'sol': Eq(f(x), gsol), 'list': gensols}
    else:
        raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(eq, func, order, match, returns='sol'):
    """Solve an nth-order nonhomogeneous Cauchy-Euler equation by
    undetermined coefficients.

    The substitution ``x -> exp(x)`` converts the Euler equation to a
    constant-coefficient one, which is solved and mapped back with
    ``x -> log(x)``.
    """
    x = func.args[0]
    f = func.func
    r = match
    # Build the constant-coefficient equation from the characteristic
    # polynomial of the Euler operator.
    chareq, eq, symbol = S.Zero, S.Zero, Dummy('x')
    for i in r.keys():
        if not isinstance(i, string_types) and i >= 0:
            chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
    for i in range(1,degree(Poly(chareq, symbol))+1):
        eq += chareq.coeff(symbol**i)*diff(f(x), x, i)
    if chareq.as_coeff_add(symbol)[0]:
        eq += chareq.as_coeff_add(symbol)[0]*f(x)
    # Transform the inhomogeneous part; posify keeps exp/log sane.
    # NOTE: the local `re` here is posify's replacement dict, shadowing
    # the sympy `re` function within this scope.
    e, re = posify(r[-1].subs(x, exp(x)))
    eq += e.subs(re)
    # Delegate to the constant-coefficient solver, then map back.
    match = _nth_linear_match(eq, f(x), ode_order(eq, f(x)))
    match['trialset'] = r['trialset']
    return ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match).subs(x, log(x)).subs(f(log(x)), f(x)).expand()
def ode_nth_linear_euler_eq_nonhomogeneous_variation_of_parameters(eq, func, order, match, returns='sol'):
    """Solve an nth-order nonhomogeneous Cauchy-Euler equation by
    variation of parameters.

    The homogeneous general solution (and fundamental set) is computed
    first and merged into *match*; the inhomogeneous part is normalized
    by the leading coefficient before calling
    ``_solve_variation_of_parameters``, and the particular part is
    rescaled back in the returned solution.
    """
    x = func.args[0]
    f = func.func
    r = match
    # Homogeneous solution plus the fundamental solution list.
    homogeneous = ode_nth_linear_euler_eq_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    leading = r[ode_order(eq, f(x))]
    r[-1] = r[-1]/leading
    sol = _solve_variation_of_parameters(eq, func, order, match)
    return Eq(f(x), r['sol'].rhs + (sol.rhs - r['sol'].rhs)*leading)
def ode_almost_linear(eq, func, order, match):
    r"""Solve an almost-linear ODE.

    By the time this hint runs, ``classify_ode`` has already rewritten
    the coefficients into the shape expected by the first-order linear
    solver, so the problem is simply delegated to ``ode_1st_linear``.
    """
    return ode_1st_linear(eq, func, order, match)
def _linear_coeff_match(expr, func):
    r"""Match ``expr`` as a function of the single ratio
    ``(a1*x + b1*f(x) + c1)/(a2*x + b2*f(x) + c2)``.

    Helper for the ``linear_coefficients`` hint.  Returns the
    intersection point ``(x0, y0)`` of the two lines, i.e.
    ``((b2*c1 - b1*c2)/d, (a1*c2 - a2*c1)/d)`` with
    ``d = a2*b1 - a1*b2``, when every function argument inside ``expr``
    matches the same ratio; otherwise falls through and returns None.
    """
    f = func.func
    x = func.args[0]
    def abc(eq):
        # Return (a, b, c) such that eq == a*x + b*f(x) + c, requiring all
        # three to be Rational; return None otherwise.
        eq = _mexpand(eq)
        c = eq.as_independent(x, f(x), as_Add=True)[0]
        if not c.is_Rational:
            return
        a = eq.coeff(x)
        if not a.is_Rational:
            return
        b = eq.coeff(f(x))
        if not b.is_Rational:
            return
        # Verify the decomposition is exact (no cross terms left over).
        if eq == a*x + b*f(x) + c:
            return a, b, c
    def match(arg):
        # Match arg as a ratio of two linear forms with nonzero determinant
        # d = a2*b1 - a1*b2 and at least one nonzero constant term.
        n, d = arg.together().as_numer_denom()
        m = abc(n)
        if m is not None:
            a1, b1, c1 = m
            m = abc(d)
            if m is not None:
                a2, b2, c2 = m
                d = a2*b1 - a1*b2
                if (c1 or c2) and d:
                    return a1, b1, c1, a2, b2, c2, d
    # Arguments of every single-argument function other than f itself;
    # fall back to expr when there are none.
    m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and
        len(fi.args) == 1 and not fi.args[0].is_Function] or {expr}
    # All function arguments must match the same linear-coefficient ratio.
    m1 = match(m.pop())
    if m1 and all(match(mi) == m1 for mi in m):
        a1, b1, c1, a2, b2, c2, denom = m1
        return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom
def ode_linear_coefficients(eq, func, order, match):
    r"""Solve an ODE with linear coefficients.

    The substitution prepared by ``classify_ode`` reduces the problem to
    one with homogeneous coefficients, so the best first-order
    homogeneous-coefficient solver is invoked directly.
    """
    return ode_1st_homogeneous_coeff_best(eq, func, order, match)
def ode_separable_reduced(eq, func, order, match):
    r"""Solve an ODE that ``classify_ode`` has reduced to separable form.

    The match data is repackaged into the ``m1``/``m2`` dictionary layout
    that ``ode_separable`` expects and the problem is delegated to that
    solver (keeping the argument order coherent with it).
    """
    x = func.args[0]
    f = func.func
    y = Dummy('y')
    u = match['u'].subs(match['t'], y)
    # Separated pieces: m1 holds the x-side, m2 the y-side of the equation.
    separable_match = {
        'm1': {y: 1, x: -1/x, 'coeff': 1},
        'm2': {y: 1/(y*(match['power'] - u)), x: 1, 'coeff': 1},
        'y': y,
        'hint': x**match['power']*f(x),
    }
    return ode_separable(eq, func, order, separable_match)
def ode_1st_power_series(eq, func, order, match):
    r"""Compute a power-series solution of the first-order ODE
    ``dy/dx = h(x, y)`` about the point ``f0`` with initial value
    ``f0val``.

    Successive coefficients are generated by the recurrence
    ``F_{n+1} = dF/dx + (dF/dy)*h`` evaluated at the expansion point.
    Returns ``Eq(f(x), series + O(x**terms))``; if any coefficient
    evaluates to an infinity, the solution is not analytic there and
    ``Eq(f(x), oo)`` is returned.
    """
    x = func.args[0]
    y = match['y']
    f = func.func
    # h = dy/dx rewritten from the matched d*dx + e*dy form.
    h = -match[match['d']]/match[match['e']]
    point = match.get('f0')
    value = match.get('f0val')
    terms = match.get('terms')
    # First term
    F = h
    if not h:
        return Eq(f(x), value)
    # Initialization
    series = value
    if terms > 1:
        hc = h.subs({x: point, y: value})
        if hc.has(oo) or hc.has(NaN) or hc.has(zoo):
            # Derivative does not exist, not analytic
            return Eq(f(x), oo)
        elif hc:
            series += hc*(x - point)
    # Higher-order coefficients via repeated total differentiation of F.
    for factcount in range(2, terms):
        Fnew = F.diff(x) + F.diff(y)*h
        Fnewc = Fnew.subs({x: point, y: value})
        # Same logic as above
        if Fnewc.has(oo) or Fnewc.has(NaN) or Fnewc.has(-oo) or Fnewc.has(zoo):
            return Eq(f(x), oo)
        series += Fnewc*((x - point)**factcount)/factorial(factcount)
        F = Fnew
    series += Order(x**terms)
    return Eq(f(x), series)
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
        returns='sol'):
    r"""Solve an nth-order linear homogeneous ODE with constant
    coefficients.

    The characteristic polynomial is built from the coefficient dict
    ``match`` (integer keys are derivative orders), its roots are found
    (falling back to ``rootof`` for unsolvable polynomials), and a
    fundamental solution set is assembled, pairing conjugate complex
    roots into sin/cos terms when all coefficients are real.

    ``returns`` selects the output: ``'sol'`` gives ``Eq(f(x), gsol)``,
    ``'list'`` the list of linearly independent solutions, ``'both'`` a
    dict containing both.  Raises ValueError for any other value.
    """
    x = func.args[0]
    f = func.func
    r = match
    # First, set up characteristic equation.
    chareq, symbol = S.Zero, Dummy('x')
    for i in r.keys():
        # Skip string keys (metadata) and the r[-1] inhomogeneous slot.
        if type(i) == str or i < 0:
            pass
        else:
            chareq += r[i]*symbol**i
    chareq = Poly(chareq, symbol)
    # Can't just call roots because it doesn't return rootof for unsolveable
    # polynomials.
    chareqroots = roots(chareq, multiple=True)
    if len(chareqroots) != order:
        chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
    chareq_is_complex = not all(i.is_real for i in chareq.all_coeffs())
    # A generator of constants
    constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
    # Create a dict root: multiplicity
    charroots = defaultdict(int)
    for root in chareqroots:
        charroots[root] += 1
    # We need to keep track of terms so we can run collect() at the end.
    # This is necessary for constantsimp to work properly.
    global collectterms
    collectterms = []
    gensols = []
    conjugate_roots = []  # used to prevent double-use of conjugate roots
    # Loop over roots in the order provided by roots/rootof...
    for root in chareqroots:
        # ...but don't repeat multiple roots.
        if root not in charroots:
            continue
        multiplicity = charroots.pop(root)
        for i in range(multiplicity):
            if chareq_is_complex:
                # Complex coefficients: keep roots in exponential form.
                gensols.append(x**i*exp(root*x))
                collectterms = [(i, root, 0)] + collectterms
                continue
            reroot = re(root)
            imroot = im(root)
            if imroot.has(atan2) and reroot.has(atan2):
                # re/im could not cleanly separate the root (atan2
                # leftovers); keep the complex exponential form.
                gensols.append(x**i*exp(root*x))
                collectterms = [(i, root, 0)] + collectterms
            else:
                if root in conjugate_roots:
                    # Already handled via its conjugate partner.
                    collectterms = [(i, reroot, imroot)] + collectterms
                    continue
                if imroot == 0:
                    gensols.append(x**i*exp(reroot*x))
                    collectterms = [(i, reroot, 0)] + collectterms
                    continue
                # Pair the conjugate roots into sin/cos solutions.
                conjugate_roots.append(conjugate(root))
                gensols.append(x**i*exp(reroot*x)*sin(abs(imroot)*x))
                gensols.append(x**i*exp(reroot*x)*cos(imroot*x))
                collectterms = [(i, reroot, imroot)] + collectterms
    if returns == 'list':
        return gensols
    elif returns in ('sol', 'both'):
        # Bug fix: this was written ('sol' 'both'), which implicitly
        # concatenates to the single string 'solboth' and only matched by
        # accidental substring tests (it would also accept e.g. 'olb').
        gsol = Add(*[i*j for (i, j) in zip(constants, gensols)])
        if returns == 'sol':
            return Eq(f(x), gsol)
        else:
            return {'sol': Eq(f(x), gsol), 'list': gensols}
    else:
        raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
    r"""Solve an nth-order linear constant-coefficient ODE by the method
    of undetermined coefficients.

    The homogeneous solution (both the expression and the list of
    independent solutions) is computed first and merged into ``match``,
    after which ``_solve_undetermined_coefficients`` supplies the
    particular solution.
    """
    homogeneous = ode_nth_linear_constant_coeff_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
    r"""Find the particular solution of a linear constant-coefficient ODE
    via undetermined coefficients.

    ``match`` must contain 'list' (independent homogeneous solutions),
    'sol' (the general homogeneous solution) and 'trialset' (candidate
    trial terms).  Relies on the module-global ``collectterms`` set up by
    ``ode_nth_linear_constant_coeff_homogeneous``.  Raises
    NotImplementedError when too few homogeneous solutions are known or
    the coefficient system cannot be solved.
    """
    x = func.args[0]
    f = func.func
    r = match
    coeffs = numbered_symbols('a', cls=Dummy)
    coefflist = []
    gensols = r['list']
    gsol = r['sol']
    trialset = r['trialset']
    notneedset = set([])
    global collectterms
    if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply" +
            " undetermined coefficients to " + str(eq) +
            " (number of terms != order)")
    usedsin = set([])
    mult = 0  # multiplicity of the current root
    getmult = True
    for i, reroot, imroot in collectterms:
        # collectterms lists (power, re, im) per root, highest power first;
        # refresh `mult` at the start of each root's run.
        if getmult:
            mult = i + 1
            getmult = False
        if i == 0:
            getmult = True
        if imroot:
            # Alternate between sin and cos for the conjugate pair.
            if (i, reroot) in usedsin:
                check = x**i*exp(reroot*x)*cos(imroot*x)
            else:
                check = x**i*exp(reroot*x)*sin(abs(imroot)*x)
                usedsin.add((i, reroot))
        else:
            check = x**i*exp(reroot*x)
        if check in trialset:
            # If a trial term coincides with a homogeneous solution,
            # multiply by enough powers of x to make it independent; we
            # also don't need to bother checking for the coefficients on
            # those elements, since we already know it will be 0.
            while True:
                if check*x**mult in trialset:
                    mult += 1
                else:
                    break
            trialset.add(check*x**mult)
            notneedset.add(check)
    newtrialset = trialset - notneedset
    trialfunc = 0
    for i in newtrialset:
        c = next(coeffs)
        coefflist.append(c)
        trialfunc += c*i
    eqs = sub_func_doit(eq, f(x), trialfunc)
    # NOTE: the +1 makes no difference here — zip truncates to the
    # shorter sequence.
    coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))
    eqs = _mexpand(eqs)
    # Collect the coefficient of each trial term across the expanded sum.
    for i in Add.make_args(eqs):
        s = separatevars(i, dict=True, symbols=[x])
        coeffsdict[s[x]] += s['coeff']
    coeffvals = solve(list(coeffsdict.values()), coefflist)
    if not coeffvals:
        raise NotImplementedError(
            "Could not solve `%s` using the "
            "method of undetermined coefficients "
            "(unable to solve for coefficients)." % eq)
    psol = trialfunc.subs(coeffvals)
    return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x):
    r"""Return a trial-function match for the method of undetermined
    coefficients.

    Returns a dict with key 'test' (True when ``expr`` consists only of
    terms whose repeated derivatives span a finite family: polynomials,
    exponentials, sin/cos with linear arguments, and products thereof)
    and, when the test passes, 'trialset': the set of trial terms with
    their constant coefficients stripped.
    """
    a = Wild('a', exclude=[x])
    b = Wild('b', exclude=[x])
    expr = powsimp(expr, combine='exp')  # exp(x)*exp(2*x + 1) => exp(3*x + 1)
    retdict = {}
    def _test_term(expr, x):
        # True when expr is a sum/product of terms the method can handle.
        if not expr.has(x):
            return True
        elif expr.is_Add:
            return all(_test_term(i, x) for i in expr.args)
        elif expr.is_Mul:
            if expr.has(sin, cos):
                foundtrig = False
                # Make sure that there is only one trig function in the args.
                # See the docstring.
                for i in expr.args:
                    if i.has(sin, cos):
                        if foundtrig:
                            return False
                        else:
                            foundtrig = True
            return all(_test_term(i, x) for i in expr.args)
        elif expr.is_Function:
            if expr.func in (sin, cos, exp):
                if expr.args[0].match(a*x + b):
                    return True
                else:
                    return False
            else:
                return False
        elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
                expr.exp >= 0:
            return True
        elif expr.is_Pow and expr.base.is_number:
            if expr.exp.match(a*x + b):
                return True
            else:
                return False
        elif expr.is_Symbol or expr.is_number:
            return True
        else:
            return False
    def _get_trial_set(expr, x, exprs=set([])):
        # Build the closure of expr under differentiation (coefficients
        # stripped).  NOTE: the mutable default set is safe only because
        # this def is re-executed on every call of the enclosing function,
        # so a fresh default is created each time.
        def _remove_coefficient(expr, x):
            # Strip the constant (x-free) factor from a term.
            term = S.One
            if expr.is_Mul:
                for i in expr.args:
                    if i.has(x):
                        term *= i
            elif expr.has(x):
                term = expr
            return term
        expr = expand_mul(expr)
        if expr.is_Add:
            for term in expr.args:
                if _remove_coefficient(term, x) in exprs:
                    pass
                else:
                    exprs.add(_remove_coefficient(term, x))
                    exprs = exprs.union(_get_trial_set(term, x, exprs))
        else:
            term = _remove_coefficient(expr, x)
            tmpset = exprs.union({term})
            oldset = set([])
            # Differentiate until the set of terms stops growing.
            while tmpset != oldset:
                # If you get stuck in this loop, then _test_term is probably
                # broken
                oldset = tmpset.copy()
                expr = expr.diff(x)
                term = _remove_coefficient(expr, x)
                if term.is_Add:
                    tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
                else:
                    tmpset.add(term)
            exprs = tmpset
        return exprs
    retdict['test'] = _test_term(expr, x)
    if retdict['test']:
        # Try to generate a list of trial solutions that will have the
        # undetermined coefficients. Note that if any of these are not linearly
        # independent with any of the solutions to the homogeneous equation,
        # then they will need to be multiplied by sufficient x to make them so.
        # This function DOES NOT do that (it doesn't even look at the
        # homogeneous equation).
        retdict['trialset'] = _get_trial_set(expr, x)
    return retdict
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
    r"""Solve an nth-order linear constant-coefficient ODE by variation
    of parameters.

    The homogeneous solution (expression and independent-solution list)
    is computed and merged into ``match`` before delegating to
    ``_solve_variation_of_parameters`` for the particular solution.
    """
    homogeneous = ode_nth_linear_constant_coeff_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
    r"""Compute the particular solution of a linear ODE by variation of
    parameters, given the homogeneous solutions in ``match``.

    ``match`` must contain 'list' (independent homogeneous solutions),
    'sol' (the general homogeneous solution), the coefficient entries
    (``r[-1]`` inhomogeneity, ``r[order]`` leading coefficient) and
    optionally 'simplify'.  Raises NotImplementedError when the
    Wronskian vanishes or too few homogeneous solutions are known.
    """
    x = func.args[0]
    f = func.func
    r = match
    psol = 0
    gensols = r['list']
    gsol = r['sol']
    wr = wronskian(gensols, x)
    if r.get('simplify', True):
        wr = simplify(wr)
        # A nonzero Wronskian is required; simplify hard so a symbolic
        # zero is actually detected.
        wr = trigsimp(wr, deep=True, recursive=True)
    if not wr:
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply " +
            "variation of parameters to " + str(eq) + " (Wronskian == 0)")
    if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) +
            " solutions to the homogeneous equation necessary to apply " +
            "variation of parameters to " +
            str(eq) + " (number of terms != order)")
    # Cramer's-rule form: alternate signs with reduced Wronskians.
    negoneterm = (-1)**(order)
    for i in gensols:
        psol += negoneterm*Integral(wronskian([sol for sol in gensols if sol != i], x)*r[-1]/wr, x)*i/r[order]
        negoneterm *= -1
    if r.get('simplify', True):
        psol = simplify(psol)
        psol = trigsimp(psol, deep=True)
    return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
    r"""Solve a separable first-order ODE.

    Builds the implicit solution
    ``Integral(g(y), (y, None, u)) == Integral(h(x), x) + C1`` from the
    separated pieces stored in ``match`` (the ``m1``/``m2`` dicts
    produced by ``classify_ode``).
    """
    x = func.args[0]
    f = func.func
    C1 = get_numbered_constants(eq, num=1)
    y = match['y']
    m1 = match['m1']
    m2 = match['m2']
    # Upper bound of the y-integral; defaults to f(x) itself.
    u = match.get('hint', f(x))
    lhs = Integral(m2['coeff']*m2[y]/m1[y], (y, None, u))
    rhs = Integral(-m1['coeff']*m1[x]/m2[x], x) + C1
    return Eq(lhs, rhs)
def checkinfsol(eq, infinitesimals, func=None, order=None):
    r"""Check whether the given infinitesimals satisfy the linearized
    determining PDE of the Lie group of the first-order ODE ``eq``.

    Returns a list of ``(status, value)`` tuples, one per candidate:
    ``(True, 0)`` when the candidate is a symmetry, otherwise
    ``(False, residue)`` with the nonzero PDE residue.

    Raises ValueError for ODEs with more than one independent variable
    and NotImplementedError when the order is not 1.
    """
    if isinstance(eq, Equality):
        eq = eq.lhs - eq.rhs
    if not func:
        eq, func = _preprocess(eq)
    variables = func.args
    if len(variables) != 1:
        raise ValueError("ODE's have only one independent variable")
    else:
        x = variables[0]
        if not order:
            order = ode_order(eq, func)
        if order != 1:
            raise NotImplementedError("Lie groups solver has been implemented "
                "only for first order differential equations")
        else:
            df = func.diff(x)
            # Rewrite the ODE as dy/dx = h(x, y).
            a = Wild('a', exclude = [df])
            b = Wild('b', exclude = [df])
            match = collect(expand(eq), df).match(a*df + b)
            if match:
                h = -simplify(match[b]/match[a])
            else:
                try:
                    sol = solve(eq, df)
                except NotImplementedError:
                    raise NotImplementedError("Infinitesimals for the "
                        "first order ODE could not be found")
                else:
                    h = sol[0]  # Find infinitesimals for one solution
            y = Dummy('y')
            h = h.subs(func, y)
            xi = Function('xi')(x, y)
            eta = Function('eta')(x, y)
            dxi = Function('xi')(x, func)
            deta = Function('eta')(x, func)
            # Linearized symmetry condition (determining PDE).
            pde = (eta.diff(x) + (eta.diff(y) - xi.diff(x))*h -
                (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)))
            soltup = []
            for sol in infinitesimals:
                tsol = {xi: S(sol[dxi]).subs(func, y),
                    eta: S(sol[deta]).subs(func, y)}
                sol = simplify(pde.subs(tsol).doit())
                if sol:
                    soltup.append((False, sol.subs(y, func)))
                else:
                    soltup.append((True, 0))
            return soltup
def ode_lie_group(eq, func, order, match):
    r"""Solve a first-order ODE using Lie group symmetries.

    Infinitesimals ``(xi, eta)`` are found by the heuristics listed in
    ``lie_heuristics`` (or taken from user-supplied values in ``match``),
    the ODE is rewritten in canonical coordinates ``(r, s)`` where it
    becomes a quadrature, integrated, and transformed back.  Raises
    NotImplementedError when no heuristic yields a usable symmetry.
    """
    heuristics = lie_heuristics
    inf = {}
    f = func.func
    x = func.args[0]
    df = func.diff(x)
    xi = Function("xi")
    eta = Function("eta")
    xis = match.pop('xi')
    etas = match.pop('eta')
    if match:
        # classify_ode already matched d*dx + e*dy form; h = dy/dx.
        h = -simplify(match[match['d']]/match[match['e']])
        y = match['y']
    else:
        try:
            sol = solve(eq, df)
            if sol == []:
                raise NotImplementedError
        except NotImplementedError:
            raise NotImplementedError("Unable to solve the differential equation " +
                str(eq) + " by the lie group method")
        else:
            y = Dummy("y")
            h = sol[0].subs(func, y)
    if xis is not None and etas is not None:
        # User-supplied infinitesimals: verify them before use.
        inf = [{xi(x, f(x)): S(xis), eta(x, f(x)): S(etas)}]
        if not checkinfsol(eq, inf, func=f(x), order=1)[0][0]:
            raise ValueError("The given infinitesimals xi and eta"
                " are not the infinitesimals to the given equation")
        else:
            heuristics = ["user_defined"]
    match = {'h': h, 'y': y}
    # This is done so that if:
    # a] solve raises a NotImplementedError.
    # b] any heuristic raises a ValueError
    # another heuristic can be used.
    tempsol = []  # Used by solve below
    for heuristic in heuristics:
        try:
            if not inf:
                inf = infinitesimals(eq, hint=heuristic, func=func, order=1, match=match)
        except ValueError:
            continue
        else:
            for infsim in inf:
                xiinf = (infsim[xi(x, func)]).subs(func, y)
                etainf = (infsim[eta(x, func)]).subs(func, y)
                # This condition creates recursion while using pdsolve.
                # Since the first step while solving a PDE of form
                # a*(f(x, y).diff(x)) + b*(f(x, y).diff(y)) + c = 0
                # is to solve the ODE dy/dx = b/a
                if simplify(etainf/xiinf) == h:
                    continue
                # Canonical coordinates: r is invariant, s rectifies the flow.
                rpde = f(x, y).diff(x)*xiinf + f(x, y).diff(y)*etainf
                r = pdsolve(rpde, func=f(x, y)).rhs
                s = pdsolve(rpde - 1, func=f(x, y)).rhs
                newcoord = [_lie_group_remove(coord) for coord in [r, s]]
                r = Dummy("r")
                s = Dummy("s")
                C1 = Symbol("C1")
                rcoord = newcoord[0]
                scoord = newcoord[-1]
                try:
                    sol = solve([r - rcoord, s - scoord], x, y, dict=True)
                except NotImplementedError:
                    continue
                else:
                    sol = sol[0]
                    xsub = sol[x]
                    ysub = sol[y]
                num = simplify(scoord.diff(x) + scoord.diff(y)*h)
                denom = simplify(rcoord.diff(x) + rcoord.diff(y)*h)
                if num and denom:
                    diffeq = simplify((num/denom).subs([(x, xsub), (y, ysub)]))
                    sep = separatevars(diffeq, symbols=[r, s], dict=True)
                    if sep:
                        # Trying to separate, r and s coordinates
                        deq = integrate((1/sep[s]), s) + C1 - integrate(sep['coeff']*sep[r], r)
                        # Substituting and reverting back to original coordinates
                        deq = deq.subs([(r, rcoord), (s, scoord)])
                        try:
                            sdeq = solve(deq, y)
                        except NotImplementedError:
                            tempsol.append(deq)
                        else:
                            if len(sdeq) == 1:
                                return Eq(f(x), sdeq.pop())
                            else:
                                return [Eq(f(x), sol) for sol in sdeq]
                elif denom:  # (ds/dr) is zero which means s is constant
                    return Eq(f(x), solve(scoord - C1, y)[0])
                elif num:  # (dr/ds) is zero which means r is constant
                    return Eq(f(x), solve(rcoord - C1, y)[0])
    # If nothing works, return solution as it is, without solving for y
    if tempsol:
        if len(tempsol) == 1:
            return Eq(tempsol.pop().subs(y, f(x)), 0)
        else:
            return [Eq(sol.subs(y, f(x)), 0) for sol in tempsol]
    raise NotImplementedError("The given ODE " + str(eq) + " cannot be solved by"
        + " the lie group method")
def _lie_group_remove(coords):
    r"""Strip arbitrary functions (``AppliedUndef``) out of the
    coordinate expressions produced by ``pdsolve`` so that a concrete
    change of coordinates remains.

    - A bare applied undefined function is replaced by its argument.
    - In a sum, applied undefined functions are substituted by zero.
    - In powers and products the removal is applied recursively; in a
      product, applied-undefined factors are dropped.
    - Anything else is returned unchanged.
    """
    if isinstance(coords, AppliedUndef):
        return coords.args[0]
    elif coords.is_Add:
        subfunc = coords.atoms(AppliedUndef)
        if subfunc:
            for func in subfunc:
                coords = coords.subs(func, 0)
        return coords
    elif coords.is_Pow:
        base, expo = coords.as_base_exp()
        base = _lie_group_remove(base)
        expo = _lie_group_remove(expo)
        return base**expo
    elif coords.is_Mul:
        mulargs = []
        for arg in coords.args:
            # Bug fix: the guard previously tested `coords` (always a Mul
            # here, never an AppliedUndef) so it never fired; test the loop
            # variable instead, dropping applied-undefined factors in
            # analogy with the Add branch above.
            if not isinstance(arg, AppliedUndef):
                mulargs.append(_lie_group_remove(arg))
        return Mul(*mulargs)
    return coords
def infinitesimals(eq, func=None, order=None, hint='default', match=None):
    r"""Return the infinitesimals ``(xi, eta)`` of the Lie group of point
    transformations admitted by the first-order ODE ``eq``.

    ``hint`` selects the heuristic: ``'default'`` tries each heuristic in
    ``lie_heuristics`` and returns the first success, ``'all'`` collects
    the results of every heuristic, and a specific heuristic name runs
    just that one.  Raises NotImplementedError/ValueError when nothing is
    found or the hint is unknown.
    """
    if isinstance(eq, Equality):
        eq = eq.lhs - eq.rhs
    if not func:
        eq, func = _preprocess(eq)
    variables = func.args
    if len(variables) != 1:
        raise ValueError("ODE's have only one independent variable")
    else:
        x = variables[0]
        if not order:
            order = ode_order(eq, func)
        if order != 1:
            raise NotImplementedError("Infinitesimals for only "
                "first order ODE's have been implemented")
        else:
            df = func.diff(x)
            # Matching differential equation of the form a*df + b
            a = Wild('a', exclude = [df])
            b = Wild('b', exclude = [df])
            if match:  # Used by lie_group hint
                h = match['h']
                y = match['y']
            else:
                match = collect(expand(eq), df).match(a*df + b)
                if match:
                    h = -simplify(match[b]/match[a])
                else:
                    try:
                        sol = solve(eq, df)
                    except NotImplementedError:
                        raise NotImplementedError("Infinitesimals for the "
                            "first order ODE could not be found")
                    else:
                        h = sol[0]  # Find infinitesimals for one solution
                y = Dummy("y")
                h = h.subs(func, y)
            u = Dummy("u")
            hx = h.diff(x)
            hy = h.diff(y)
            hinv = ((1/h).subs([(x, u), (y, x)])).subs(u, y)  # Inverse ODE
            match = {'h': h, 'func': func, 'hx': hx, 'hy': hy, 'y': y, 'hinv': hinv}
            if hint == 'all':
                xieta = []
                for heuristic in lie_heuristics:
                    function = globals()['lie_heuristic_' + heuristic]
                    inflist = function(match, comp=True)
                    if inflist:
                        # Deduplicate results across heuristics.
                        xieta.extend([inf for inf in inflist if inf not in xieta])
                if xieta:
                    return xieta
                else:
                    raise NotImplementedError("Infinitesimals could not be found for "
                        "the given ODE")
            elif hint == 'default':
                for heuristic in lie_heuristics:
                    function = globals()['lie_heuristic_' + heuristic]
                    xieta = function(match, comp=False)
                    if xieta:
                        return xieta
                raise NotImplementedError("Infinitesimals could not be found for"
                    " the given ODE")
            elif hint not in lie_heuristics:
                raise ValueError("Heuristic not recognized: " + hint)
            else:
                function = globals()['lie_heuristic_' + hint]
                xieta = function(match, comp=True)
                if xieta:
                    return xieta
                else:
                    raise ValueError("Infinitesimals could not be found using the"
                        " given heuristic")
def lie_heuristic_abaco1_simple(match, comp=False):
    r"""First heuristic: try infinitesimals of the four simple forms
    ``[xi = 0, eta = F(x)]``, ``[xi = 0, eta = G(y)]``,
    ``[xi = F(x), eta = 0]`` and ``[xi = G(y), eta = 0]``.

    Each form reduces the determining PDE to a single quadrature, which
    is accepted when its integrand is free of the other variable.
    Returns all matches as a list when ``comp`` is True, otherwise the
    first match; returns None (implicitly) when nothing applies.
    """
    xieta = []
    y = match['y']
    h = match['h']
    func = match['func']
    x = func.args[0]
    hx = match['hx']
    hy = match['hy']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    # Case 1: eta = F(x) with F'/F = hy (requires hy free of y).
    hysym = hy.free_symbols
    if y not in hysym:
        try:
            fx = exp(integrate(hy, x))
        except NotImplementedError:
            pass
        else:
            inf = {xi: S(0), eta: fx}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    # Case 2: eta = G(y) with G'/G = hy/h (requires hy/h free of x).
    factor = hy/h
    facsym = factor.free_symbols
    if x not in facsym:
        try:
            fy = exp(integrate(factor, y))
        except NotImplementedError:
            pass
        else:
            inf = {xi: S(0), eta: fy.subs(y, func)}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    # Case 3: xi = F(x) with F'/F = -hx/h (requires -hx/h free of y).
    factor = -hx/h
    facsym = factor.free_symbols
    if y not in facsym:
        try:
            fx = exp(integrate(factor, x))
        except NotImplementedError:
            pass
        else:
            inf = {xi: fx, eta: S(0)}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    # Case 4: xi = G(y) with G'/G = -hx/h**2 (requires it free of x).
    factor = -hx/(h**2)
    facsym = factor.free_symbols
    if x not in facsym:
        try:
            fy = exp(integrate(factor, y))
        except NotImplementedError:
            pass
        else:
            inf = {xi: fy.subs(y, func), eta: S(0)}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_abaco1_product(match, comp=False):
    r"""Second heuristic: try product-form infinitesimals
    ``[xi = f(x)*g(y), eta = 0]`` for the ODE itself and, via the inverse
    ODE, the corresponding ``[eta = f(y)*g(x), xi = 0]`` form.

    Returns a list of infinitesimal dicts (only the first match when
    ``comp`` is False), or None (implicitly) when the heuristic does not
    apply.
    """
    xieta = []
    y = match['y']
    h = match['h']
    hinv = match['hinv']
    func = match['func']
    x = func.args[0]
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    # Separability of d/dx(d/dy log h)/h**2 signals the product form.
    inf = separatevars(((log(h).diff(y)).diff(x))/h**2, dict=True, symbols=[x, y])
    if inf and inf['coeff']:
        fx = inf[x]
        gy = simplify(fx*((1/(fx*h)).diff(x)))
        gysyms = gy.free_symbols
        if x not in gysyms:
            gy = exp(integrate(gy, y))
            inf = {eta: S(0), xi: (fx*gy).subs(y, func)}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    u1 = Dummy("u1")
    # Same test on the inverse ODE, giving the eta-form infinitesimals.
    inf = separatevars(((log(hinv).diff(y)).diff(x))/hinv**2, dict=True, symbols=[x, y])
    if inf and inf['coeff']:
        fx = inf[x]
        gy = simplify(fx*((1/(fx*hinv)).diff(x)))
        gysyms = gy.free_symbols
        if x not in gysyms:
            gy = exp(integrate(gy, y))
            etaval = fx*gy
            # Swap x and y back, since this branch worked on the inverse ODE.
            etaval = (etaval.subs([(x, u1), (y, x)])).subs(u1, y)
            inf = {eta: etaval.subs(y, func), xi: S(0)}
            if not comp:
                return [inf]
            if comp and inf not in xieta:
                xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_bivariate(match, comp=False):
    r"""Heuristic assuming xi and eta are bivariate polynomials in x and y.

    The maximum polynomial degree is bounded by the total degree of the
    determining PDE's numerator; polynomials with undetermined
    coefficients are substituted in and the resulting linear system is
    solved.  Returns a one-element list of infinitesimals, or None
    (implicitly) when the ansatz fails.
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    if h.is_rational_function():
        # The maximum degree that the infinitesimals can take is
        # calculated by this technique.
        etax, etay, etad, xix, xiy, xid = symbols("etax etay etad xix xiy xid")
        ipde = etax + (etay - xix)*h - xiy*h**2 - xid*hx - etad*hy
        num, denom = cancel(ipde).as_numer_denom()
        deg = Poly(num, x, y).total_degree()
        deta = Function('deta')(x, y)
        dxi = Function('dxi')(x, y)
        ipde = (deta.diff(x) + (deta.diff(y) - dxi.diff(x))*h - (dxi.diff(y))*h**2
            - dxi*hx - deta*hy)
        xieq = Symbol("xi0")
        etaeq = Symbol("eta0")
        # Build polynomial ansatz of total degree `deg` with one symbol
        # per monomial coefficient.
        for i in range(deg + 1):
            if i:
                xieq += Add(*[
                    Symbol("xi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                    for power in range(i + 1)])
                etaeq += Add(*[
                    Symbol("eta_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                    for power in range(i + 1)])
        pden, denom = (ipde.subs({dxi: xieq, deta: etaeq}).doit()).as_numer_denom()
        pden = expand(pden)
        # If the individual terms are monomials, the coefficients
        # are grouped
        if pden.is_polynomial(x, y) and pden.is_Add:
            polyy = Poly(pden, x, y).as_dict()
            if polyy:
                symset = xieq.free_symbols.union(etaeq.free_symbols) - {x, y}
                soldict = solve(polyy.values(), *symset)
                if isinstance(soldict, list):
                    soldict = soldict[0]
                if any(soldict.values()):
                    xired = xieq.subs(soldict)
                    etared = etaeq.subs(soldict)
                    # Scaling is done by substituting one for the parameters
                    # This can be any number except zero.
                    dict_ = dict((sym, 1) for sym in symset)
                    inf = {eta: etared.subs(dict_).subs(y, func),
                        xi: xired.subs(dict_).subs(y, func)}
                    return [inf]
def lie_heuristic_chi(match, comp=False):
    r"""Heuristic solving for chi in the PDE
    ``chi_x + h*chi_y - h_y*chi = 0`` with a polynomial ansatz; the
    infinitesimals then follow from ``eta = xi*h + chi`` via polynomial
    division of chi by h.

    Returns a one-element list of infinitesimals, or None (implicitly).
    """
    h = match['h']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    if h.is_rational_function():
        # Degree bound from the numerator of the chi PDE.
        schi, schix, schiy = symbols("schi, schix, schiy")
        cpde = schix + h*schiy - hy*schi
        num, denom = cancel(cpde).as_numer_denom()
        deg = Poly(num, x, y).total_degree()
        chi = Function('chi')(x, y)
        chix = chi.diff(x)
        chiy = chi.diff(y)
        cpde = chix + h*chiy - hy*chi
        chieq = Symbol("chi")
        # Polynomial ansatz with one symbol per monomial coefficient.
        for i in range(1, deg + 1):
            chieq += Add(*[
                Symbol("chi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
                for power in range(i + 1)])
        cnum, cden = cancel(cpde.subs({chi : chieq}).doit()).as_numer_denom()
        cnum = expand(cnum)
        if cnum.is_polynomial(x, y) and cnum.is_Add:
            cpoly = Poly(cnum, x, y).as_dict()
            if cpoly:
                solsyms = chieq.free_symbols - {x, y}
                soldict = solve(cpoly.values(), *solsyms)
                if isinstance(soldict, list):
                    soldict = soldict[0]
                if any(soldict.values()):
                    chieq = chieq.subs(soldict)
                    dict_ = dict((sym, 1) for sym in solsyms)
                    chieq = chieq.subs(dict_)
                    # After finding chi, the main aim is to find out
                    # eta, xi by the equation eta = xi*h + chi
                    # One method to set xi, would be rearranging it to
                    # (eta/h) - xi = (chi/h). This would mean dividing
                    # chi by h would give -xi as the quotient and eta
                    # as the remainder. Thanks to Sean Vig for suggesting
                    # this method.
                    xic, etac = div(chieq, h)
                    inf = {eta: etac.subs(y, func), xi: -xic.subs(y, func)}
                    return [inf]
def lie_heuristic_function_sum(match, comp=False):
    r"""Heuristic: try infinitesimals of the additive-separable forms
    ``[xi = 0, eta = f(x) + g(y)]`` (via the ODE) and
    ``[eta = 0, xi = f(x) + g(y)]`` (via the inverse ODE).

    Returns a list of infinitesimal dicts (only the first when ``comp``
    is False), or None (implicitly) when neither form applies.
    """
    xieta = []
    h = match['h']
    func = match['func']
    hinv = match['hinv']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    for odefac in [h, hinv]:
        factor = odefac*((1/odefac).diff(x, 2))
        # Additive separability of (1/factor)_y signals the sum form.
        sep = separatevars((1/factor).diff(y), dict=True, symbols=[x, y])
        if sep and sep['coeff'] and sep[x].has(x) and sep[y].has(y):
            k = Dummy("k")
            try:
                gy = k*integrate(sep[y], y)
            except NotImplementedError:
                pass
            else:
                # Determine f(x) so that f'' matches fdd; the constant k is
                # fixed by the consistency check below.
                fdd = 1/(k*sep[x]*sep['coeff'])
                fx = simplify(fdd/factor - gy)
                check = simplify(fx.diff(x, 2) - fdd)
                if fx:
                    if not check:
                        fx = fx.subs(k, 1)
                        gy = (gy/k)
                    else:
                        sol = solve(check, k)
                        if sol:
                            sol = sol[0]
                            fx = fx.subs(k, sol)
                            gy = (gy/k)*sol
                        else:
                            continue
                    if odefac == hinv:  # Inverse ODE
                        fx = fx.subs(x, y)
                        gy = gy.subs(y, x)
                    etaval = factor_terms(fx + gy)
                    if etaval.is_Mul:
                        # Drop constant overall factors.
                        etaval = Mul(*[arg for arg in etaval.args if arg.has(x, y)])
                    if odefac == hinv:  # Inverse ODE
                        inf = {eta: etaval.subs(y, func), xi : S(0)}
                    else:
                        inf = {xi: etaval.subs(y, func), eta : S(0)}
                    if not comp:
                        return [inf]
                    else:
                        xieta.append(inf)
    if xieta:
        return xieta
def lie_heuristic_abaco2_similar(match, comp=False):
    r"""Heuristic: infinitesimals of the form ``xi = F(x), eta = G(x)``
    (and the analogous form via the inverse ODE).

    Works either when h matches ``A(x) + B(x)*exp(y/C)`` (constant
    hy/hyy ratio) or when the ratio leads to a y-free integrand for tau.
    Returns a one-element list of infinitesimal dicts, or None
    (implicitly) when the heuristic does not apply.
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    hinv = match['hinv']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    factor = cancel(h.diff(y)/h.diff(y, 2))
    factorx = factor.diff(x)
    factory = factor.diff(y)
    if not factor.has(x) and not factor.has(y):
        A = Wild('A', exclude=[y])
        B = Wild('B', exclude=[y])
        C = Wild('C', exclude=[x, y])
        match = h.match(A + B*exp(y/C))
        # Guard added: h.match may return None, which previously raised an
        # uncaught TypeError on subscripting.
        if match:
            try:
                # Bug fix: the closing parenthesis previously fell inside
                # exp(...), calling integrate() without its integration
                # variable and exp() with two arguments.
                tau = exp(-integrate(match[A]/match[C], x))/match[B]
            except NotImplementedError:
                pass
            else:
                gx = match[A]*tau
                return [{xi: tau, eta: gx}]
    else:
        gamma = cancel(factorx/factory)
        if not gamma.has(y):
            tauint = cancel((gamma*hy - gamma.diff(x) - hx)/(h + gamma))
            if not tauint.has(y):
                try:
                    tau = exp(integrate(tauint, x))
                except NotImplementedError:
                    pass
                else:
                    gx = -tau*gamma
                    return [{xi: tau, eta: gx}]
    # Repeat for the inverse ODE, swapping the roles of xi and eta.
    factor = cancel(hinv.diff(y)/hinv.diff(y, 2))
    factorx = factor.diff(x)
    factory = factor.diff(y)
    if not factor.has(x) and not factor.has(y):
        A = Wild('A', exclude=[y])
        B = Wild('B', exclude=[y])
        C = Wild('C', exclude=[x, y])
        # NOTE(review): this matches against h, not hinv, even though this
        # is the inverse-ODE branch — looks suspicious but is preserved
        # as-is; confirm intent before changing.
        match = h.match(A + B*exp(y/C))
        if match:
            try:
                # Same parenthesis fix as above.
                tau = exp(-integrate(match[A]/match[C], x))/match[B]
            except NotImplementedError:
                pass
            else:
                gx = match[A]*tau
                return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
    else:
        gamma = cancel(factorx/factory)
        if not gamma.has(y):
            tauint = cancel((gamma*hinv.diff(y) - gamma.diff(x) - hinv.diff(x))/(
                hinv + gamma))
            if not tauint.has(y):
                try:
                    tau = exp(integrate(tauint, x))
                except NotImplementedError:
                    pass
                else:
                    gx = -tau*gamma
                    return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
def lie_heuristic_abaco2_unique_unknown(match, comp=False):
    r"""Heuristic for ODEs containing an "unknown" piece ``F(x, y)``: an
    applied undefined function of both variables, or a power mixing x
    and y with a non-integer exponent.

    For each such piece, trial infinitesimals are constructed from the
    ratio ``Fy/Fx`` and checked against the determining PDE; the first
    pair that satisfies it is returned as a one-element list, otherwise
    None (implicitly).
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    funclist = []
    for atom in h.atoms(Pow):
        # Renamed from `exp` to avoid shadowing the exponential function.
        base, expon = atom.as_base_exp()
        if base.has(x) and base.has(y):
            if not expon.is_Integer:
                funclist.append(atom)
    for function in h.atoms(AppliedUndef):
        syms = function.free_symbols
        if x in syms and y in syms:
            funclist.append(function)
    for f in funclist:
        frac = cancel(f.diff(y)/f.diff(x))
        sep = separatevars(frac, dict=True, symbols=[x, y])
        if sep and sep['coeff']:
            # Separable ratio: try [xi, eta] and the reciprocal pair.
            xitry1 = sep[x]
            etatry1 = -1/(sep[y]*sep['coeff'])
            pde1 = etatry1.diff(y)*h - xitry1.diff(x)*h - xitry1*hx - etatry1*hy
            if not simplify(pde1):
                return [{xi: xitry1, eta: etatry1.subs(y, func)}]
            xitry2 = 1/etatry1
            etatry2 = 1/xitry1
            pde2 = etatry2.diff(x) - (xitry2.diff(y))*h**2 - xitry2*hx - etatry2*hy
            if not simplify(expand(pde2)):
                return [{xi: xitry2.subs(y, func), eta: etatry2}]
        else:
            # Non-separable ratio: try [1, -1/frac] and [-frac, 1].
            etatry = -1/frac
            pde = etatry.diff(x) + etatry.diff(y)*h - hx - etatry*hy
            if not simplify(pde):
                return [{xi: S(1), eta: etatry.subs(y, func)}]
            xitry = -frac
            pde = -xitry.diff(x)*h - xitry.diff(y)*h**2 - xitry*hx - hy
            if not simplify(expand(pde)):
                return [{xi: xitry.subs(y, func), eta: S(1)}]
def lie_heuristic_abaco2_unique_general(match, comp=False):
    r"""Heuristic deciding, via the auxiliary quantities A, B, C built
    from hx and hy, whether a unique infinitesimal pair of the form
    ``xi = G(y), eta = F(y)`` exists, and constructing it when the
    consistency conditions (D, E1, E2, E3) hold.

    Returns a one-element list of infinitesimals, or None (implicitly).
    """
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    A = hx.diff(y)
    B = hy.diff(y) + hy**2
    C = hx.diff(x) - hx**2
    if not (A and B and C):
        return
    Ax = A.diff(x)
    Ay = A.diff(y)
    Axy = Ax.diff(y)
    Axx = Ax.diff(x)
    Ayy = Ay.diff(y)
    D = simplify(2*Axy + hx*Ay - Ax*hy + (hx*hy + 2*A)*A)*A - 3*Ax*Ay
    if not D:
        E1 = simplify(3*Ax**2 + ((hx**2 + 2*C)*A - 2*Axx)*A)
        if E1:
            E2 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
            if not E2:
                E3 = simplify(
                    E1*((28*Ax + 4*hx*A)*A**3 - E1*(hy*A + Ay)) - E1.diff(x)*8*A**4)
                if not E3:
                    etaval = cancel((4*A**3*(Ax - hx*A) + E1*(hy*A - Ay))/(S(2)*A*E1))
                    # `in` on a sympy expression tests membership in the
                    # expression tree (Basic.__contains__).
                    if x not in etaval:
                        try:
                            etaval = exp(integrate(etaval, y))
                        except NotImplementedError:
                            pass
                        else:
                            xival = -4*A**3*etaval/E1
                            if y not in xival:
                                return [{xi: xival, eta: etaval.subs(y, func)}]
    else:
        E1 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
        if E1:
            E2 = simplify(
                4*A**3*D - D**2 + E1*((2*Axx - (hx**2 + 2*C)*A)*A - 3*Ax**2))
            if not E2:
                E3 = simplify(
                    -(A*D)*E1.diff(y) + ((E1.diff(x) - hy*D)*A + 3*Ay*D +
                        (A*hx - 3*Ax)*E1)*E1)
                if not E3:
                    etaval = cancel(((A*hx - Ax)*E1 - (Ay + A*hy)*D)/(S(2)*A*D))
                    if x not in etaval:
                        try:
                            etaval = exp(integrate(etaval, y))
                        except NotImplementedError:
                            pass
                        else:
                            xival = -E1*etaval/D
                            if y not in xival:
                                return [{xi: xival, eta: etaval.subs(y, func)}]
def lie_heuristic_linear(match, comp=False):
    r"""Heuristic assuming xi and eta are linear in x and y:
    ``xi = c0*x + c1*y + c2`` and ``eta = c3*x + c4*y + c5``.

    Substituting into the determining PDE and collecting coefficients of
    the monomials in x and y gives a linear system for c0..c5; any
    nontrivial solution (remaining free parameters set to 1) yields the
    infinitesimals.  Returns a one-element list, or None (implicitly).
    """
    h = match['h']
    hx = match['hx']
    hy = match['hy']
    func = match['func']
    x = func.args[0]
    y = match['y']
    xi = Function('xi')(x, func)
    eta = Function('eta')(x, func)
    coeffdict = {}
    symbols = numbered_symbols("c", cls=Dummy)
    # NOTE(review): next(symbols) inside the comprehension interleaves
    # with islice over the same generator, so the six Dummys are not
    # consecutively numbered; harmless since only distinctness matters.
    symlist = [next(symbols) for _ in islice(symbols, 6)]
    C0, C1, C2, C3, C4, C5 = symlist
    # Determining PDE with the linear ansatz substituted in.
    pde = C3 + (C4 - C0)*h - (C0*x + C1*y + C2)*hx - (C3*x + C4*y + C5)*hy - C1*h**2
    pde, denom = pde.as_numer_denom()
    pde = powsimp(expand(pde))
    if pde.is_Add:
        terms = pde.args
        # Group coefficients by their (x, y)-dependent part.
        for term in terms:
            if term.is_Mul:
                rem = Mul(*[m for m in term.args if not m.has(x, y)])
                xypart = term/rem
                if xypart not in coeffdict:
                    coeffdict[xypart] = rem
                else:
                    coeffdict[xypart] += rem
            else:
                if term not in coeffdict:
                    coeffdict[term] = S(1)
                else:
                    coeffdict[term] += S(1)
    sollist = coeffdict.values()
    soldict = solve(sollist, symlist)
    if soldict:
        if isinstance(soldict, list):
            soldict = soldict[0]
        subval = soldict.values()
        if any(t for t in subval):
            # Free parameters are scaled to 1 (any nonzero value works).
            onedict = dict(zip(symlist, [1]*6))
            xival = C0*x + C1*func + C2
            etaval = C3*x + C4*func + C5
            xival = xival.subs(soldict)
            etaval = etaval.subs(soldict)
            xival = xival.subs(onedict)
            etaval = etaval.subs(onedict)
            return [{xi: xival, eta: etaval}]
def sysode_linear_2eq_order1(match_):
    r"""Solve a system of two first-order linear ODEs.

    Each equation is normalized by its leading coefficient, the constant
    coefficient matrix ``[[a, b], [c, d]]`` and constant forcing
    ``(k1, k2)`` are extracted, and the problem is dispatched on
    'type_of_equation' (type1..type7) to the matching solver.  Only
    constant inhomogeneities are supported; otherwise
    NotImplementedError is raised.
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # Independent variable, recovered from the first Derivative found.
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    # Normalize each equation by its leading (derivative) coefficient.
    for i in range(2):
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # for equations Eq(a1*diff(x(t),t), a*x(t) + b*y(t) + k1)
    # and Eq(a2*diff(x(t),t), c*x(t) + d*y(t) + k2)
    r['a'] = -fc[0,x(t),0]/fc[0,x(t),1]
    r['c'] = -fc[1,x(t),0]/fc[1,y(t),1]
    r['b'] = -fc[0,y(t),0]/fc[0,x(t),1]
    r['d'] = -fc[1,y(t),0]/fc[1,y(t),1]
    # Collect the terms free of x(t) and y(t): the forcing.
    forcing = [S(0),S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t)):
                forcing[i] += j
    if not (forcing[0].has(t) or forcing[1].has(t)):
        # Constant forcing only is supported.
        r['k1'] = forcing[0]
        r['k2'] = forcing[1]
    else:
        raise NotImplementedError("Only homogeneous problems are supported" +
            " (and constant inhomogeneity)")
    if match_['type_of_equation'] == 'type1':
        sol = _linear_2eq_order1_type1(x, y, t, r, eq)
    if match_['type_of_equation'] == 'type2':
        # type2: general solution of the homogeneous system plus a
        # particular solution for the constant forcing.
        gsol = _linear_2eq_order1_type1(x, y, t, r, eq)
        psol = _linear_2eq_order1_type2(x, y, t, r, eq)
        sol = [Eq(x(t), gsol[0].rhs+psol[0]), Eq(y(t), gsol[1].rhs+psol[1])]
    if match_['type_of_equation'] == 'type3':
        sol = _linear_2eq_order1_type3(x, y, t, r, eq)
    if match_['type_of_equation'] == 'type4':
        sol = _linear_2eq_order1_type4(x, y, t, r, eq)
    if match_['type_of_equation'] == 'type5':
        sol = _linear_2eq_order1_type5(x, y, t, r, eq)
    if match_['type_of_equation'] == 'type6':
        sol = _linear_2eq_order1_type6(x, y, t, r, eq)
    if match_['type_of_equation'] == 'type7':
        sol = _linear_2eq_order1_type7(x, y, t, r, eq)
    return sol
def _linear_2eq_order1_type1(x, y, t, r, eq):
    """General solution of x' = a*x + b*y, y' = c*x + d*y with constant
    coefficients, assembled as a Piecewise over the discriminant
    D = (a - d)**2 + 4*b*c: distinct eigenvalues (exponential form),
    complex-conjugate eigenvalues (trigonometric form), scalar matrix,
    and repeated eigenvalue with a generalized eigenvector.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    a, b, c, d = r['a'], r['b'], r['c'], r['d']
    real_coeff = all(v.is_real for v in (a, b, c, d))
    D = (a - d)**2 + 4*b*c
    # Eigenvalues of the coefficient matrix [[a, b], [c, d]].
    l1 = (a + d + sqrt(D))/2
    l2 = (a + d - sqrt(D))/2
    equal_roots = Eq(D, 0).expand()
    gsol1, gsol2 = [], []

    # Solutions have exponential form if either D > 0 with real coefficients
    # or D != 0 with complex coefficients. Eigenvalues are distinct.
    # For each eigenvalue lam, pick an eigenvector, making sure we don't get (0, 0)
    exponential_form = D > 0 if real_coeff else Not(equal_roots)
    bad_ab_vector1 = And(Eq(b, 0), Eq(l1, a))
    bad_ab_vector2 = And(Eq(b, 0), Eq(l2, a))
    vector1 = Matrix((Piecewise((l1 - d, bad_ab_vector1), (b, True)),
                      Piecewise((c, bad_ab_vector1), (l1 - a, True))))
    vector2 = Matrix((Piecewise((l2 - d, bad_ab_vector2), (b, True)),
                      Piecewise((c, bad_ab_vector2), (l2 - a, True))))
    sol_vector = C1*exp(l1*t)*vector1 + C2*exp(l2*t)*vector2
    gsol1.append((sol_vector[0], exponential_form))
    gsol2.append((sol_vector[1], exponential_form))

    # Complex-conjugate eigenvalues sigma +- I*beta (only checked with real
    # coefficients): exp(sigma*t) times cos/sin combinations.
    trigonometric_form = D < 0 if real_coeff else False
    sigma = re(l1)
    if im(l1).is_positive:
        beta = im(l1)
    else:
        beta = im(l2)
    vector1 = Matrix((b, sigma - a))
    vector2 = Matrix((0, beta))
    sol_vector = exp(sigma*t) * (C1*(cos(beta*t)*vector1 - sin(beta*t)*vector2) + \
                 C2*(sin(beta*t)*vector1 + cos(beta*t)*vector2))
    gsol1.append((sol_vector[0], trigonometric_form))
    gsol2.append((sol_vector[1], trigonometric_form))

    # Scalar matrix a*I (b == c == 0, a == d): every vector is an eigenvector.
    scalar_matrix = And(Eq(a, d), Eq(b, 0), Eq(c, 0))
    vector1 = Matrix((S.One, S.Zero))
    vector2 = Matrix((S.Zero, S.One))
    sol_vector = exp(l1*t) * (C1*vector1 + C2*vector2)
    gsol1.append((sol_vector[0], scalar_matrix))
    gsol2.append((sol_vector[1], scalar_matrix))

    # Repeated eigenvalue, non-scalar matrix: add the secular term t*vector1
    # built from a generalized eigenvector.
    vector1 = Matrix((Piecewise((l1 - d, bad_ab_vector1), (b, True)),
                      Piecewise((c, bad_ab_vector1), (l1 - a, True))))
    vector2 = Matrix((Piecewise((S.One, bad_ab_vector1), (S.Zero, Eq(a, l1)),
                                (b/(a - l1), True)),
                      Piecewise((S.Zero, bad_ab_vector1), (S.One, Eq(a, l1)),
                                (S.Zero, True))))
    sol_vector = exp(l1*t) * (C1*vector1 + C2*(vector2 + t*vector1))
    gsol1.append((sol_vector[0], equal_roots))
    gsol2.append((sol_vector[1], equal_roots))
    return [Eq(x(t), Piecewise(*gsol1)), Eq(y(t), Piecewise(*gsol2))]
def _linear_2eq_order1_type2(x, y, t, r, eq):
    """Particular solution for the constant forcing terms k1, k2 of
    x' = a*x + b*y + k1, y' = c*x + d*y + k2.  Returns ``[psol_x, psol_y]``;
    the caller adds these to the general homogeneous solution.
    """
    r['k1'] = -r['k1']; r['k2'] = -r['k2']
    if (r['a']*r['d'] - r['b']*r['c']) != 0:
        # Non-singular coefficient matrix: the particular solution is the
        # constant equilibrium point of the system.
        x0, y0 = symbols('x0, y0', cls=Dummy)
        sol = solve((r['a']*x0+r['b']*y0+r['k1'], r['c']*x0+r['d']*y0+r['k2']), x0, y0)
        psol = [sol[x0], sol[y0]]
    elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2+r['b']**2) > 0:
        # Singular matrix with a nonzero first row: rows are proportional
        # with ratio k = c/a.
        k = r['c']/r['a']
        sigma = r['a'] + r['b']*k
        if sigma != 0:
            sol1 = r['b']*sigma**-1*(r['k1']*k-r['k2'])*t - sigma**-2*(r['a']*r['k1']+r['b']*r['k2'])
            sol2 = k*sol1 + (r['k2']-r['k1']*k)*t
        else:
            # Fully degenerate case: polynomial-in-t particular solution.
            sol1 = r['b']*(r['k2']-r['k1']*k)*t**2 + r['k1']*t
            sol2 = k*sol1 + (r['k2']-r['k1']*k)*t
        psol = [sol1, sol2]
    # NOTE(review): psol is unbound if neither branch matches; presumably the
    # classifier guarantees one of them holds -- confirm upstream.
    return psol
def _linear_2eq_order1_type3(x, y, t, r, eq):
    """Symmetric-coupling case: with F = Integral(a, t) and G = Integral(b, t)
    the solutions are exp(F)*(C1*exp(G) +- C2*exp(-G))."""
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    F = Integral(r['a'], t)
    G = Integral(r['b'], t)
    sol1 = exp(F)*(C1*exp(G) + C2*exp(-G))
    sol2 = exp(F)*(C1*exp(G) - C2*exp(-G))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type4(x, y, t, r, eq):
    """Antisymmetric-coupling case (b == -c or d == -a): the solution is a
    rotation by G = Integral of the coupling, scaled by F = exp(Integral of
    the diagonal term)."""
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    if r['b'] == -r['c']:
        F = exp(Integral(r['a'], t))
        G = Integral(r['b'], t)
        sol1 = F*(C1*cos(G) + C2*sin(G))
        sol2 = F*(-C1*sin(G) + C2*cos(G))
    elif r['d'] == -r['a']:
        # Mirrored case, driven by c and d instead of a and b.
        F = exp(Integral(r['c'], t))
        G = Integral(r['d'], t)
        sol1 = F*(-C1*sin(G) + C2*cos(G))
        sol2 = F*(C1*cos(G) + C2*sin(G))
    # NOTE(review): sol1/sol2 are unbound if neither condition holds;
    # presumably the classifier guarantees one does -- confirm upstream.
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type5(x, y, t, r, eq):
    """Solve the system when the ratio c/b (or, in the mirrored case, a/d)
    is constant in t.  Substituting T = Integral(b, t) (resp.
    Integral(d, t)) reduces the problem to a constant-coefficient system in
    T, which is solved recursively with dsolve and transformed back.
    """
    u, v = symbols('u, v', cls=Function)
    T = Symbol('T')
    if not cancel(r['c']/r['b']).has(t):
        p = cancel(r['c']/r['b'])
        q = cancel((r['d']-r['a'])/r['b'])
        eq = (Eq(diff(u(T),T), v(T)), Eq(diff(v(T),T), p*u(T)+q*v(T)))
        sol = dsolve(eq)
        sol1 = exp(Integral(r['a'], t))*sol[0].rhs.subs(T, Integral(r['b'], t))
        sol2 = exp(Integral(r['a'], t))*sol[1].rhs.subs(T, Integral(r['b'], t))
    if not cancel(r['a']/r['d']).has(t):
        p = cancel(r['a']/r['d'])
        q = cancel((r['b']-r['c'])/r['d'])
        # BUGFIX: the two equations must be passed to dsolve as one system.
        # Previously the second Eq was passed as dsolve's ``func`` argument,
        # which is wrong (compare the tuple form in the branch above).
        sol = dsolve((Eq(diff(u(T),T), v(T)), Eq(diff(v(T),T), p*u(T)+q*v(T))))
        sol1 = exp(Integral(r['c'], t))*sol[1].rhs.subs(T, Integral(r['d'], t))
        sol2 = exp(Integral(r['c'], t))*sol[0].rhs.subs(T, Integral(r['d'], t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type6(x, y, t, r, eq):
    """Search for a constant s such that one equation is s times a linear
    combination of the other; if found (p == 1 or p == 2 records which
    orientation matched), reduce to a single first-order ODE and recover the
    second function algebraically.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    p = 0
    q = 0
    # Strip common numeric content from the cross coefficients before
    # scanning their constant factors as candidates for s.
    p1 = cancel(r['c']/cancel(r['c']/r['d']).as_numer_denom()[0])
    p2 = cancel(r['a']/cancel(r['a']/r['b']).as_numer_denom()[0])
    for n, i in enumerate([p1, p2]):
        for j in Mul.make_args(collect_const(i)):
            if not j.has(t):
                q = j
            if q!=0 and n==0:
                # Candidate must satisfy the consistency condition for
                # y = s*x + C1*exp(...) to decouple the system.
                if ((r['c']/j - r['a'])/(r['b'] - r['d']/j)) == j:
                    p = 1
                    s = j
                    break
            if q!=0 and n==1:
                if ((r['a']/j - r['c'])/(r['d'] - r['b']/j)) == j:
                    p = 2
                    s = j
                    break
    if p == 1:
        # Solve for x first, then y = s*x + C1*exp(...).
        equ = diff(x(t),t) - r['a']*x(t) - r['b']*(s*x(t) + C1*exp(-s*Integral(r['b'] - r['d']/s, t)))
        hint1 = classify_ode(equ)[1]
        sol1 = dsolve(equ, hint=hint1+'_Integral').rhs
        sol2 = s*sol1 + C1*exp(-s*Integral(r['b'] - r['d']/s, t))
    elif p ==2:
        # Mirrored orientation: solve for y first.
        equ = diff(y(t),t) - r['c']*y(t) - r['d']*s*y(t) + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
        hint1 = classify_ode(equ)[1]
        sol2 = dsolve(equ, hint=hint1+'_Integral').rhs
        sol1 = s*sol2 + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type7(x, y, t, r, eq):
    """Reduce the system to a single second-order ODE in one unknown when
    one of several solvability conditions (e1, e2 vanishing, or e/m ratios
    constant in t) holds; otherwise express the answer by quadratures in
    terms of an unknown particular solution x0, y0.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    # Solvability invariants built from the (possibly t-dependent) coefficients.
    e1 = r['a']*r['b']*r['c'] - r['b']**2*r['c'] + r['a']*diff(r['b'],t) - diff(r['a'],t)*r['b']
    e2 = r['a']*r['c']*r['d'] - r['b']*r['c']**2 + diff(r['c'],t)*r['d'] - r['c']*diff(r['d'],t)
    m1 = r['a']*r['b'] + r['b']*r['d'] + diff(r['b'],t)
    m2 = r['a']*r['c'] + r['c']*r['d'] + diff(r['c'],t)
    if e1 == 0:
        sol1 = dsolve(r['b']*diff(x(t),t,t) - m1*diff(x(t),t)).rhs
        sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
    elif e2 == 0:
        sol2 = dsolve(r['c']*diff(y(t),t,t) - m2*diff(y(t),t)).rhs
        sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
    elif not (e1/r['b']).has(t) and not (m1/r['b']).has(t):
        # Constant-coefficient second-order ODE for x(t).
        sol1 = dsolve(diff(x(t),t,t) - (m1/r['b'])*diff(x(t),t) - (e1/r['b'])*x(t)).rhs
        sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
    elif not (e2/r['c']).has(t) and not (m2/r['c']).has(t):
        sol2 = dsolve(diff(y(t),t,t) - (m2/r['c'])*diff(y(t),t) - (e2/r['c'])*y(t)).rhs
        sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
    else:
        # Fallback: answer left in terms of an (unspecified) particular
        # solution pair x0(t), y0(t).
        x0 = Function('x0')(t)
        y0 = Function('y0')(t)
        F = exp(Integral(r['a'],t))
        P = exp(Integral(r['d'],t))
        sol1 = C1*x0 + C2*x0*Integral(r['b']*F*P/x0**2, t)
        sol2 = C1*y0 + C2*(F*P/x0 + y0*Integral(r['b']*F*P/x0**2, t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def sysode_linear_2eq_order2(match_):
    """Solve a system of two linear second-order ODEs.

    Normalises each equation by the coefficient of its second derivative,
    extracts the coefficients of x', y', x, y into ``r`` (keys a*, b*, c*,
    d*) and the inhomogeneity into e1/e2, then dispatches on
    ``match_['type_of_equation']`` ('type1' .. 'type11').
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # Recover the independent variable from the derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    for i in range(2):
        # Normalise so the leading (second) derivative has coefficient 1.
        eqs = []
        for terms in Add.make_args(eq[i]):
            eqs.append(terms/fc[i,func[i],2])
        eq[i] = Add(*eqs)
    # a*: coefficients of x'(t);  b*: of y'(t);  c*: of x(t);  d*: of y(t).
    r['a1'] = -fc[0,x(t),1]/fc[0,x(t),2] ; r['a2'] = -fc[1,x(t),1]/fc[1,y(t),2]
    r['b1'] = -fc[0,y(t),1]/fc[0,x(t),2] ; r['b2'] = -fc[1,y(t),1]/fc[1,y(t),2]
    r['c1'] = -fc[0,x(t),0]/fc[0,x(t),2] ; r['c2'] = -fc[1,x(t),0]/fc[1,y(t),2]
    r['d1'] = -fc[0,y(t),0]/fc[0,x(t),2] ; r['d2'] = -fc[1,y(t),0]/fc[1,y(t),2]
    # Inhomogeneous terms (free of x(t) and y(t)).
    const = [S(0), S(0)]
    for i in range(2):
        for j in Add.make_args(eq[i]):
            if not (j.has(x(t)) or j.has(y(t))):
                const[i] += j
    r['e1'] = -const[0]
    r['e2'] = -const[1]
    if match_['type_of_equation'] == 'type1':
        sol = _linear_2eq_order2_type1(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type2':
        # Homogeneous general solution plus particular solution for e1/e2.
        gsol = _linear_2eq_order2_type1(x, y, t, r, eq)
        psol = _linear_2eq_order2_type2(x, y, t, r, eq)
        sol = [Eq(x(t), gsol[0].rhs+psol[0]), Eq(y(t), gsol[1].rhs+psol[1])]
    elif match_['type_of_equation'] == 'type3':
        sol = _linear_2eq_order2_type3(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type4':
        sol = _linear_2eq_order2_type4(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type5':
        sol = _linear_2eq_order2_type5(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type6':
        sol = _linear_2eq_order2_type6(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type7':
        sol = _linear_2eq_order2_type7(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type8':
        sol = _linear_2eq_order2_type8(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type9':
        sol = _linear_2eq_order2_type9(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type10':
        sol = _linear_2eq_order2_type10(x, y, t, r, eq)
    elif match_['type_of_equation'] == 'type11':
        sol = _linear_2eq_order2_type11(x, y, t, r, eq)
    return sol
def _linear_2eq_order2_type1(x, y, t, r, eq):
    """General solution of x'' = a*x + b*y, y'' = c*x + d*y with constant
    coefficients, via the quartic characteristic equation
    l**4 - (a+d)*l**2 + (a*d - b*c) = 0, with special handling of the
    degenerate cases (repeated roots, singular coefficient matrix).
    """
    r['a'] = r['c1']
    r['b'] = r['d1']
    r['c'] = r['c2']
    r['d'] = r['d2']
    l = Symbol('l')
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    chara_eq = l**4 - (r['a']+r['d'])*l**2 + r['a']*r['d'] - r['b']*r['c']
    l1 = rootof(chara_eq, 0)
    l2 = rootof(chara_eq, 1)
    l3 = rootof(chara_eq, 2)
    l4 = rootof(chara_eq, 3)
    # Discriminant of the quadratic in l**2.
    D = (r['a'] - r['d'])**2 + 4*r['b']*r['c']
    if (r['a']*r['d'] - r['b']*r['c']) != 0:
        if D != 0:
            # Four distinct characteristic roots.
            gsol1 = C1*r['b']*exp(l1*t) + C2*r['b']*exp(l2*t) + C3*r['b']*exp(l3*t) \
            + C4*r['b']*exp(l4*t)
            gsol2 = C1*(l1**2-r['a'])*exp(l1*t) + C2*(l2**2-r['a'])*exp(l2*t) + \
            C3*(l3**2-r['a'])*exp(l3*t) + C4*(l4**2-r['a'])*exp(l4*t)
        else:
            # Repeated roots in l**2: secular (t-multiplied) terms appear.
            if r['a'] != r['d']:
                k = sqrt(2*(r['a']+r['d']))
                mid = r['b']*t+2*r['b']*k/(r['a']-r['d'])
                gsol1 = 2*C1*mid*exp(k*t/2) + 2*C2*mid*exp(-k*t/2) + \
                2*r['b']*C3*t*exp(k*t/2) + 2*r['b']*C4*t*exp(-k*t/2)
                gsol2 = C1*(r['d']-r['a'])*t*exp(k*t/2) + C2*(r['d']-r['a'])*t*exp(-k*t/2) + \
                C3*((r['d']-r['a'])*t+2*k)*exp(k*t/2) + C4*((r['d']-r['a'])*t-2*k)*exp(-k*t/2)
            elif r['a'] == r['d'] != 0 and r['b'] == 0:
                sa = sqrt(r['a'])
                gsol1 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t)
                gsol2 = r['c']*C1*t*exp(sa*t)-r['c']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t)
            elif r['a'] == r['d'] != 0 and r['c'] == 0:
                sa = sqrt(r['a'])
                gsol1 = r['b']*C1*t*exp(sa*t)-r['b']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t)
                gsol2 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t)
    elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2 + r['b']**2) > 0:
        # Singular coefficient matrix: rows proportional with ratio k = c/a.
        k = r['c']/r['a']
        if r['a'] + r['b']*k != 0:
            mid = sqrt(r['a'] + r['b']*k)
            gsol1 = C1*exp(mid*t) + C2*exp(-mid*t) + C3*r['b']*t + C4*r['b']
            gsol2 = C1*k*exp(mid*t) + C2*k*exp(-mid*t) - C3*r['a']*t - C4*r['a']
        else:
            # Fully degenerate: polynomial solutions.
            gsol1 = C1*r['b']*t**3 + C2*r['b']*t**2 + C3*t + C4
            gsol2 = k*gsol1 + 6*C1*t + 2*C2
    return [Eq(x(t), gsol1), Eq(y(t), gsol2)]
def _linear_2eq_order2_type2(x, y, t, r, eq):
    """Particular solution for constant inhomogeneities e1, e2 of
    x'' = c1*x + d1*y + e1, y'' = c2*x + d2*y + e2.  Returns
    ``[psol_x, psol_y]``; the caller adds the homogeneous general solution.
    """
    x0, y0 = symbols('x0, y0')
    if r['c1']*r['d2'] - r['c2']*r['d1'] != 0:
        # Non-singular matrix: the constant equilibrium is the particular
        # solution.
        sol = solve((r['c1']*x0+r['d1']*y0+r['e1'], r['c2']*x0+r['d2']*y0+r['e2']), x0, y0)
        psol = [sol[x0], sol[y0]]
    elif r['c1']*r['d2'] - r['c2']*r['d1'] == 0 and (r['c1']**2 + r['d1']**2) > 0:
        # Singular matrix with nonzero first row: rows proportional with
        # ratio k; the particular solution is polynomial in t.
        k = r['c2']/r['c1']
        sig = r['c1'] + r['d1']*k
        if sig != 0:
            psol1 = r['d1']*sig**-1*(r['e1']*k-r['e2'])*t**2/2 - \
            sig**-2*(r['c1']*r['e1']+r['d1']*r['e2'])
            psol2 = k*psol1 + (r['e2'] - r['e1']*k)*t**2/2
            psol = [psol1, psol2]
        else:
            psol1 = r['d1']*(r['e2']-r['e1']*k)*t**4/24 + r['e1']*t**2/2
            psol2 = k*psol1 + (r['e2']-r['e1']*k)*t**2/2
            psol = [psol1, psol2]
    # NOTE(review): psol is unbound if neither branch matches; presumably
    # the classifier guarantees one of them holds -- confirm upstream.
    return psol
def _linear_2eq_order2_type3(x, y, t, r, eq):
    """Oscillatory solution built from the two frequencies alpha, beta
    derived from b1 and c1.  Only the branch b1**2 - 4*c1 > 0 is handled.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    if r['b1']**2 - 4*r['c1'] > 0:
        r['a'] = r['b1'] ; r['b'] = -r['c1']
        alpha = r['a']/2 + sqrt(r['a']**2 + 4*r['b'])/2
        beta = r['a']/2 - sqrt(r['a']**2 + 4*r['b'])/2
        sol1 = C1*cos(alpha*t) + C2*sin(alpha*t) + C3*cos(beta*t) + C4*sin(beta*t)
        sol2 = -C1*sin(alpha*t) + C2*cos(alpha*t) - C3*sin(beta*t) + C4*cos(beta*t)
    # NOTE(review): sol1/sol2 are unbound when the condition is false;
    # presumably the classifier only routes matching systems here -- confirm.
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type4(x, y, t, r, eq):
    """Constant-coefficient system with sinusoidal forcing exp(I*w*t).

    The forcing frequency w is extracted from e1; a particular solution with
    complex amplitudes (Ra + I*Ca, Rb + I*Cb) is found by solving a linear
    system, and the homogeneous part comes from the quartic characteristic
    polynomial.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    k = Symbol('k')
    Ra, Ca, Rb, Cb = symbols('Ra, Ca, Rb, Cb')
    a1 = r['a1'] ; a2 = r['a2']
    b1 = r['b1'] ; b2 = r['b2']
    c1 = r['c1'] ; c2 = r['c2']
    d1 = r['d1'] ; d2 = r['d2']
    # Forcing amplitudes and frequency: e_i = k_i * exp(I*w*t).
    k1 = r['e1'].expand().as_independent(t)[0]
    k2 = r['e2'].expand().as_independent(t)[0]
    ew1 = r['e1'].expand().as_independent(t)[1]
    ew2 = powdenest(ew1).as_base_exp()[1]
    ew3 = collect(ew2, t).coeff(t)
    w = cancel(ew3/I)
    # Linear system for the real/imaginary parts of the particular amplitudes.
    peq1 = (-w**2+c1)*Ra - a1*w*Ca + d1*Rb - b1*w*Cb - k1
    peq2 = a1*w*Ra + (-w**2+c1)*Ca + b1*w*Rb + d1*Cb
    peq3 = c2*Ra - a2*w*Ca + (-w**2+d2)*Rb - b2*w*Cb - k2
    peq4 = a2*w*Ra + c2*Ca + b2*w*Rb + (-w**2+d2)*Cb
    psol = solve([peq1, peq2, peq3, peq4])
    # Characteristic quartic of the homogeneous system.
    # NOTE(review): k1..k4 below shadow the forcing amplitudes k1, k2 above;
    # the amplitudes are no longer needed at this point.
    chareq = (k**2+a1*k+c1)*(k**2+b2*k+d2) - (b1*k+d1)*(a2*k+c2)
    [k1, k2, k3, k4] = roots_quartic(Poly(chareq))
    sol1 = -C1*(b1*k1+d1)*exp(k1*t) - C2*(b1*k2+d1)*exp(k2*t) - \
    C3*(b1*k3+d1)*exp(k3*t) - C4*(b1*k4+d1)*exp(k4*t) + (Ra+I*Ca)*exp(I*w*t)
    a1_ = (a1-1)
    sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*t) + C2*(k2**2+a1_*k2+c1)*exp(k2*t) + \
    C3*(k3**2+a1_*k3+c1)*exp(k3*t) + C4*(k4**2+a1_*k4+c1)*exp(k4*t) + (Rb+I*Cb)*exp(I*w*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type5(x, y, t, r, eq):
    """Cross-coupled system reduced through auxiliary functions u, v; the
    solutions are t*(C + Integral(u/t**2)) with u, v exponential or
    trigonometric depending on the sign of a*b.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    r['a'] = -r['d1'] ; r['b'] = -r['c2']
    mul = sqrt(abs(r['a']*r['b']))
    if r['a']*r['b'] > 0:
        u = C1*r['a']*exp(mul*t**2/2) + C2*r['a']*exp(-mul*t**2/2)
        v = C1*mul*exp(mul*t**2/2) - C2*mul*exp(-mul*t**2/2)
    else:
        # Negative product: oscillatory auxiliary functions.
        u = C1*r['a']*cos(mul*t**2/2) + C2*r['a']*sin(mul*t**2/2)
        v = -C1*mul*sin(mul*t**2/2) + C2*mul*cos(mul*t**2/2)
    sol1 = C3*t + t*Integral(u/t**2, t)
    sol2 = C4*t + t*Integral(v/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type6(x, y, t, r, eq):
    """System whose right-hand sides share a common factor f(t): factor it
    out, diagonalise the remaining constant 2x2 matrix via its
    characteristic roots k1, k2, solve two decoupled second-order ODEs in
    z(t), and recombine.
    """
    k = Symbol('k')
    z = Function('z')
    # Split the coefficient ratio into a constant part and the common f(t).
    num, den = cancel(
        (r['c1']*x(t) + r['d1']*y(t))/
        (r['c2']*x(t) + r['d2']*y(t))).as_numer_denom()
    f = r['c1']/num.coeff(x(t))
    a1 = num.coeff(x(t))
    b1 = num.coeff(y(t))
    a2 = den.coeff(x(t))
    b2 = den.coeff(y(t))
    # Eigenvalues of the constant matrix [[a1, b1], [a2, b2]].
    chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1
    k1, k2 = [rootof(chareq, k) for k in range(Poly(chareq).degree())]
    z1 = dsolve(diff(z(t),t,t) - k1*f*z(t)).rhs
    z2 = dsolve(diff(z(t),t,t) - k2*f*z(t)).rhs
    sol1 = (k1*z2 - k2*z1 + a1*(z1 - z2))/(a2*(k1-k2))
    sol2 = (z1 - z2)/(k1 - k2)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type7(x, y, t, r, eq):
    """Like type6 but the common factor f(t) multiplies the first-derivative
    coefficients (a*, b*); the decoupled equations integrate to
    z_i = C*Integral(exp(k_i*F)) + C' with F = Integral(f, t).
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    k = Symbol('k')
    num, den = cancel(
        (r['a1']*x(t) + r['b1']*y(t))/
        (r['a2']*x(t) + r['b2']*y(t))).as_numer_denom()
    f = r['a1']/num.coeff(x(t))
    a1 = num.coeff(x(t))
    b1 = num.coeff(y(t))
    a2 = den.coeff(x(t))
    b2 = den.coeff(y(t))
    # Eigenvalues of the constant matrix [[a1, b1], [a2, b2]].
    chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1
    [k1, k2] = [rootof(chareq, k) for k in range(Poly(chareq).degree())]
    F = Integral(f, t)
    z1 = C1*Integral(exp(k1*F), t) + C2
    z2 = C3*Integral(exp(k2*F), t) + C4
    sol1 = (k1*z2 - k2*z1 + a1*(z1 - z2))/(a2*(k1-k2))
    sol2 = (z1 - z2)/(k1 - k2)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type8(x, y, t, r, eq):
    """Cross-coupled system with a t-dependent factor f; like type5 but the
    auxiliary functions depend on Integral(t*f, t) instead of t**2/2.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    num, den = cancel(r['d1']/r['c2']).as_numer_denom()
    f = -r['d1']/num
    a = num
    b = den
    mul = sqrt(abs(a*b))
    Igral = Integral(t*f, t)
    if a*b > 0:
        u = C1*a*exp(mul*Igral) + C2*a*exp(-mul*Igral)
        v = C1*mul*exp(mul*Igral) - C2*mul*exp(-mul*Igral)
    else:
        # Negative product: oscillatory auxiliary functions.
        u = C1*a*cos(mul*Igral) + C2*a*sin(mul*Igral)
        v = -C1*mul*sin(mul*Igral) + C2*mul*cos(mul*Igral)
    sol1 = C3*t + t*Integral(u/t**2, t)
    sol2 = C4*t + t*Integral(v/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type9(x, y, t, r, eq):
    """Euler-type system (coefficients proportional to powers of t): after
    substituting t -> exp(tau) the characteristic equation is quartic in k
    and the solutions are combinations of t**k_i (written exp(k_i*log(t))).
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    k = Symbol('k')
    # Rescale coefficients to the constant form implied by the Euler ansatz.
    a1 = -r['a1']*t; a2 = -r['a2']*t
    b1 = -r['b1']*t; b2 = -r['b2']*t
    c1 = -r['c1']*t**2; c2 = -r['c2']*t**2
    d1 = -r['d1']*t**2; d2 = -r['d2']*t**2
    eq = (k**2+(a1-1)*k+c1)*(k**2+(b2-1)*k+d2)-(b1*k+d1)*(a2*k+c2)
    [k1, k2, k3, k4] = roots_quartic(Poly(eq))
    sol1 = -C1*(b1*k1+d1)*exp(k1*log(t)) - C2*(b1*k2+d1)*exp(k2*log(t)) - \
    C3*(b1*k3+d1)*exp(k3*log(t)) - C4*(b1*k4+d1)*exp(k4*log(t))
    a1_ = (a1-1)
    sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*log(t)) + C2*(k2**2+a1_*k2+c1)*exp(k2*log(t)) \
    + C3*(k3**2+a1_*k3+c1)*exp(k3*log(t)) + C4*(k4**2+a1_*k4+c1)*exp(k4*log(t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type10(x, y, t, r, eq):
    """Change of variables t -> Integral(1/eqz, t) for coefficients with a
    squared-quadratic denominator eqz**2, reducing to a constant system.

    NOTE(review): this path is currently disabled by the ``assert False``
    below -- the code after it never runs; presumably the method was found
    unreliable.  Do not remove the assert without re-validating the method.
    """
    u, v = symbols('u, v', cls=Function)
    assert False
    p = Wild('p', exclude=[t, t**2])
    q = Wild('q', exclude=[t, t**2])
    s = Wild('s', exclude=[t, t**2])
    n = Wild('n', exclude=[t, t**2])
    num, den = r['c1'].as_numer_denom()
    # Match den = n*(p*t**2 + q*t + s)**2 and recover eqz.
    dic = den.match((n*(p*t**2+q*t+s)**2).expand())
    eqz = dic[p]*t**2 + dic[q]*t + dic[s]
    a = num/dic[n]
    b = cancel(r['d1']*eqz**2)
    c = cancel(r['c2']*eqz**2)
    d = cancel(r['d2']*eqz**2)
    [msol1, msol2] = dsolve([Eq(diff(u(t), t, t), (a - dic[p]*dic[s] + dic[q]**2/4)*u(t) \
    + b*v(t)), Eq(diff(v(t),t,t), c*u(t) + (d - dic[p]*dic[s] + dic[q]**2/4)*v(t))])
    sol1 = (msol1.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t))
    sol2 = (msol2.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t))
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order2_type11(x, y, t, r, eq):
    """Reduce to a first-order system in u, v (with coefficients multiplied
    by t), solve it recursively with dsolve, and map the result back with
    the t*(C + Integral(.../t**2)) transformation.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    u, v = symbols('u, v', cls=Function)
    f = -r['c1'] ; g = -r['d1']
    h = -r['c2'] ; p = -r['d2']
    [msol1, msol2] = dsolve([Eq(diff(u(t),t), t*f*u(t) + t*g*v(t)), Eq(diff(v(t),t), t*h*u(t) + t*p*v(t))])
    sol1 = C3*t + t*Integral(msol1.rhs/t**2, t)
    sol2 = C4*t + t*Integral(msol2.rhs/t**2, t)
    return [Eq(x(t), sol1), Eq(y(t), sol2)]
def sysode_linear_3eq_order1(match_):
    """Solve a system of three linear first-order ODEs.

    Normalises each equation by the coefficient of its derivative, extracts
    the coefficient matrix into ``r`` and dispatches on
    ``match_['type_of_equation']``.  Only homogeneous problems are
    supported ('type6' falls through to the general n-equation solver).
    """
    x = match_['func'][0].func
    y = match_['func'][1].func
    z = match_['func'][2].func
    func = match_['func']
    fc = match_['func_coeff']
    eq = match_['eq']
    r = dict()
    # Recover the independent variable from the derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    for i in range(3):
        # Normalise so the leading derivative has coefficient 1.
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    # a*: coefficients of x(t);  b*: of y(t);  c*: of z(t), per equation.
    r['a1'] = fc[0,x(t),0]/fc[0,x(t),1]; r['a2'] = fc[1,x(t),0]/fc[1,y(t),1];
    r['a3'] = fc[2,x(t),0]/fc[2,z(t),1]
    r['b1'] = fc[0,y(t),0]/fc[0,x(t),1]; r['b2'] = fc[1,y(t),0]/fc[1,y(t),1];
    r['b3'] = fc[2,y(t),0]/fc[2,z(t),1]
    r['c1'] = fc[0,z(t),0]/fc[0,x(t),1]; r['c2'] = fc[1,z(t),0]/fc[1,y(t),1];
    r['c3'] = fc[2,z(t),0]/fc[2,z(t),1]
    # Reject any inhomogeneous term.
    for i in range(3):
        for j in Add.make_args(eq[i]):
            if not j.has(x(t), y(t), z(t)):
                raise NotImplementedError("Only homogeneous problems are supported, non-homogeneous are not supported currently.")
    # The subtypes are mutually exclusive, so dispatch with an elif chain
    # (consistent with sysode_linear_2eq_order2).
    if match_['type_of_equation'] == 'type1':
        sol = _linear_3eq_order1_type1(x, y, z, t, r, eq)
    elif match_['type_of_equation'] == 'type2':
        sol = _linear_3eq_order1_type2(x, y, z, t, r, eq)
    elif match_['type_of_equation'] == 'type3':
        sol = _linear_3eq_order1_type3(x, y, z, t, r, eq)
    elif match_['type_of_equation'] == 'type4':
        sol = _linear_3eq_order1_type4(x, y, z, t, r, eq)
    elif match_['type_of_equation'] == 'type6':
        sol = _linear_neq_order1_type1(match_)
    return sol
def _linear_3eq_order1_type1(x, y, z, t, r, eq):
    """Lower-triangular constant system: solve x directly, then y and z by
    successive substitution (the exponents a, c, p are the diagonal
    coefficients)."""
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    a = -r['a1']; b = -r['a2']; c = -r['b2']
    d = -r['a3']; k = -r['b3']; p = -r['c3']
    sol1 = C1*exp(a*t)
    sol2 = b*C1*exp(a*t)/(a-c) + C2*exp(c*t)
    sol3 = C1*(d+b*k/(a-c))*exp(a*t)/(a-p) + k*C2*exp(c*t)/(c-p) + C3*exp(p*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type2(x, y, z, t, r, eq):
    """Antisymmetric (rotation-like) system: the solution rotates with
    angular frequency k = sqrt(a**2 + b**2 + c**2) about the axis (a, b, c);
    C3 is eliminated via the conserved linear combination."""
    C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0)
    a = -r['c2']; b = -r['a3']; c = -r['b1']
    k = sqrt(a**2 + b**2 + c**2)
    # Constraint from the conserved quantity a*x + b*y + c*z.
    C3 = (-a*C1 - b*C2)/c
    sol1 = a*C0 + k*C1*cos(k*t) + (c*C2-b*C3)*sin(k*t)
    sol2 = b*C0 + k*C2*cos(k*t) + (a*C3-c*C1)*sin(k*t)
    sol3 = c*C0 + k*C3*cos(k*t) + (b*C1-a*C2)*sin(k*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type3(x, y, z, t, r, eq):
    """Symmetric oscillatory system: frequencies a, b, c are recovered from
    the pairwise coefficient products; solutions oscillate at
    k = sqrt(a**2 + b**2 + c**2) with a conserved-quantity constraint on C3.
    """
    C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0)
    c = sqrt(r['b1']*r['c2'])
    b = sqrt(r['b1']*r['a3'])
    a = sqrt(r['c2']*r['a3'])
    # Constraint from the conserved quantity a**2*x + b**2*y + c**2*z.
    C3 = (-a**2*C1-b**2*C2)/c**2
    k = sqrt(a**2 + b**2 + c**2)
    sol1 = C0 + k*C1*cos(k*t) + a**-1*b*c*(C2-C3)*sin(k*t)
    sol2 = C0 + k*C2*cos(k*t) + a*b**-1*c*(C3-C1)*sin(k*t)
    sol3 = C0 + k*C3*cos(k*t) + a*b*c**-1*(C1-C2)*sin(k*t)
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def _linear_3eq_order1_type4(x, y, z, t, r, eq):
    """Coefficients share a common factor f(t): factor it out (with
    remainder g for the diagonal terms), solve the resulting
    constant-coefficient system recursively, then substitute
    t -> Integral(f, t) and multiply by exp(Integral(g, t)).
    """
    u, v, w = symbols('u, v, w', cls=Function)
    a2, a3 = cancel(r['b1']/r['c1']).as_numer_denom()
    f = cancel(r['b1']/a2)
    b1 = cancel(r['a2']/f); b3 = cancel(r['c2']/f)
    c1 = cancel(r['a3']/f); c2 = cancel(r['b3']/f)
    # Diagonal coefficients may leave a remainder g after dividing by f.
    a1, g = div(r['a1'],f)
    b2 = div(r['b2'],f)[0]
    c3 = div(r['c3'],f)[0]
    trans_eq = (diff(u(t),t)-a1*u(t)-a2*v(t)-a3*w(t), diff(v(t),t)-b1*u(t)-\
    b2*v(t)-b3*w(t), diff(w(t),t)-c1*u(t)-c2*v(t)-c3*w(t))
    sol = dsolve(trans_eq)
    sol1 = exp(Integral(g,t))*((sol[0].rhs).subs(t, Integral(f,t)))
    sol2 = exp(Integral(g,t))*((sol[1].rhs).subs(t, Integral(f,t)))
    sol3 = exp(Integral(g,t))*((sol[2].rhs).subs(t, Integral(f,t)))
    return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
def sysode_linear_neq_order1(match_):
    """Solve a system of n linear first-order ODEs by delegating to the
    general eigenvector-based solver."""
    return _linear_neq_order1_type1(match_)
def _linear_neq_order1_type1(match_):
    """General solver for X' = M*X with constant matrix M.

    Builds the solution from the eigenvectors of M, extending defective
    eigenspaces with generalized eigenvectors (Jordan chains, giving
    t**k/k! secular factors) and converting complex-conjugate eigenvalue
    pairs to real cos/sin form.
    """
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    n = len(eq)
    # Recover the independent variable from the derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)
    # Coefficient matrix M (signs flipped: eq stores X' - M*X = 0 form).
    M = Matrix(n,n,lambda i,j:-fc[i,func[j],0])
    evector = M.eigenvects(simplify=True)
    def is_complex(mat, root):
        # Real part of mat*exp(I*im(root)*t): the cos-branch of a complex pair.
        return Matrix(n, 1, lambda i,j: re(mat[i])*cos(im(root)*t) - im(mat[i])*sin(im(root)*t))
    def is_complex_conjugate(mat, root):
        # Companion sin-branch for the conjugate eigenvalue.
        return Matrix(n, 1, lambda i,j: re(mat[i])*sin(abs(im(root))*t) + im(mat[i])*cos(im(root)*t)*abs(im(root))/im(root))
    conjugate_root = []
    e_vector = zeros(n,1)
    for evects in evector:
        # Skip roots already handled as the conjugate of an earlier one.
        if evects[0] not in conjugate_root:
            if len(evects[2])!=evects[1]:
                # Defective eigenvalue: build generalized eigenvectors by
                # solving (M - lambda*I)*w_r = w_{r-1}.
                var_mat = Matrix(n, 1, lambda i,j: Symbol('x'+str(i)))
                Mnew = (M - evects[0]*eye(evects[2][-1].rows))*var_mat
                w = [0 for i in range(evects[1])]
                w[0] = evects[2][-1]
                for r in range(1, evects[1]):
                    w_ = Mnew - w[r-1]
                    sol_dict = solve(list(w_), var_mat[1:])
                    sol_dict[var_mat[0]] = var_mat[0]
                    for key, value in sol_dict.items():
                        sol_dict[key] = value.subs(var_mat[0],1)
                    w[r] = Matrix(n, 1, lambda i,j: sol_dict[var_mat[i]])
                    evects[2].append(w[r])
            for i in range(evects[1]):
                C = next(constants)
                for j in range(i+1):
                    if evects[0].has(I):
                        # Complex pair: emit the real cos/sin combination and
                        # consume a second constant for the conjugate branch.
                        evects[2][j] = simplify(evects[2][j])
                        e_vector += C*is_complex(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j)
                        C = next(constants)
                        e_vector += C*is_complex_conjugate(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j)
                    else:
                        # Real eigenvalue: Jordan-chain term t**(i-j)/ (i-j)!.
                        e_vector += C*evects[2][j]*t**(i-j)*exp(evects[0]*t)/factorial(i-j)
            if evects[0].has(I):
                conjugate_root.append(conjugate(evects[0]))
    sol = []
    for i in range(len(eq)):
        sol.append(Eq(func[i],e_vector[i]))
    return sol
def sysode_nonlinear_2eq_order1(match_):
    """Solve a system of two nonlinear first-order ODEs by dispatching on
    ``match_['type_of_equation']`` ('type1' .. 'type5').  'type5' is handled
    before normalisation because its matcher works on the raw equations.
    """
    func = match_['func']
    eq = match_['eq']
    fc = match_['func_coeff']
    # Recover the independent variable from the derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    if match_['type_of_equation'] == 'type5':
        sol = _nonlinear_2eq_order1_type5(func, t, eq)
        return sol
    x = func[0].func
    y = func[1].func
    for i in range(2):
        # Normalise so the leading derivative has coefficient 1.
        eqs = 0
        for terms in Add.make_args(eq[i]):
            eqs += terms/fc[i,func[i],1]
        eq[i] = eqs
    if match_['type_of_equation'] == 'type1':
        sol = _nonlinear_2eq_order1_type1(x, y, t, eq)
    elif match_['type_of_equation'] == 'type2':
        sol = _nonlinear_2eq_order1_type2(x, y, t, eq)
    elif match_['type_of_equation'] == 'type3':
        sol = _nonlinear_2eq_order1_type3(x, y, t, eq)
    elif match_['type_of_equation'] == 'type4':
        sol = _nonlinear_2eq_order1_type4(x, y, t, eq)
    return sol
def _nonlinear_2eq_order1_type1(x, y, t, eq):
    """System x' = x**n * f(x, y): dividing the equations gives
    dy/dx = g/ (x**n), so x = phi(y) by separation; y then follows from a
    quadrature in v (the stand-in for y)."""
    C1, C2 = get_numbered_constants(eq, num=2)
    n = Wild('n', exclude=[x(t),y(t)])
    f = Wild('f')
    u, v = symbols('u, v')
    r = eq[0].match(diff(x(t),t) - x(t)**n*f)
    # g is the y-equation's right-hand side divided by the shared factor f.
    g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
    F = r[f].subs(x(t),u).subs(y(t),v)
    n = r[n]
    if n!=1:
        phi = (C1 + (1-n)*Integral(1/g, v))**(1/(1-n))
    else:
        # n == 1 degenerates to an exponential relation x = phi(y).
        phi = C1*exp(Integral(1/g, v))
    phi = phi.doit()
    # Invert the remaining quadrature for y (= v) as a function of t.
    sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
    sol = []
    for sols in sol2:
        sol.append(Eq(x(t),phi.subs(v, sols)))
        sol.append(Eq(y(t), sols))
    return sol
def _nonlinear_2eq_order1_type2(x, y, t, eq):
    """Like type1 but with x' = exp(n*x) * f(x, y): the separated relation
    is logarithmic, x = phi(y) = -log(C1 - n*Integral(1/g))/n."""
    C1, C2 = get_numbered_constants(eq, num=2)
    n = Wild('n', exclude=[x(t),y(t)])
    f = Wild('f')
    u, v = symbols('u, v')
    r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
    # g is the y-equation's right-hand side divided by the shared factor f.
    g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
    F = r[f].subs(x(t),u).subs(y(t),v)
    n = r[n]
    if n:
        phi = -1/n*log(C1 - n*Integral(1/g, v))
    else:
        # n == 0 degenerates to a direct quadrature.
        phi = C1 + Integral(1/g, v)
    phi = phi.doit()
    # Invert the remaining quadrature for y (= v) as a function of t.
    sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
    sol = []
    for sols in sol2:
        sol.append(Eq(x(t),phi.subs(v, sols)))
        sol.append(Eq(y(t), sols))
    return sol
def _nonlinear_2eq_order1_type3(x, y, t, eq):
    """Autonomous system x' = F(x, y), y' = G(x, y): eliminate t by solving
    dv/du = G/F for v(u) (= y as a function of x), then recover t from the
    quadrature Integral(1/F) = t + C2.
    """
    C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
    v = Function('v')
    u = Symbol('u')
    f = Wild('f')
    g = Wild('g')
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    F = r1[f].subs(x(t), u).subs(y(t), v(u))
    G = r2[g].subs(x(t), u).subs(y(t), v(u))
    sol2r = dsolve(Eq(diff(v(u), u), G/F))
    # NOTE(review): only the sol1 from the last iterate of sol2r is used in
    # the result below; presumably intentional when dsolve returns several
    # branches -- confirm.
    for sol2s in sol2r:
        sol1 = solve(Integral(1/F.subs(v(u), sol2s.rhs), u).doit() - t - C2, u)
    sol = []
    for sols in sol1:
        sol.append(Eq(x(t), sols))
        sol.append(Eq(y(t), (sol2s.rhs).subs(u, sols)))
    return sol
def _nonlinear_2eq_order1_type4(x, y, t, eq):
    """Separable system: the ratio x'/y' factors as f1(x)*g1(y) /
    (f2(x)*g2(y)), giving the first integral Integral(F2/F1, x) -
    Integral(G1/G2, y) = C1; each unknown then satisfies a single first-order
    ODE solved with dsolve.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v = symbols('u, v')
    U, V = symbols('U, V', cls=Function)
    f = Wild('f')
    g = Wild('g')
    f1 = Wild('f1', exclude=[v,t])
    f2 = Wild('f2', exclude=[v,t])
    g1 = Wild('g1', exclude=[u,t])
    g2 = Wild('g2', exclude=[u,t])
    r1 = eq[0].match(diff(x(t),t) - f)
    r2 = eq[1].match(diff(y(t),t) - g)
    # Separate x'/y' into x-only and y-only factors.
    num, den = (
        (r1[f].subs(x(t),u).subs(y(t),v))/
        (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
    R1 = num.match(f1*g1)
    R2 = den.match(f2*g2)
    # phi is the leftover common factor of the two right-hand sides.
    phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
    F1 = R1[f1]; F2 = R2[f2]
    G1 = R1[g1]; G2 = R2[g2]
    # Use the first integral to express u in terms of v and vice versa.
    sol1r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, u)
    sol2r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, v)
    sol = []
    for sols in sol1r:
        sol.append(Eq(y(t), dsolve(diff(V(t),t) - F2.subs(u,sols).subs(v,V(t))*G2.subs(v,V(t))*phi.subs(u,sols).subs(v,V(t))).rhs))
    for sols in sol2r:
        sol.append(Eq(x(t), dsolve(diff(U(t),t) - F1.subs(u,U(t))*G1.subs(v,sols).subs(u,U(t))*phi.subs(v,sols).subs(u,U(t))).rhs))
    return set(sol)
def _nonlinear_2eq_order1_type5(func, t, eq):
    """Clairaut-type system x = t*x' + F(x', y'), y = t*y' + G(x', y'):
    the general solution is the straight lines x = C1*t + F(C1, C2),
    y = C2*t + G(C1, C2).  The matcher tries both sign conventions and both
    divided/undivided forms, and both orderings of (x, y).
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    f = Wild('f')
    g = Wild('g')
    def check_type(x, y):
        # Try the four equivalent written forms of the Clairaut system.
        r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
        r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
            r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
        if not (r1 and r2):
            r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
            r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
        if not (r1 and r2):
            r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
            r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
        return [r1, r2]
    for func_ in func:
        if isinstance(func_, list):
            x = func[0][0].func
            y = func[0][1].func
            [r1, r2] = check_type(x, y)
            if not (r1 and r2):
                # Retry with the roles of x and y swapped.
                [r1, r2] = check_type(y, x)
                x, y = y, x
    x1 = diff(x(t),t); y1 = diff(y(t),t)
    # Substitute the constants for the derivatives to get the line family.
    return {Eq(x(t), C1*t + r1[f].subs(x1,C1).subs(y1,C2)), Eq(y(t), C2*t + r2[g].subs(x1,C1).subs(y1,C2))}
def sysode_nonlinear_3eq_order1(match_):
    """Solve a system of three nonlinear first-order ODEs by dispatching on
    ``match_['type_of_equation']`` ('type1' .. 'type5')."""
    x = match_['func'][0].func
    y = match_['func'][1].func
    z = match_['func'][2].func
    eq = match_['eq']
    # Recover the independent variable from the derivative in eq[0].
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    # The subtypes are mutually exclusive, so dispatch with an elif chain
    # (consistent with sysode_linear_2eq_order2).
    if match_['type_of_equation'] == 'type1':
        sol = _nonlinear_3eq_order1_type1(x, y, z, t, eq)
    elif match_['type_of_equation'] == 'type2':
        sol = _nonlinear_3eq_order1_type2(x, y, z, t, eq)
    elif match_['type_of_equation'] == 'type3':
        sol = _nonlinear_3eq_order1_type3(x, y, z, t, eq)
    elif match_['type_of_equation'] == 'type4':
        sol = _nonlinear_3eq_order1_type4(x, y, z, t, eq)
    elif match_['type_of_equation'] == 'type5':
        sol = _nonlinear_3eq_order1_type5(x, y, z, t, eq)
    return sol
def _nonlinear_3eq_order1_type1(x, y, z, t, eq):
    """Euler-equations-like system a*x' = (b-c)*y*z (and cyclic): use the
    two quadratic first integrals (with constants C1, C2) to express each
    pair of unknowns through the third, then solve three decoupled ODEs.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    r = (diff(x(t),t) - eq[0]).match(p*y(t)*z(t))
    r.update((diff(y(t),t) - eq[1]).match(q*z(t)*x(t)))
    r.update((diff(z(t),t) - eq[2]).match(s*x(t)*y(t)))
    # Recover the coefficients a, b, c from the matched ratios
    # p = (b-c)/a, q = (c-a)/b, s = (a-b)/c.
    n1, d1 = r[p].as_numer_denom()
    n2, d2 = r[q].as_numer_denom()
    n3, d3 = r[s].as_numer_denom()
    val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, d3*u-d3*v-n3*w],[u,v])
    vals = [val[v], val[u]]
    c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
    b = vals[0].subs(w, c)
    a = vals[1].subs(w, c)
    # Pairwise eliminations from the first integrals.
    y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
    z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
    z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
    x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
    x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
    y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
    sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x)
    sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y)
    sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z)
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type2(x, y, z, t, eq):
    """Like type1 but with an extra common factor f(x, y, z, t) on every
    right-hand side: a*x' = (b-c)*y*z*f (and cyclic).  The first integrals
    are the same; the factor f reappears in the decoupled ODEs.
    """
    C1, C2 = get_numbered_constants(eq, num=2)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    f = Wild('f')
    r1 = (diff(x(t),t) - eq[0]).match(y(t)*z(t)*f)
    # Split the matched factor into its constant part p and the true f.
    r = collect_const(r1[f]).match(p*f)
    r.update(((diff(y(t),t) - eq[1])/r[f]).match(q*z(t)*x(t)))
    r.update(((diff(z(t),t) - eq[2])/r[f]).match(s*x(t)*y(t)))
    # Recover the coefficients a, b, c from the matched ratios.
    n1, d1 = r[p].as_numer_denom()
    n2, d2 = r[q].as_numer_denom()
    n3, d3 = r[s].as_numer_denom()
    val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, -d3*u+d3*v+n3*w],[u,v])
    vals = [val[v], val[u]]
    c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
    a = vals[0].subs(w, c)
    b = vals[1].subs(w, c)
    # Pairwise eliminations from the first integrals.
    y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
    z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
    z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
    x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
    x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
    y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
    sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x*r[f])
    sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y*r[f])
    sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z*r[f])
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type3(x, y, z, t, eq):
    """System x' = c*F2 - b*F3 (and cyclic) with the linear first integral
    a*x + b*y + c*z = C1: eliminate one unknown at a time via the first
    integral, solve the reduced planar ODEs for the pairwise relations,
    then the three decoupled time equations.
    """
    C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    r1 = (diff(x(t), t) - eq[0]).match(F2-F3)
    # Split constant coefficients s, q off the matched function pieces.
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        # Matched with the roles of F2 and F3 swapped; fix orientation.
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    r.update((diff(y(t), t) - eq[1]).match(p*r[F3] - r[s]*F1))
    a = r[p]; b = r[q]; c = r[s]
    F1 = r[F1].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    F2 = r[F2].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    F3 = r[F3].subs(x(t), u).subs(y(t),v).subs(z(t), w)
    # Each unknown expressed from the linear first integral.
    z_xy = (C1-a*u-b*v)/c
    y_zx = (C1-a*u-c*w)/b
    x_yz = (C1-b*v-c*w)/a
    y_x = dsolve(diff(v(u),u) - ((a*F3-c*F1)/(c*F2-b*F3)).subs(w,z_xy).subs(v,v(u))).rhs
    z_x = dsolve(diff(w(u),u) - ((b*F1-a*F2)/(c*F2-b*F3)).subs(v,y_zx).subs(w,w(u))).rhs
    z_y = dsolve(diff(w(v),v) - ((b*F1-a*F2)/(a*F3-c*F1)).subs(u,x_yz).subs(w,w(v))).rhs
    x_y = dsolve(diff(u(v),v) - ((c*F2-b*F3)/(a*F3-c*F1)).subs(w,z_xy).subs(u,u(v))).rhs
    y_z = dsolve(diff(v(w),w) - ((a*F3-c*F1)/(b*F1-a*F2)).subs(u,x_yz).subs(v,v(w))).rhs
    x_z = dsolve(diff(u(w),w) - ((c*F2-b*F3)/(b*F1-a*F2)).subs(v,y_zx).subs(u,u(w))).rhs
    sol1 = dsolve(diff(u(t),t) - (c*F2 - b*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs
    sol2 = dsolve(diff(v(t),t) - (a*F3 - c*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs
    sol3 = dsolve(diff(w(t),t) - (b*F1 - a*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type4(x, y, z, t, eq):
    """System x' = c*z*F2 - b*y*F3 (and cyclic) with the quadratic first
    integral a*x**2 + b*y**2 + c*z**2 = C1: same elimination strategy as
    type3 but with the quadratic integral.
    """
    C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    r1 = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
    # Split constant coefficients s, q off the matched function pieces.
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        # Matched with the roles of F2 and F3 swapped; fix orientation.
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    r.update((diff(y(t),t) - eq[1]).match(p*x(t)*r[F3] - r[s]*z(t)*F1))
    a = r[p]; b = r[q]; c = r[s]
    F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w)
    # Each unknown expressed from the quadratic first integral.
    x_yz = sqrt((C1 - b*v**2 - c*w**2)/a)
    y_zx = sqrt((C1 - c*w**2 - a*u**2)/b)
    z_xy = sqrt((C1 - a*u**2 - b*v**2)/c)
    y_x = dsolve(diff(v(u),u) - ((a*u*F3-c*w*F1)/(c*w*F2-b*v*F3)).subs(w,z_xy).subs(v,v(u))).rhs
    z_x = dsolve(diff(w(u),u) - ((b*v*F1-a*u*F2)/(c*w*F2-b*v*F3)).subs(v,y_zx).subs(w,w(u))).rhs
    z_y = dsolve(diff(w(v),v) - ((b*v*F1-a*u*F2)/(a*u*F3-c*w*F1)).subs(u,x_yz).subs(w,w(v))).rhs
    x_y = dsolve(diff(u(v),v) - ((c*w*F2-b*v*F3)/(a*u*F3-c*w*F1)).subs(w,z_xy).subs(u,u(v))).rhs
    y_z = dsolve(diff(v(w),w) - ((a*u*F3-c*w*F1)/(b*v*F1-a*u*F2)).subs(u,x_yz).subs(v,v(w))).rhs
    x_z = dsolve(diff(u(w),w) - ((c*w*F2-b*v*F3)/(b*v*F1-a*u*F2)).subs(v,y_zx).subs(u,u(w))).rhs
    sol1 = dsolve(diff(u(t),t) - (c*w*F2 - b*v*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs
    sol2 = dsolve(diff(v(t),t) - (a*u*F3 - c*w*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs
    sol3 = dsolve(diff(w(t),t) - (b*v*F1 - a*u*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs
    return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type5(x, y, z, t, eq):
    """Solve a first-order nonlinear 3-equation system of the form
    x' = x*(c*F2 - b*F3),  y' = y*(a*F3 - c*F1),  z' = z*(b*F1 - a*F2)
    (matching the sol1/sol2/sol3 right-hand sides below).  Each unknown is
    expressed as a power product of the other two via the constant C1
    (see x_yz/y_zx/z_xy), which reduces the system to pairwise ODEs.
    Returns [sol_x, sol_y, sol_z] as the ``rhs`` of the dsolve results.
    """
    C1 = get_numbered_constants(eq, num=1)
    # Dummy symbols standing in for x(t), y(t), z(t) in the reduced ODEs.
    u, v, w = symbols('u, v, w')
    # Wild patterns for the constant coefficients; unknowns and t are excluded.
    p = Wild('p', exclude=[x(t), y(t), z(t), t])
    q = Wild('q', exclude=[x(t), y(t), z(t), t])
    s = Wild('s', exclude=[x(t), y(t), z(t), t])
    F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
    # Match the first equation against x' - x*(F2 - F3).
    r1 = eq[0].match(diff(x(t), t) - x(t)*(F2 - F3))
    # Split the constant coefficients off the matched terms: s*F2 and q*F3.
    r = collect_const(r1[F2]).match(s*F2)
    r.update(collect_const(r1[F3]).match(q*F3))
    # The F2/F3 match from eq[0] is ambiguous; use eq[1] to detect and undo a swap.
    if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
        r[F2], r[F3] = r[F3], r[F2]
        r[s], r[q] = -r[q], -r[s]
    # Extract the remaining coefficient (p) and F1 from the second equation.
    r.update((diff(y(t), t) - eq[1]).match(y(t)*(p*r[F3] - r[s]*F1)))
    a = r[p]; b = r[q]; c = r[s]
    # Rewrite F1, F2, F3 in terms of the dummy symbols u, v, w.
    F1 = r[F1].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    F2 = r[F2].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    F3 = r[F3].subs(x(t), u).subs(y(t), v).subs(z(t), w)
    # Each unknown as a power product of the other two and the constant C1.
    x_yz = (C1*v**-b*w**-c)**-a
    y_zx = (C1*w**-c*u**-a)**-b
    z_xy = (C1*u**-a*v**-b)**-c
    # Pairwise reduced ODEs: each unknown as a function of one of the others.
    y_x = dsolve(diff(v(u), u) - ((v*(a*F3 - c*F1))/(u*(c*F2 - b*F3))).subs(w, z_xy).subs(v, v(u))).rhs
    z_x = dsolve(diff(w(u), u) - ((w*(b*F1 - a*F2))/(u*(c*F2 - b*F3))).subs(v, y_zx).subs(w, w(u))).rhs
    z_y = dsolve(diff(w(v), v) - ((w*(b*F1 - a*F2))/(v*(a*F3 - c*F1))).subs(u, x_yz).subs(w, w(v))).rhs
    x_y = dsolve(diff(u(v), v) - ((u*(c*F2 - b*F3))/(v*(a*F3 - c*F1))).subs(w, z_xy).subs(u, u(v))).rhs
    y_z = dsolve(diff(v(w), w) - ((v*(a*F3 - c*F1))/(w*(b*F1 - a*F2))).subs(u, x_yz).subs(v, v(w))).rhs
    x_z = dsolve(diff(u(w), w) - ((u*(c*F2 - b*F3))/(w*(b*F1 - a*F2))).subs(v, y_zx).subs(u, u(w))).rhs
    # Final single-variable ODEs in t for each unknown, after substituting the
    # pairwise solutions above.
    sol1 = dsolve(diff(u(t), t) - (u*(c*F2 - b*F3)).subs(v, y_x).subs(w, z_x).subs(u, u(t))).rhs
    sol2 = dsolve(diff(v(t), t) - (v*(a*F3 - c*F1)).subs(u, x_y).subs(w, z_y).subs(v, v(t))).rhs
    sol3 = dsolve(diff(w(t), t) - (w*(b*F1 - a*F2)).subs(u, x_z).subs(v, y_z).subs(w, w(t))).rhs
    return [sol1, sol2, sol3]
| true | true |
f72e824990bb73cef7bba0e0f1c0e056f9cd231b | 1,362 | py | Python | setup.py | jwplayer/sparksteps | 8809ab42f22017aee9945bce8f7b3f3b70674bf8 | [
"Apache-2.0"
] | 74 | 2016-06-15T21:36:53.000Z | 2021-12-21T16:40:27.000Z | setup.py | jwplayer/sparksteps | 8809ab42f22017aee9945bce8f7b3f3b70674bf8 | [
"Apache-2.0"
] | 13 | 2017-03-28T19:33:02.000Z | 2021-01-21T12:05:32.000Z | setup.py | jwplayer/sparksteps | 8809ab42f22017aee9945bce8f7b3f3b70674bf8 | [
"Apache-2.0"
] | 16 | 2016-10-10T21:14:51.000Z | 2022-02-19T09:52:11.000Z | # -*- coding: utf-8 -*-
"""Distutils setup file, used to install or test 'sparksteps'."""
import textwrap
from setuptools import setup, find_packages
# Read the long description from the README so it renders on PyPI.
# BUGFIX: specify the encoding explicitly — the platform default (e.g.
# cp1252 on Windows) can fail on non-ASCII characters in the README.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='sparksteps',
    description='Workflow tool to launch Spark jobs on AWS EMR',
    long_description=readme,
    # Ship only the library; tests/examples/bootstrap stay out of the package.
    packages=find_packages(exclude=['tests', 'examples', 'bootstrap']),
    # Version is derived from the git tag by setuptools_scm (see setup_requires).
    use_scm_version=True,
    author='Kamil Sindi',
    author_email='kamil@jwplayer.com',
    url='https://github.com/jwplayer/sparksteps',
    keywords='aws emr pyspark spark boto'.split(),
    license='Apache License 2.0',
    install_requires=[
        'boto3>=1.3.1',
        'polling==0.3.0'
    ],
    setup_requires=[
        'setuptools_scm',
        'sphinx_rtd_theme',
    ],
    include_package_data=True,
    zip_safe=False,
    # Installs the `sparksteps` command-line entry point.
    entry_points={
        'console_scripts': [
            'sparksteps=sparksteps.__main__:main'
        ]
    },
    # dedent + strip + splitlines turns the readable block into the list of
    # classifier strings setuptools expects.
    classifiers=textwrap.dedent("""
        Development Status :: 4 - Beta
        Intended Audience :: Developers
        License :: OSI Approved :: Apache Software License
        Environment :: Console
        Programming Language :: Python :: 3.6
        Programming Language :: Python :: 3.7
        Programming Language :: Python :: 3.8
        """).strip().splitlines(),
    python_requires='>=3.6'
)
| 28.978723 | 71 | 0.624816 |
import textwrap
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
setup(
name='sparksteps',
description='Workflow tool to launch Spark jobs on AWS EMR',
long_description=readme,
packages=find_packages(exclude=['tests', 'examples', 'bootstrap']),
use_scm_version=True,
author='Kamil Sindi',
author_email='kamil@jwplayer.com',
url='https://github.com/jwplayer/sparksteps',
keywords='aws emr pyspark spark boto'.split(),
license='Apache License 2.0',
install_requires=[
'boto3>=1.3.1',
'polling==0.3.0'
],
setup_requires=[
'setuptools_scm',
'sphinx_rtd_theme',
],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'sparksteps=sparksteps.__main__:main'
]
},
classifiers=textwrap.dedent("""
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Environment :: Console
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
""").strip().splitlines(),
python_requires='>=3.6'
)
| true | true |
f72e82c9d7bd2b2e7b24df8a3f6dc4ce9687a9d7 | 699 | py | Python | src/podping_hivewriter/models/podping_settings.py | brianoflondon/podping-hivewriter | 0fab946b00ae1c53db72a2f0b48de3f02a771a1d | [
"MIT"
] | 11 | 2021-05-28T15:58:31.000Z | 2022-03-21T07:08:58.000Z | src/podping_hivewriter/models/podping_settings.py | brianoflondon/podping-hivewriter | 0fab946b00ae1c53db72a2f0b48de3f02a771a1d | [
"MIT"
] | 33 | 2021-06-12T09:23:47.000Z | 2022-02-27T06:34:05.000Z | src/podping_hivewriter/models/podping_settings.py | brianoflondon/podping-hivewriter | 0fab946b00ae1c53db72a2f0b48de3f02a771a1d | [
"MIT"
] | 3 | 2021-07-30T19:40:04.000Z | 2022-01-19T04:14:14.000Z | from typing import Tuple
from pydantic import BaseModel, validator
class PodpingSettings(BaseModel):
    """Operational settings for podping, fetched from the Hive control account."""

    # Seconds between Hive custom_json operations; validated to be >= 1 below.
    hive_operation_period: int = 3
    # Upper bound, in bytes, for a batched URL list.
    max_url_list_bytes: int = 7500
    # Seconds between diagnostic reports.
    diagnostic_report_period: int = 60
    # Hive account whose posts carry the canonical settings.
    control_account: str = "podping"
    # Seconds between checks of the control account.
    control_account_check_period: int = 60
    # Hive API nodes used when running against testnet.
    test_nodes: Tuple[str, ...] = ("https://testnet.openhive.network",)

    @validator("hive_operation_period")
    def hive_op_period_must_be_int_above_one(cls, v):
        """Clamp the operation period to a minimum of one second.

        Pydantic has already coerced the value toward int; any value below
        one is silently raised to one rather than rejected.
        """
        return v if v >= 1 else 1
| 30.391304 | 71 | 0.680973 | from typing import Tuple
from pydantic import BaseModel, validator
class PodpingSettings(BaseModel):
hive_operation_period: int = 3
max_url_list_bytes: int = 7500
diagnostic_report_period: int = 60
control_account: str = "podping"
control_account_check_period: int = 60
test_nodes: Tuple[str, ...] = ("https://testnet.openhive.network",)
@validator("hive_operation_period")
def hive_op_period_must_be_int_above_one(cls, v):
if v < 1:
v = 1
return v
| true | true |
f72e8306feafbbb1dafb789b86bcdde1ac7bd206 | 12,065 | py | Python | contextualized_topic_models/evaluation/measures.py | onlyrico/contextualized-topic-models | ac338eab6601cd34475d490ae8072fecb73bb0c2 | [
"MIT"
] | 1 | 2022-02-07T13:52:48.000Z | 2022-02-07T13:52:48.000Z | contextualized_topic_models/evaluation/measures.py | onlyrico/contextualized-topic-models | ac338eab6601cd34475d490ae8072fecb73bb0c2 | [
"MIT"
] | null | null | null | contextualized_topic_models/evaluation/measures.py | onlyrico/contextualized-topic-models | ac338eab6601cd34475d490ae8072fecb73bb0c2 | [
"MIT"
] | null | null | null | from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.models import KeyedVectors
import gensim.downloader as api
from scipy.spatial.distance import cosine
import abc
from contextualized_topic_models.evaluation.rbo import rbo
import numpy as np
import itertools
class Measure:
    """Base class for all evaluation measures; subclasses override score()."""
    def __init__(self):
        pass
    def score(self):
        # Overridden by subclasses to return the value of the measure.
        pass
class TopicDiversity(Measure):
    """Topic diversity: the proportion of distinct words among the top-k
    words of every topic (1.0 means no word is shared between topics)."""

    def __init__(self, topics):
        """
        :param topics: a list of lists of the top words of each topic
        """
        super().__init__()
        self.topics = topics

    def score(self, topk=25):
        """
        :param topk: topk words on which the topic diversity will be computed
        :return: fraction of unique words over topk * number of topics
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        distinct_words = {word for topic in self.topics for word in topic[:topk]}
        return len(distinct_words) / (topk * len(self.topics))
class Coherence(abc.ABC):
    """
    Abstract base class for corpus-based topic-coherence measures.

    :param topics: a list of lists of the top-k words
    :param texts: (list of lists of strings) represents the corpus on which the empirical frequencies of words are computed
    """
    def __init__(self, topics, texts):
        self.topics = topics
        self.texts = texts
        # Build the gensim dictionary once; subclasses pass it to CoherenceModel.
        self.dictionary = Dictionary(self.texts)
    @abc.abstractmethod
    def score(self):
        # Subclasses return the coherence value(s) for self.topics.
        pass
class CoherenceNPMI(Coherence):
    """Normalized pointwise mutual information (NPMI) coherence via gensim."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic (default: False)
        :return: NPMI coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(topics=self.topics, texts=self.texts,
                               dictionary=self.dictionary,
                               coherence='c_npmi', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceUMASS(Coherence):
    """UMass coherence via gensim."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic (default: False)
        :return: UMass coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(topics=self.topics, texts=self.texts,
                               dictionary=self.dictionary,
                               coherence='u_mass', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceUCI(Coherence):
    """UCI coherence via gensim."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic (default: False)
        :return: UCI coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(topics=self.topics, texts=self.texts,
                               dictionary=self.dictionary,
                               coherence='c_uci', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceCV(Coherence):
    """C_V coherence via gensim."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic (default: False)
        :return: C_V coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(topics=self.topics, texts=self.texts,
                               dictionary=self.dictionary,
                               coherence='c_v', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceWordEmbeddings(Measure):
    """Topic coherence based on pairwise word-embedding similarities."""

    def __init__(self, topics, word2vec_path=None, binary=False):
        """
        :param topics: a list of lists of the top-n most likely words
        :param word2vec_path: if given, load embeddings (word2vec format) from this
            path; otherwise 'word2vec-google-news-300' is downloaded via gensim
        :param binary: if the word2vec file is binary
        """
        super().__init__()
        self.topics = topics
        self.binary = binary
        if word2vec_path is not None:
            self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)
        else:
            self.wv = api.load('word2vec-google-news-300')

    def score(self, topk=10, binary=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :return: mean over topics of the mean pairwise embedding similarity of
            each topic's top-k in-vocabulary words
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        per_topic_means = []
        for topic in self.topics:
            if not topic:
                continue
            # Only word pairs where both words are in the embedding vocabulary
            # contribute; a topic with no such pair yields NaN (mean of empty).
            sims = [self.wv.similarity(first, second)
                    for first, second in itertools.combinations(topic[:topk], 2)
                    if first in self.wv.vocab and second in self.wv.vocab]
            per_topic_means.append(np.mean(sims))
        return np.mean(per_topic_means)
class InvertedRBO(Measure):
    """1 minus the average pairwise rank-biased overlap (RBO) between topics."""

    def __init__(self, topics):
        """
        :param topics: a list of lists of words
        """
        super().__init__()
        self.topics = topics

    def score(self, topk=10, weight=0.9):
        """
        :param weight: p (float), default 1.0: Weight of each agreement at depth d:
            p**(d-1). When set to 1.0, there is no weight, the rbo returns to
            average overlap.
        :return: 1 - mean rank_biased_overlap over all topic pairs
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        overlaps = [rbo.rbo(first[:topk], second[:topk], p=weight)[2]
                    for first, second in itertools.combinations(self.topics, 2)]
        return 1 - np.mean(overlaps)
class Matches(Measure):
    """Fraction of comparable documents whose most likely topic agrees across
    the original-language and unseen-language distributions."""

    def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):
        """
        :param doc_distribution_original_language: numpy array (num docs x num topics)
            of the topical distributions of the documents in the original language
        :param doc_distribution_unseen_language: numpy array (num docs x num topics)
            of the topical distributions of the same documents in an unseen language
        """
        super().__init__()
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')

    def score(self):
        """
        :return: proportion of documents whose argmax topic is identical in the
            original-language and unseen-language distributions
        """
        agreeing = sum(1 for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs)
                       if np.argmax(d1) == np.argmax(d2))
        return agreeing / len(self.unseen_lang_docs)
class KLDivergence(Measure):
    """Mean Kullback-Leibler divergence between paired document distributions."""

    def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):
        """
        :param doc_distribution_original_language: numpy array (num docs x num topics)
            of the topical distributions of the documents in the original language
        :param doc_distribution_unseen_language: numpy array (num docs x num topics)
            of the topical distributions of the same documents in an unseen language
        """
        super().__init__()
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')

    def score(self):
        """
        :return: average Kullback-Leibler divergence between the paired distributions
        """
        divergences = [kl_div(d1, d2)
                       for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs)]
        return sum(divergences) / len(self.unseen_lang_docs)
def kl_div(a, b):
    """Kullback-Leibler divergence KL(a || b) of two discrete distributions.

    Terms where a == 0 contribute zero (the limit x*log(x) -> 0); a NumPy
    runtime warning may still be emitted while evaluating the masked branch.

    :param a: array-like, first distribution
    :param b: array-like, second distribution (same length as `a`)
    :return: float, sum over i of a[i] * log(a[i] / b[i])
    """
    # BUGFIX: the `np.float` alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin `float` is the documented replacement.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return np.sum(np.where(a != 0, a * np.log(a / b), 0))
class CentroidDistance(Measure):
    """Average cosine similarity between the embedding centroids of the most
    likely topics of paired document distributions."""

    def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language, topics, word2vec_path=None,
                 binary=True, topk=10):
        """
        :param doc_distribution_original_language: numpy array (num docs x num topics)
            of the topical distributions of the documents in the original language
        :param doc_distribution_unseen_language: numpy array (num docs x num topics)
            of the topical distributions of the same documents in an unseen language
        :param topics: a list of lists of the top-n most likely words
        :param word2vec_path: if given, load embeddings (word2vec format) from this
            path; otherwise 'word2vec-google-news-300' is downloaded via gensim
        :param binary: if the word2vec file is binary
        :param topk: max number of topical words
        """
        super().__init__()
        self.topics = [t[:topk] for t in topics]
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')
        if word2vec_path is not None:
            self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)
        else:
            self.wv = api.load('word2vec-google-news-300')

    def score(self):
        """
        :return: mean of (1 - cosine distance) between the centroids of the
            words of each document pair's most likely topics
        """
        total = 0
        for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):
            centroid_orig = self.get_centroid(self.topics[np.argmax(d1)])
            centroid_unseen = self.get_centroid(self.topics[np.argmax(d2)])
            total += 1 - cosine(centroid_orig, centroid_unseen)
        return total / len(self.unseen_lang_docs)

    def get_centroid(self, word_list):
        """Return the L2-normalized mean direction of the in-vocabulary words."""
        vectors = [self.wv.get_vector(word)
                   for word in word_list if word in self.wv.vocab]
        summed = sum(vectors)
        return summed / np.linalg.norm(summed)
| 39.818482 | 123 | 0.630999 | from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.models import KeyedVectors
import gensim.downloader as api
from scipy.spatial.distance import cosine
import abc
from contextualized_topic_models.evaluation.rbo import rbo
import numpy as np
import itertools
class Measure:
def __init__(self):
pass
def score(self):
pass
class TopicDiversity(Measure):
def __init__(self, topics):
super().__init__()
self.topics = topics
def score(self, topk=25):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
unique_words = set()
for t in self.topics:
unique_words = unique_words.union(set(t[:topk]))
td = len(unique_words) / (topk * len(self.topics))
return td
class Coherence(abc.ABC):
def __init__(self, topics, texts):
self.topics = topics
self.texts = texts
self.dictionary = Dictionary(self.texts)
@abc.abstractmethod
def score(self):
pass
class CoherenceNPMI(Coherence):
def __init__(self, topics, texts):
super().__init__(topics, texts)
def score(self, topk=10, per_topic=False):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
npmi = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,
coherence='c_npmi', topn=topk)
if per_topic:
return npmi.get_coherence_per_topic()
else:
return npmi.get_coherence()
class CoherenceUMASS(Coherence):
def __init__(self, topics, texts):
super().__init__(topics, texts)
def score(self, topk=10, per_topic=False):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
umass = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,
coherence='u_mass', topn=topk)
if per_topic:
return umass.get_coherence_per_topic()
else:
return umass.get_coherence()
class CoherenceUCI(Coherence):
def __init__(self, topics, texts):
super().__init__(topics, texts)
def score(self, topk=10, per_topic=False):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
uci = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,
coherence='c_uci', topn=topk)
if per_topic:
return uci.get_coherence_per_topic()
else:
return uci.get_coherence()
class CoherenceCV(Coherence):
def __init__(self, topics, texts):
super().__init__(topics, texts)
def score(self, topk=10, per_topic=False):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
cv = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,
coherence='c_v', topn=topk)
if per_topic:
return cv.get_coherence_per_topic()
else:
return cv.get_coherence()
class CoherenceWordEmbeddings(Measure):
def __init__(self, topics, word2vec_path=None, binary=False):
super().__init__()
self.topics = topics
self.binary = binary
if word2vec_path is None:
self.wv = api.load('word2vec-google-news-300')
else:
self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)
def score(self, topk=10, binary= False):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
arrays = []
for index, topic in enumerate(self.topics):
if len(topic) > 0:
local_simi = []
for word1, word2 in itertools.combinations(topic[0:topk], 2):
if word1 in self.wv.vocab and word2 in self.wv.vocab:
local_simi.append(self.wv.similarity(word1, word2))
arrays.append(np.mean(local_simi))
return np.mean(arrays)
class InvertedRBO(Measure):
def __init__(self, topics):
super().__init__()
self.topics = topics
def score(self, topk = 10, weight=0.9):
if topk > len(self.topics[0]):
raise Exception('Words in topics are less than topk')
else:
collect = []
for list1, list2 in itertools.combinations(self.topics, 2):
rbo_val = rbo.rbo(list1[:topk], list2[:topk], p=weight)[2]
collect.append(rbo_val)
return 1 - np.mean(collect)
class Matches(Measure):
def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):
super().__init__()
self.orig_lang_docs = doc_distribution_original_language
self.unseen_lang_docs = doc_distribution_unseen_language
if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
raise Exception('Distributions of the comparable documents must have the same length')
def score(self):
matches = 0
for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):
if np.argmax(d1) == np.argmax(d2):
matches = matches + 1
return matches/len(self.unseen_lang_docs)
class KLDivergence(Measure):
def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):
super().__init__()
self.orig_lang_docs = doc_distribution_original_language
self.unseen_lang_docs = doc_distribution_unseen_language
if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
raise Exception('Distributions of the comparable documents must have the same length')
def score(self):
kl_mean = 0
for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):
kl_mean = kl_mean + kl_div(d1, d2)
return kl_mean/len(self.unseen_lang_docs)
def kl_div(a, b):
a = np.asarray(a, dtype=np.float)
b = np.asarray(b, dtype=np.float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
class CentroidDistance(Measure):
def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language, topics, word2vec_path=None,
binary=True, topk=10):
super().__init__()
self.topics = [t[:topk] for t in topics]
self.orig_lang_docs = doc_distribution_original_language
self.unseen_lang_docs = doc_distribution_unseen_language
if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
raise Exception('Distributions of the comparable documents must have the same length')
if word2vec_path is None:
self.wv = api.load('word2vec-google-news-300')
else:
self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)
def score(self):
cd = 0
for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):
top_words_orig = self.topics[np.argmax(d1)]
top_words_unseen = self.topics[np.argmax(d2)]
centroid_lang = self.get_centroid(top_words_orig)
centroid_en = self.get_centroid(top_words_unseen)
cd += (1 - cosine(centroid_lang, centroid_en))
return cd/len(self.unseen_lang_docs)
def get_centroid(self, word_list):
vector_list = []
for word in word_list:
if word in self.wv.vocab:
vector_list.append(self.wv.get_vector(word))
vec = sum(vector_list)
return vec / np.linalg.norm(vec)
| true | true |
f72e8350d7481236cc8e11a4dc5d4bd60251c0b9 | 6,102 | py | Python | beliefs/factors/discrete_factor.py | drivergroup/beliefs | 7e0b2a02d719f5b1c889d72ac1e9421971cc120b | [
"MIT"
] | 2 | 2019-07-11T17:42:07.000Z | 2020-02-10T05:19:53.000Z | beliefs/factors/discrete_factor.py | drivergroup/beliefs | 7e0b2a02d719f5b1c889d72ac1e9421971cc120b | [
"MIT"
] | 1 | 2019-07-11T19:13:19.000Z | 2019-07-11T19:13:19.000Z | beliefs/factors/discrete_factor.py | driver-oss/beliefs | 7e0b2a02d719f5b1c889d72ac1e9421971cc120b | [
"MIT"
] | 2 | 2019-07-11T19:02:54.000Z | 2020-02-10T05:20:01.000Z | """
The MIT License (MIT)
Copyright (c) 2013-2017 pgmpy
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
import numpy as np
class DiscreteFactor:
    """A discrete factor (potential) over a set of categorical variables.

    Values are stored as an ndarray of shape ``cardinality``, with one axis
    per variable in ``variables`` order.
    """

    def __init__(self, variables, cardinality, values=None, state_names=None):
        """
        Args
            variables: list,
                variables in the scope of the factor
            cardinality: list,
                cardinalities of each variable, where len(cardinality)=len(variables)
            values: list,
                row vector of values of variables with ordering such that right-most
                variables defined in `variables` cycle through their values the fastest
            state_names: dictionary,
                mapping variables to their states, of format
                {label_name: ['state1', 'state2']}
        """
        self.variables = list(variables)
        self.cardinality = list(cardinality)
        if values is None:
            # Allowed: callers may fill in values later via update_values().
            self._values = None
        else:
            self._values = np.array(values).reshape(self.cardinality)
        self.state_names = state_names

    def __mul__(self, other):
        return self.product(other)

    def copy(self):
        """Return a copy of the factor"""
        return self.__class__(self.variables,
                              self.cardinality,
                              self._values,
                              copy.deepcopy(self.state_names))

    @property
    def values(self):
        return self._values

    def update_values(self, new_values):
        """We make this available because _values is allowed to be None on init"""
        self._values = np.array(new_values).reshape(self.cardinality)

    def get_value_for_state_vector(self, dict_of_states):
        """
        Return the value for a dictionary of variable states.

        Args
            dict_of_states: dictionary,
                of format {label_name1: 'state1', label_name2: 'True'}

        Returns
            probability, a float, the factor value for a specific combination
            of variable states
        """
        assert sorted(dict_of_states.keys()) == sorted(self.variables), \
            "The keys for the dictionary of states must match the variables in factor scope."
        # Translate each state name to its index along that variable's axis.
        state_coordinates = []
        for var in self.variables:
            var_state = dict_of_states[var]
            idx_in_var_axis = self.state_names[var].index(var_state)
            state_coordinates.append(idx_in_var_axis)
        return self.values[tuple(state_coordinates)]

    def add_new_variables_from_other_factor(self, other):
        """Add new variables from `other` factor to the factor (as size-1 axes)."""
        extra_vars = set(other.variables) - set(self.variables)
        # if all of these variables already exist there is nothing to do
        if len(extra_vars) == 0:
            return
        # otherwise, extend the values array with one broadcastable axis per new var
        slice_ = [slice(None)] * len(self.variables)
        slice_.extend([np.newaxis] * len(extra_vars))
        # BUGFIX: multi-dimensional indexing requires a tuple; indexing with a
        # *list* of slices was deprecated and then removed in modern NumPy.
        self._values = self._values[tuple(slice_)]
        self.variables.extend(extra_vars)
        new_card_var = other.get_cardinality(extra_vars)
        self.cardinality.extend([new_card_var[var] for var in extra_vars])

    def get_cardinality(self, variables):
        """Return {variable: cardinality} for the requested variables."""
        return {var: self.cardinality[self.variables.index(var)] for var in variables}

    def product(self, other):
        """Multiply this factor by a scalar or another DiscreteFactor.

        Scalar multiplication returns the scaled values *array* (original
        behavior, kept for compatibility); factor multiplication returns a new
        DiscreteFactor over the union of both scopes.
        """
        left = self.copy()
        if isinstance(other, (int, float)):
            return self.values * other
        else:
            assert isinstance(other, DiscreteFactor), \
                "__mul__ is only defined between subclasses of DiscreteFactor"
            right = other.copy()
            left.add_new_variables_from_other_factor(right)
            right.add_new_variables_from_other_factor(left)

            # destination_axes[i] is the axis of `right` currently holding the
            # variable at position i of `left`'s ordering.
            destination_axes = [right.variables.index(var) for var in left.variables]
            right.variables = [right.variables[idx] for idx in destination_axes]

            # BUGFIX: the original used np.moveaxis(values, source, destination_axes),
            # which applies the INVERSE of this permutation (moveaxis sends axis
            # source[i] TO position destination[i]), misaligning values against the
            # reordered variable list for any non-involutive permutation (e.g. a
            # 3-cycle over three shared variables).  transpose(axes) places old
            # axis axes[i] AT position i, which is the intended operation.
            right._values = np.transpose(right.values, axes=destination_axes)

            left._values = left.values * right.values
            return left

    def marginalize(self, vars):
        """
        Args
            vars: list,
                variables over which to marginalize (sum out) the factor

        Returns
            DiscreteFactor, whose scope is set(self.variables) - set(vars)
        """
        phi = copy.deepcopy(self)

        var_indexes = []
        for var in vars:
            if var not in phi.variables:
                raise ValueError('{} not in scope'.format(var))
            else:
                var_indexes.append(self.variables.index(var))

        index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))
        phi.variables = [self.variables[index] for index in index_to_keep]
        phi.cardinality = [self.cardinality[index] for index in index_to_keep]
        # Summing over the dropped axes leaves the marginal over the kept ones.
        phi._values = np.sum(phi.values, axis=tuple(var_indexes))
        return phi
| 40.68 | 96 | 0.654376 |
import copy
import numpy as np
class DiscreteFactor:
def __init__(self, variables, cardinality, values=None, state_names=None):
self.variables = list(variables)
self.cardinality = list(cardinality)
if values is None:
self._values = None
else:
self._values = np.array(values).reshape(self.cardinality)
self.state_names = state_names
def __mul__(self, other):
return self.product(other)
def copy(self):
return self.__class__(self.variables,
self.cardinality,
self._values,
copy.deepcopy(self.state_names))
@property
def values(self):
return self._values
def update_values(self, new_values):
self._values = np.array(new_values).reshape(self.cardinality)
def get_value_for_state_vector(self, dict_of_states):
assert sorted(dict_of_states.keys()) == sorted(self.variables), \
"The keys for the dictionary of states must match the variables in factor scope."
state_coordinates = []
for var in self.variables:
var_state = dict_of_states[var]
idx_in_var_axis = self.state_names[var].index(var_state)
state_coordinates.append(idx_in_var_axis)
return self.values[tuple(state_coordinates)]
def add_new_variables_from_other_factor(self, other):
extra_vars = set(other.variables) - set(self.variables)
if len(extra_vars) == 0:
return
slice_ = [slice(None)] * len(self.variables)
slice_.extend([np.newaxis] * len(extra_vars))
self._values = self._values[slice_]
self.variables.extend(extra_vars)
new_card_var = other.get_cardinality(extra_vars)
self.cardinality.extend([new_card_var[var] for var in extra_vars])
def get_cardinality(self, variables):
return {var: self.cardinality[self.variables.index(var)] for var in variables}
def product(self, other):
left = self.copy()
if isinstance(other, (int, float)):
return self.values * other
else:
assert isinstance(other, DiscreteFactor), \
"__mul__ is only defined between subclasses of DiscreteFactor"
right = other.copy()
left.add_new_variables_from_other_factor(right)
right.add_new_variables_from_other_factor(left)
source_axes = list(range(right.values.ndim))
destination_axes = [right.variables.index(var) for var in left.variables]
right.variables = [right.variables[idx] for idx in destination_axes]
right._values = np.moveaxis(right.values, source_axes, destination_axes)
left._values = left.values * right.values
return left
def marginalize(self, vars):
phi = copy.deepcopy(self)
var_indexes = []
for var in vars:
if var not in phi.variables:
raise ValueError('{} not in scope'.format(var))
else:
var_indexes.append(self.variables.index(var))
index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))
phi.variables = [self.variables[index] for index in index_to_keep]
phi.cardinality = [self.cardinality[index] for index in index_to_keep]
phi._values = np.sum(phi.values, axis=tuple(var_indexes))
return phi
| true | true |
f72e83933f876cd4572db0e34fcdcc95f8a2fb8c | 6,388 | py | Python | toontown/fishing/FishPhoto.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | toontown/fishing/FishPhoto.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/fishing/FishPhoto.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from direct.directnotify import DirectNotifyGlobal
from panda3d.core import *
from direct.interval.IntervalGlobal import *
import FishGlobals
class DirectRegion(NodePath):
    """An aspect2d card backed by its own offscreen-style display region.

    Used to render a 3D scene (the swimming fish) into a rectangular area of
    the 2D GUI. `load()` lazily builds a private render graph, camera and
    window display region; `unload()` tears them down again.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DirectRegion')

    def __init__(self, parent = aspect2d):
        NodePath.__init__(self)
        self.assign(parent.attachNewNode('DirectRegion'))

    def destroy(self):
        # Destroying just releases the display-region resources.
        self.unload()

    def setBounds(self, *bounds):
        # Card frame, stored for load(); forwarded to CardMaker.setFrame.
        self.bounds = bounds

    def setColor(self, *colors):
        # Card/clear color, stored for load(); forwarded to NodePath.setColor.
        self.color = colors

    def show(self):
        pass

    def hide(self):
        pass

    def load(self):
        """Build (once) the private scene graph, camera, card and display
        region, and return the root of the private scene graph."""
        if not hasattr(self, 'cRender'):
            # Private scene root plus a camera parented under it.
            self.cRender = NodePath('fishSwimRender')
            self.fishSwimCamera = self.cRender.attachNewNode('fishSwimCamera')
            self.cCamNode = Camera('fishSwimCam')
            self.cLens = PerspectiveLens()
            self.cLens.setFov(40, 40)
            self.cLens.setNear(0.1)
            self.cLens.setFar(100.0)
            self.cCamNode.setLens(self.cLens)
            self.cCamNode.setScene(self.cRender)
            self.fishSwimCam = self.fishSwimCamera.attachNewNode(self.cCamNode)

            # Visible 2D card marking where the region sits on screen.
            cm = CardMaker('displayRegionCard')
            apply(cm.setFrame, self.bounds)
            self.card = card = self.attachNewNode(cm.generate())
            apply(card.setColor, self.color)

            # Convert the card's corners into window-relative [0,1] bounds.
            newBounds = card.getTightBounds()
            ll = render2d.getRelativePoint(card, newBounds[0])
            ur = render2d.getRelativePoint(card, newBounds[1])
            newBounds = [ll.getX(),
             ur.getX(),
             ll.getZ(),
             ur.getZ()]
            # render2d runs -1..1; display regions want 0..1, clamped.
            newBounds = map(lambda x: max(0.0, min(1.0, (x + 1.0) / 2.0)), newBounds)

            self.cDr = base.win.makeDisplayRegion(*newBounds)
            self.cDr.setSort(10)
            self.cDr.setClearColor(card.getColor())
            self.cDr.setClearDepthActive(1)
            self.cDr.setClearColorActive(1)
            self.cDr.setCamera(self.fishSwimCam)
        return self.cRender

    def unload(self):
        """Release the display region and all lazily-built nodes."""
        if hasattr(self, 'cRender'):
            base.win.removeDisplayRegion(self.cDr)
            del self.cRender
            del self.fishSwimCamera
            del self.cCamNode
            del self.cLens
            del self.fishSwimCam
            del self.cDr
class FishPhoto(NodePath):
    """GUI widget that displays an animated fish inside a DirectRegion.

    `show()` builds the swim frame for the current fish (actor, optional
    background, intro/swim animation and sound) and `hide()` stops and
    releases everything again.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('FishPhoto')

    def __init__(self, fish = None, parent = aspect2d):
        NodePath.__init__(self)
        self.assign(parent.attachNewNode('FishPhoto'))
        self.fish = fish
        self.actor = None
        self.sound = None
        self.soundTrack = None
        self.track = None
        self.fishFrame = None
        return

    def destroy(self):
        self.hide()
        if hasattr(self, 'background'):
            del self.background
        self.fish = None
        del self.soundTrack
        del self.track
        return

    def update(self, fish):
        # Swap in a different fish; takes effect on the next show().
        self.fish = fish

    def setSwimBounds(self, *bounds):
        # Forwarded to the DirectRegion the first time makeFishFrame runs.
        self.swimBounds = bounds

    def setSwimColor(self, *colors):
        # Forwarded to the DirectRegion the first time makeFishFrame runs.
        self.swimColor = colors

    def load(self):
        pass

    def makeFishFrame(self, actor):
        """Place *actor* inside the (lazily created) display region and pose
        it according to its genus data from FishGlobals; returns the frame."""
        actor.setDepthTest(1)
        actor.setDepthWrite(1)
        if not hasattr(self, 'fishDisplayRegion'):
            self.fishDisplayRegion = DirectRegion(parent=self)
            apply(self.fishDisplayRegion.setBounds, self.swimBounds)
            apply(self.fishDisplayRegion.setColor, self.swimColor)
        frame = self.fishDisplayRegion.load()
        # pitch -> rotate -> scale node chain lets each transform be set
        # independently from the per-genus tuning data below.
        pitch = frame.attachNewNode('pitch')
        rotate = pitch.attachNewNode('rotate')
        scale = rotate.attachNewNode('scale')
        actor.reparentTo(scale)
        # Center the model on its tight bounds before applying offsets.
        bMin, bMax = actor.getTightBounds()
        center = (bMin + bMax) / 2.0
        actor.setPos(-center[0], -center[1], -center[2])
        genus = self.fish.getGenus()
        # Fall back to the default entry (-1) for an unknown genus.
        fishInfo = FishGlobals.FishFileDict.get(genus, FishGlobals.FishFileDict[-1])
        fishPos = fishInfo[5]
        if fishPos:
            actor.setPos(fishPos[0], fishPos[1], fishPos[2])
        scale.setScale(fishInfo[6])
        rotate.setH(fishInfo[7])
        pitch.setP(fishInfo[8])
        pitch.setY(2)
        return frame

    def show(self, showBackground = 0):
        """Build and start the swim display for the current fish."""
        messenger.send('wakeup')
        if self.fishFrame:
            # A previous fish is showing; tear it down first.
            self.actor.cleanup()
            if hasattr(self, 'fishDisplayRegion'):
                self.fishDisplayRegion.unload()
            self.hide()
        self.actor = self.fish.getActor()
        self.actor.setTwoSided(1)
        self.fishFrame = self.makeFishFrame(self.actor)
        if showBackground:
            if not hasattr(self, 'background'):
                background = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
                background = background.find('**/Fish_BG')
                self.background = background
            self.background.setPos(0, 15, 0)
            self.background.setScale(11)
            self.background.reparentTo(self.fishFrame)
        self.sound, loop, delay, playRate = self.fish.getSound()
        if playRate is not None:
            self.actor.setPlayRate(playRate, 'intro')
            self.actor.setPlayRate(playRate, 'swim')
        introDuration = self.actor.getDuration('intro')
        # Play the intro once, then loop the swim cycle.
        track = Parallel(Sequence(Func(self.actor.play, 'intro'), Wait(introDuration), Func(self.actor.loop, 'swim')))
        if self.sound:
            soundTrack = Sequence(Wait(delay), Func(self.sound.play))
            if loop:
                # Pad the looping sound to at least the intro's length.
                duration = max(introDuration, self.sound.length())
                soundTrack.append(Wait(duration - delay))
                track.append(Func(soundTrack.loop))
                self.soundTrack = soundTrack
            else:
                track.append(soundTrack)
        self.track = track
        self.track.start()
        return

    def hide(self):
        """Stop animation/sound and release the display region."""
        if hasattr(self, 'fishDisplayRegion'):
            self.fishDisplayRegion.unload()
        if self.actor:
            self.actor.stop()
        if self.sound:
            self.sound.stop()
            self.sound = None
        if self.soundTrack:
            self.soundTrack.pause()
            self.soundTrack = None
        if self.track:
            self.track.pause()
            self.track = None
        return
| 34.717391 | 118 | 0.591578 | from direct.directnotify import DirectNotifyGlobal
from panda3d.core import *
from direct.interval.IntervalGlobal import *
import FishGlobals
class DirectRegion(NodePath):
notify = DirectNotifyGlobal.directNotify.newCategory('DirectRegion')
def __init__(self, parent = aspect2d):
NodePath.__init__(self)
self.assign(parent.attachNewNode('DirectRegion'))
def destroy(self):
self.unload()
def setBounds(self, *bounds):
self.bounds = bounds
def setColor(self, *colors):
self.color = colors
def show(self):
pass
def hide(self):
pass
def load(self):
if not hasattr(self, 'cRender'):
self.cRender = NodePath('fishSwimRender')
self.fishSwimCamera = self.cRender.attachNewNode('fishSwimCamera')
self.cCamNode = Camera('fishSwimCam')
self.cLens = PerspectiveLens()
self.cLens.setFov(40, 40)
self.cLens.setNear(0.1)
self.cLens.setFar(100.0)
self.cCamNode.setLens(self.cLens)
self.cCamNode.setScene(self.cRender)
self.fishSwimCam = self.fishSwimCamera.attachNewNode(self.cCamNode)
cm = CardMaker('displayRegionCard')
apply(cm.setFrame, self.bounds)
self.card = card = self.attachNewNode(cm.generate())
apply(card.setColor, self.color)
newBounds = card.getTightBounds()
ll = render2d.getRelativePoint(card, newBounds[0])
ur = render2d.getRelativePoint(card, newBounds[1])
newBounds = [ll.getX(),
ur.getX(),
ll.getZ(),
ur.getZ()]
newBounds = map(lambda x: max(0.0, min(1.0, (x + 1.0) / 2.0)), newBounds)
self.cDr = base.win.makeDisplayRegion(*newBounds)
self.cDr.setSort(10)
self.cDr.setClearColor(card.getColor())
self.cDr.setClearDepthActive(1)
self.cDr.setClearColorActive(1)
self.cDr.setCamera(self.fishSwimCam)
return self.cRender
def unload(self):
if hasattr(self, 'cRender'):
base.win.removeDisplayRegion(self.cDr)
del self.cRender
del self.fishSwimCamera
del self.cCamNode
del self.cLens
del self.fishSwimCam
del self.cDr
class FishPhoto(NodePath):
notify = DirectNotifyGlobal.directNotify.newCategory('FishPhoto')
def __init__(self, fish = None, parent = aspect2d):
NodePath.__init__(self)
self.assign(parent.attachNewNode('FishPhoto'))
self.fish = fish
self.actor = None
self.sound = None
self.soundTrack = None
self.track = None
self.fishFrame = None
return
def destroy(self):
self.hide()
if hasattr(self, 'background'):
del self.background
self.fish = None
del self.soundTrack
del self.track
return
def update(self, fish):
self.fish = fish
def setSwimBounds(self, *bounds):
self.swimBounds = bounds
def setSwimColor(self, *colors):
self.swimColor = colors
def load(self):
pass
def makeFishFrame(self, actor):
actor.setDepthTest(1)
actor.setDepthWrite(1)
if not hasattr(self, 'fishDisplayRegion'):
self.fishDisplayRegion = DirectRegion(parent=self)
apply(self.fishDisplayRegion.setBounds, self.swimBounds)
apply(self.fishDisplayRegion.setColor, self.swimColor)
frame = self.fishDisplayRegion.load()
pitch = frame.attachNewNode('pitch')
rotate = pitch.attachNewNode('rotate')
scale = rotate.attachNewNode('scale')
actor.reparentTo(scale)
bMin, bMax = actor.getTightBounds()
center = (bMin + bMax) / 2.0
actor.setPos(-center[0], -center[1], -center[2])
genus = self.fish.getGenus()
fishInfo = FishGlobals.FishFileDict.get(genus, FishGlobals.FishFileDict[-1])
fishPos = fishInfo[5]
if fishPos:
actor.setPos(fishPos[0], fishPos[1], fishPos[2])
scale.setScale(fishInfo[6])
rotate.setH(fishInfo[7])
pitch.setP(fishInfo[8])
pitch.setY(2)
return frame
def show(self, showBackground = 0):
messenger.send('wakeup')
if self.fishFrame:
self.actor.cleanup()
if hasattr(self, 'fishDisplayRegion'):
self.fishDisplayRegion.unload()
self.hide()
self.actor = self.fish.getActor()
self.actor.setTwoSided(1)
self.fishFrame = self.makeFishFrame(self.actor)
if showBackground:
if not hasattr(self, 'background'):
background = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
background = background.find('**/Fish_BG')
self.background = background
self.background.setPos(0, 15, 0)
self.background.setScale(11)
self.background.reparentTo(self.fishFrame)
self.sound, loop, delay, playRate = self.fish.getSound()
if playRate is not None:
self.actor.setPlayRate(playRate, 'intro')
self.actor.setPlayRate(playRate, 'swim')
introDuration = self.actor.getDuration('intro')
track = Parallel(Sequence(Func(self.actor.play, 'intro'), Wait(introDuration), Func(self.actor.loop, 'swim')))
if self.sound:
soundTrack = Sequence(Wait(delay), Func(self.sound.play))
if loop:
duration = max(introDuration, self.sound.length())
soundTrack.append(Wait(duration - delay))
track.append(Func(soundTrack.loop))
self.soundTrack = soundTrack
else:
track.append(soundTrack)
self.track = track
self.track.start()
return
def hide(self):
if hasattr(self, 'fishDisplayRegion'):
self.fishDisplayRegion.unload()
if self.actor:
self.actor.stop()
if self.sound:
self.sound.stop()
self.sound = None
if self.soundTrack:
self.soundTrack.pause()
self.soundTrack = None
if self.track:
self.track.pause()
self.track = None
return
| true | true |
f72e8847156e836eb2f014e2a896ec4b5ce4fc3d | 3,837 | py | Python | sceptre/context.py | janrotter/sceptre | 05c6742ff00c56f7d49e37124344851ea8e144a2 | [
"Apache-2.0"
] | null | null | null | sceptre/context.py | janrotter/sceptre | 05c6742ff00c56f7d49e37124344851ea8e144a2 | [
"Apache-2.0"
] | 1 | 2021-03-26T00:46:38.000Z | 2021-03-26T00:46:38.000Z | sceptre/context.py | isotoma/sceptre | 05c6742ff00c56f7d49e37124344851ea8e144a2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sceptre.context
This module implements the SceptreContext class which holds details about the
paths used in a Sceptre project.
"""
from os import path
class SceptreContext(object):
    """
    SceptreContext is a place that holds data that is relevant to the
    project, including references to the project paths such as the path to
    your Sceptre project, templates path, config path, and the default names
    for your configuration files.

    :param project_path: Absolute path to the base sceptre project folder
    :type project_path: str
    :param command_path: The relative path to either StackGroup or Stack.
    :type command_path: str
    :param user_variables: Used to replace the value of any item in a Config\
            file with a value defined by the CLI flag or in a YAML variable\
            file
    :type user_variables: dict
    :param options: The options specified in by the CLI command
    :type options: dict
    :param output_format: Specify the output format. Available formats:\
            [yaml, json]
    :type output_format: str
    :param no_colour: Specify whether colouring should be used in the CLI\
            output
    :type no_colour: bool
    """

    def __init__(self, project_path, command_path,
                 user_variables=None, options=None, output_format=None,
                 no_colour=False):
        # project_path: absolute path to the base sceptre project folder
        # e.g. absolute_path/to/sceptre_directory
        self.project_path = project_path

        # config_path: holds the project stack_groups
        # e.g {project_path}/config
        self.config_path = "config"  # user definable later in v2

        # command_path: path to either stack group or stack
        # e.g. {project_path/config_path}/command_path
        self.command_path = command_path

        # config_file: stack group config. User definable later in v2
        # e.g. {project_path/config/command_path}/config_file
        self.config_file = "config.yaml"

        # templates_path: holds templates. User definable later in v2
        # e.g. {project_path/}templates
        self.templates_path = "templates"

        # Fix: user_variables was previously assigned twice in a row; the
        # first assignment (`user_variables if user_variables else {}`) was
        # dead code, immediately overwritten. Keep only the effective
        # `is not None` form, which preserves a user-supplied empty dict.
        self.user_variables = user_variables \
            if user_variables is not None else {}

        self.options = options if options else {}
        self.output_format = output_format if output_format else ""
        self.no_colour = no_colour if no_colour is True else False

    def full_config_path(self):
        """
        Returns the config path in the format: ``project_path/config_path``.

        :returns: The absolute path to the config directory
        :rtype: str
        """
        return path.join(self.project_path, self.config_path)

    def full_command_path(self):
        """
        Returns the command path in the format:
        ``project_path/config_path/command_path``.

        :returns: The absolute path to the path that will be executed
        :rtype: str
        """
        return path.join(self.project_path, self.config_path,
                         self.command_path)

    def full_templates_path(self):
        """
        Returns the templates path in the format: project_path/templates_path.

        :returns: The absolute path to the templates directory
        :rtype: str
        """
        return path.join(self.project_path, self.templates_path)

    def command_path_is_stack(self):
        """
        Returns True if the command path is a file.

        :returns: True if the command path is a file
        :rtype: bool
        """
        return path.isfile(
            path.join(
                self.project_path,
                self.config_path,
                self.command_path
            )
        )
| 33.077586 | 79 | 0.649466 |
from os import path
class SceptreContext(object):
def __init__(self, project_path, command_path,
user_variables=None, options=None, output_format=None,
no_colour=False):
self.project_path = project_path
self.config_path = "config"
self.command_path = command_path
self.config_file = "config.yaml"
self.templates_path = "templates"
self.user_variables = user_variables if user_variables else {}
self.user_variables = user_variables\
if user_variables is not None else {}
self.options = options if options else {}
self.output_format = output_format if output_format else ""
self.no_colour = no_colour if no_colour is True else False
def full_config_path(self):
return path.join(self.project_path, self.config_path)
def full_command_path(self):
return path.join(self.project_path, self.config_path,
self.command_path)
def full_templates_path(self):
return path.join(self.project_path, self.templates_path)
def command_path_is_stack(self):
return path.isfile(
path.join(
self.project_path,
self.config_path,
self.command_path
)
)
| true | true |
f72e88acf96609ef2f0f8f0c3513b465a09bb0a5 | 113,890 | py | Python | emscripten.py | conversy/emscripten | 0bebbec826ed50a7e93c45242e1729ac2c002d3e | [
"MIT"
] | 1 | 2020-07-26T05:50:02.000Z | 2020-07-26T05:50:02.000Z | emscripten.py | GwG422/emscripten | ffed1d38b7692989583ad832cea013cc299df1d2 | [
"MIT"
] | null | null | null | emscripten.py | GwG422/emscripten | ffed1d38b7692989583ad832cea013cc299df1d2 | [
"MIT"
] | 1 | 2020-07-26T16:08:10.000Z | 2020-07-26T16:08:10.000Z | # Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""A small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
"""
from __future__ import print_function
import difflib
import os
import json
import subprocess
import re
import time
import logging
import pprint
from collections import OrderedDict
from tools import shared
from tools import gen_struct_info
from tools import jsrun
from tools.response_file import substitute_response_files
from tools.shared import WINDOWS, asstr, path_from_root, exit_with_error, asmjs_mangle, treat_as_user_function
from tools.toolchain_profiler import ToolchainProfiler
from tools.minified_js_name_generator import MinifiedJsNameGenerator
logger = logging.getLogger('emscripten')

# Optional debugging aid: when EMCC_STDERR_FILE is set in the environment,
# stderr output from the JS compiler phase is also logged into that file.
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
  STDERR_FILE = os.path.abspath(STDERR_FILE)
  logger.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
  # Rebound from the path string to an open, writable file handle.
  STDERR_FILE = open(STDERR_FILE, 'w')
def get_configuration():
  """Return the process-wide shared.Configuration, creating it on first use.

  The instance is memoized as an attribute on this function itself, so all
  callers share a single Configuration built from the current environment.
  """
  cached = getattr(get_configuration, 'configuration', None)
  if cached is None:
    cached = shared.Configuration(environ=os.environ)
    get_configuration.configuration = cached
  return cached
def quote(prop):
  """Quote a (possibly dotted) property name for closure compiler mode 2.

  In that mode each dotted component is wrapped in single quotes so closure
  cannot minify it; otherwise the name is returned unchanged.
  """
  if shared.Settings.USE_CLOSURE_COMPILER != 2:
    return prop
  quoted_parts = ("'" + part + "'" for part in prop.split('.'))
  return ''.join(quoted_parts)
def access_quote(prop):
  """Build a property-access suffix for *prop* (e.g. appended after 'Module').

  Uses bracketed string access (['a']['b']) under closure compiler mode 2 so
  the names survive minification, and plain dot access (.a) otherwise.
  """
  if shared.Settings.USE_CLOSURE_COMPILER != 2:
    return '.' + prop
  return ''.join("['" + part + "']" for part in prop.split('.'))
def emscript_fastcomp(infile, outfile, memfile, compiler_engine,
                      temp_files, DEBUG):
  """Runs the emscripten LLVM-to-JS compiler.

  Args:
    infile: The path to the input LLVM assembly file.
    outfile: An open file object where the output is written.

  On any failure the partially-written output file is deleted.
  """
  assert shared.Settings.ASM_JS, 'fastcomp is asm.js-only (mode 1 or 2)'

  success = False

  try:
    # Overview:
    #   * Run LLVM backend to emit JS. JS includes function bodies, memory
    #     initializer, and various metadata
    #   * Run compiler.js on the metadata to emit the shell js code,
    #     pre/post-ambles, JS library dependencies, etc.

    # metadata is modified by reference in some of the below
    # these functions are split up to force variables to go out of scope and
    # allow memory to be reclaimed
    with ToolchainProfiler.profile_block('get_and_parse_backend'):
      backend_output = compile_js(infile, temp_files, DEBUG)
      funcs, metadata, mem_init = parse_fastcomp_output(backend_output, DEBUG)
      fixup_metadata_tables(metadata)
      funcs = fixup_functions(funcs, metadata)
    with ToolchainProfiler.profile_block('compiler_glue'):
      glue, forwarded_data = compiler_glue(metadata, compiler_engine, temp_files, DEBUG)

    with ToolchainProfiler.profile_block('function_tables_and_exports'):
      (post, function_table_data, bundled_args) = (
          function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG))
    with ToolchainProfiler.profile_block('write_output_file'):
      finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG)
    success = True
  finally:
    outfile.close()
    if not success:
      shared.try_delete(outfile.name) # remove partial output
def compile_js(infile, temp_files, DEBUG):
  """Compile infile with asm.js backend, return the contents of the compiled js"""
  with temp_files.get_file('.4.js') as temp_js:
    backend_cmd = create_backend_cmd(infile, temp_js)

    if DEBUG:
      logger.debug('emscript: llvm backend: ' + ' '.join(backend_cmd))
      t = time.time()
    shared.print_compiler_stage(backend_cmd)
    with ToolchainProfiler.profile_block('emscript_llvm_backend'):
      shared.check_call(backend_cmd)
    if DEBUG:
      logger.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))

    # Split up output
    backend_output = open(temp_js).read()
  return backend_output
def parse_fastcomp_output(backend_output, DEBUG):
  """Split the backend's single output blob into its three sections.

  Returns (funcs, metadata, mem_init): the function bodies JS, the parsed
  JSON metadata dict (with several keys normalized/augmented below), and the
  memory initializer text. Also updates global Settings state derived from
  the metadata.
  """
  start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
  end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
  metadata_split_marker = '// EMSCRIPTEN_METADATA'

  start_funcs = backend_output.index(start_funcs_marker)
  end_funcs = backend_output.rindex(end_funcs_marker)
  metadata_split = backend_output.rindex(metadata_split_marker)

  funcs = backend_output[start_funcs + len(start_funcs_marker):end_funcs]
  metadata_raw = backend_output[metadata_split + len(metadata_split_marker):]
  mem_init = backend_output[end_funcs + len(end_funcs_marker):metadata_split]

  # we no longer use the "Runtime" object. TODO: stop emiting it in the backend
  mem_init = mem_init.replace('Runtime.', '')

  try:
    # object_pairs_hook keeps the metadata's key order deterministic.
    metadata = json.loads(metadata_raw, object_pairs_hook=OrderedDict)
  except ValueError:
    logger.error('emscript: failure to parse metadata output from compiler backend. raw output is: \n' + metadata_raw)
    raise

  # This key is being added to fastcomp but doesn't exist in the current
  # version.
  metadata.setdefault('externFunctions', [])

  if 'externUses' not in metadata:
    exit_with_error('Your fastcomp compiler is out of date, please update! (need >= 1.38.26)')

  # JS optimizer turns some heap accesses to others as an optimization, so make HEAP8 imply HEAPU8, HEAP16->HEAPU16, and HEAPF64->HEAPF32.
  if 'Int8Array' in metadata['externUses']:
    metadata['externUses'] += ['Uint8Array']
  if 'Int16Array' in metadata['externUses']:
    metadata['externUses'] += ['Uint16Array']
  if 'Float64Array' in metadata['externUses']:
    metadata['externUses'] += ['Float32Array']

  # If we are generating references to Math.fround() from here in emscripten.py, declare it used as well.
  if provide_fround() or metadata['simd']:
    metadata['externUses'] += ['Math.fround']

  # functions marked llvm.used in the code are exports requested by the user
  shared.Building.user_requested_exports += metadata['exports']

  # In MINIMAL_RUNTIME stackSave() and stackRestore are JS library functions. If LLVM backend generated
  # calls to invoke_*() functions that save and restore the stack, we must include the stack functions
  # explicitly into the build. (In traditional runtime the stack functions are always present, so this
  # tracking is not needed)
  if shared.Settings.MINIMAL_RUNTIME and (len(metadata['invokeFuncs']) > 0 or shared.Settings.LINKABLE):
    shared.Settings.EXPORTED_FUNCTIONS += ['stackSave', 'stackRestore']
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$stackSave', '$stackRestore']

  return funcs, metadata, mem_init
def fixup_metadata_tables(metadata):
  """Normalize the JS source of the function tables in metadata['tables'].

  Mutates metadata in place.
  """
  # if emulating pointer casts, force all tables to the size of the largest
  # (for wasm, we use binaryen's fpcast-emu pass, we don't need to do anything
  # here)
  if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
    max_size = 0
    for k, v in metadata['tables'].items():
      max_size = max(max_size, v.count(',') + 1)
    for k, v in metadata['tables'].items():
      curr = v.count(',') + 1
      if curr < max_size:
        # Pad shorter tables with 0 entries up to max_size; an empty table
        # ('[]') needs the padding without a leading comma.
        if v.count('[]') == 1:
          metadata['tables'][k] = v.replace(']', (','.join(['0'] * (max_size - curr)) + ']'))
        else:
          metadata['tables'][k] = v.replace(']', (',0' * (max_size - curr)) + ']')

  if shared.Settings.SIDE_MODULE:
    # Side modules declare their tables under a different name so they do not
    # clash with the main module's tables.
    for k in metadata['tables'].keys():
      metadata['tables'][k] = metadata['tables'][k].replace('var FUNCTION_TABLE_', 'var SIDE_FUNCTION_TABLE_')
def fixup_functions(funcs, metadata):
  """Post-process the backend-emitted function bodies JS.

  Substitutes #FM_sig# function-table mask placeholders, and (when the JS
  optimizer will not run) rewrites '+float' coercions into explicit 'N.0'
  literals so the output is valid asm.js. Returns the rewritten JS text.
  """
  # function table masks
  table_sizes = {}
  for k, v in metadata['tables'].items():
    # undercounts by one, but that is what we want
    table_sizes[k] = str(v.count(','))
    # if shared.Settings.ASSERTIONS >= 2 and table_sizes[k] == 0:
    # shared.warning('no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
  funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)

  # fix +float into float.0, if not running js opts
  if not shared.Settings.RUNNING_JS_OPTS:
    def fix_dot_zero(m):
      num = m.group(3)
      # TODO: handle 0x floats?
      if num.find('.') < 0:
        e = num.find('e')
        if e < 0:
          num += '.0'
        else:
          # Insert the '.0' before the exponent, e.g. 1e3 -> 1.0e3.
          num = num[:e] + '.0' + num[e:]
      return m.group(1) + m.group(2) + num
    funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', fix_dot_zero, funcs)

  return funcs
def compiler_glue(metadata, compiler_engine, temp_files, DEBUG):
  """Run the JS compiler (compiler.js) over the backend metadata.

  Returns (glue, forwarded_data): the generated shell/glue JS, and the JSON
  string of data forwarded back from the JS compiler.
  """
  if DEBUG:
    logger.debug('emscript: js compiler glue')
    t = time.time()

  # FIXME: do these one by one as normal js lib funcs
  metadata['declares'] = [i64_func for i64_func in metadata['declares'] if i64_func not in ['getHigh32', 'setHigh32']]

  update_settings_glue(metadata, DEBUG)

  assert not (metadata['simd'] and shared.Settings.WASM), 'SIMD is used, but not supported in WASM mode yet'
  assert not (shared.Settings.SIMD and shared.Settings.WASM), 'SIMD is requested, but not supported in WASM mode yet'

  glue, forwarded_data = compile_settings(compiler_engine, temp_files)

  if DEBUG:
    logger.debug(' emscript: glue took %s seconds' % (time.time() - t))

  return glue, forwarded_data
def analyze_table(function_table_data):
  """Estimate the total function-table size and store it in
  shared.Settings.WASM_TABLE_SIZE."""
  def entries_in(table_js):
    # Count the entries between the brackets of the table's JS source.
    inner = table_js[table_js.index('[') + 1: table_js.index(']')]
    if not inner: # empty table
      return 0
    return inner.count(',') + 1
  # note that this is a minimal estimate, as when asm2wasm lays out tables it adds padding
  total = 0
  for table_js in function_table_data.values():
    total += entries_in(table_js)
  shared.Settings.WASM_TABLE_SIZE = total
# Extracts from JS library code dependencies to runtime primitives.
def get_asm_extern_primitives(pre):
  """Extract runtime-primitive dependencies declared by JS library code.

  Looks for a '// ASM_LIBRARY EXTERN PRIMITIVES: a, b, ...' marker line in
  *pre* and returns the listed names with the 'Math_' prefix rewritten to
  'Math.'. Returns an empty list when no marker is present.
  """
  match = re.search(r'\/\/ ASM_LIBRARY EXTERN PRIMITIVES: ([^\n]*)', pre)
  if not match:
    return []
  names = match.group(1).split(',')
  return [name.strip().replace('Math_', 'Math.') for name in names]
def compute_minimal_runtime_initializer_and_exports(post, initializers, exports, receiving):
  """Fill in MINIMAL_RUNTIME placeholders in the postamble JS.

  Returns the updated (post, receiving) pair; in Wasm mode the receiving
  code is folded into `post` and an empty string is returned for it.
  """
  # Generate invocations for all global initializers directly off the asm export object, e.g. asm['__GLOBAL__INIT']();
  post = post.replace('/*** RUN_GLOBAL_INITIALIZERS(); ***/', '\n'.join(["asm['" + x + "']();" for x in global_initializer_funcs(initializers)]))

  if shared.Settings.WASM:
    # Declare all exports out to global JS scope so that JS library functions can access them in a way that minifies well with Closure
    # e.g. var a,b,c,d,e,f;
    exports_that_are_not_initializers = [x for x in exports if x not in initializers]
    if shared.Settings.WASM_BACKEND:
      # In Wasm backend the exports are still unmangled at this point, so mangle the names here
      exports_that_are_not_initializers = [asmjs_mangle(x) for x in exports_that_are_not_initializers]
    post = post.replace('/*** ASM_MODULE_EXPORTS_DECLARES ***/', 'var ' + ','.join(exports_that_are_not_initializers) + ';')

    # Generate assignments from all asm.js/wasm exports out to the JS variables above: e.g. a = asm['a']; b = asm['b'];
    post = post.replace('/*** ASM_MODULE_EXPORTS ***/', receiving)
    receiving = ''

  return post, receiving
def function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG):
  """Assemble everything the asm.js/wasm module boundary needs.

  Writes the preamble straight to *outfile* and returns
  (post, function_table_data, bundled_args), where bundled_args carries the
  pieces later consumed by create_module_asmjs in finalize_output.
  """
  if DEBUG:
    logger.debug('emscript: python processing: function tables and exports')
    t = time.time()

  forwarded_json = json.loads(forwarded_data)

  # merge in information from llvm backend
  function_table_data = metadata['tables']

  if shared.Settings.WASM:
    analyze_table(function_table_data)

  # merge forwarded data
  shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']

  pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')

  pre = apply_script_source(pre)
  asm_extern_primitives = get_asm_extern_primitives(pre)
  metadata['externUses'] += asm_extern_primitives

  pre = memory_and_global_initializers(pre, metadata, mem_init)
  pre, funcs_js = get_js_funcs(pre, funcs)
  all_exported_functions = get_all_exported_functions(function_table_data)
  all_implemented = get_all_implemented(forwarded_json, metadata)
  report_missing_symbols(all_implemented, pre)
  implemented_functions = get_implemented_functions(metadata)
  pre = include_asm_consts(pre, forwarded_json, metadata)
  pre = apply_table(pre)
  # The preamble is complete; stream it out and drop the reference so the
  # (potentially large) string can be reclaimed.
  outfile.write(pre)
  pre = None

  # Move preAsms to their right place
  def move_preasm(m):
    contents = m.groups(0)[0]
    outfile.write(contents + '\n')
    return ''

  if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO and len(funcs_js) > 1:
    funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', move_preasm, funcs_js[1])

  if 'pre' in function_table_data:
    pre_tables = function_table_data['pre']
    del function_table_data['pre']
  else:
    pre_tables = ''

  function_table_sigs = list(function_table_data.keys())

  in_table, debug_tables, function_tables_defs = make_function_tables_defs(
    implemented_functions, all_implemented, function_table_data, metadata)

  exported_implemented_functions = get_exported_implemented_functions(
    all_exported_functions, all_implemented, metadata)

  # List of function signatures of used 'invoke_xxx()' functions in the application
  # For backwards compatibility if one might be using a mismatching Emscripten compiler version, if 'invokeFuncs' is not present in metadata,
  # use the full list of signatures in function table and generate invoke_() functions for all signatures in the program (producing excessive code size)
  # we must also emit the full list if we are emitting code that can be linked later
  if 'invokeFuncs' in metadata and not shared.Settings.LINKABLE:
    invoke_function_names = metadata['invokeFuncs']
  else:
    invoke_function_names = ['invoke_' + x for x in function_table_sigs]

  asm_setup = create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata)
  basic_funcs = create_basic_funcs(function_table_sigs, invoke_function_names)
  basic_vars = create_basic_vars(exported_implemented_functions, forwarded_json, metadata)

  funcs_js += create_mftCall_funcs(function_table_data)

  exports = create_exports(exported_implemented_functions, in_table, function_table_data, metadata)

  # calculate globals
  try:
    del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
  except KeyError:
    pass
  if not shared.Settings.RELOCATABLE:
    global_vars = metadata['externs']
  else:
    global_vars = [] # linkable code accesses globals through function calls
  global_funcs = set(key for key, value in forwarded_json['Functions']['libraryFunctions'].items() if value != 2)
  global_funcs = sorted(global_funcs.difference(set(global_vars)).difference(implemented_functions))
  if shared.Settings.RELOCATABLE:
    global_funcs += ['g$' + extern for extern in metadata['externs']]
    global_funcs += ['fp$' + extern for extern in metadata['externFunctions']]

  # Tracks the set of used (minified) function names in
  # JS symbols imported to asm.js module.
  minified_js_names = MinifiedJsNameGenerator()

  # Converts list of imports ['foo', 'bar', ...] to a dictionary of
  # name mappings in form { 'minified': 'unminified', ... }
  def define_asmjs_import_names(imports):
    if shared.Settings.MINIFY_ASMJS_IMPORT_NAMES:
      return [(minified_js_names.generate(), i) for i in imports]
    else:
      return [(i, i) for i in imports]

  basic_funcs = define_asmjs_import_names(basic_funcs)
  global_funcs = define_asmjs_import_names(global_funcs)
  basic_vars = define_asmjs_import_names(basic_vars)
  global_vars = define_asmjs_import_names(global_vars)

  bg_funcs = basic_funcs + global_funcs
  bg_vars = basic_vars + global_vars
  asm_global_funcs = create_asm_global_funcs(bg_funcs, metadata)
  asm_global_vars = create_asm_global_vars(bg_vars)

  the_global = create_the_global(metadata)
  sending_vars = bg_funcs + bg_vars
  sending = OrderedDict([(math_fix(minified), unminified) for (minified, unminified) in sending_vars])

  if shared.Settings.WASM:
    add_standard_wasm_imports(sending)

  # Emit the import object with sorted keys for deterministic output.
  sorted_sending_keys = sorted(sending.keys())
  sending = '{ ' + ', '.join('"' + k + '": ' + sending[k] for k in sorted_sending_keys) + ' }'

  receiving = create_receiving(function_table_data, function_tables_defs,
                               exported_implemented_functions, metadata['initializers'])

  post = apply_table(post)
  post = apply_static_code_hooks(post)

  if shared.Settings.MINIMAL_RUNTIME:
    post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], [mangled for mangled, unmangled in shared.Settings.MODULE_EXPORTS], receiving)

  function_tables_impls = make_function_tables_impls(function_table_data)
  final_function_tables = '\n'.join(function_tables_impls) + '\n' + function_tables_defs
  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    # With emulated function pointers the tables live outside the asm module;
    # strip the asm-export wrappers and drop dynCall definitions.
    final_function_tables = (
      final_function_tables
      .replace("asm['", '')
      .replace("']", '')
      .replace('var SIDE_FUNCTION_TABLE_', 'var FUNCTION_TABLE_')
      .replace('var dynCall_', '//')
    )

  if DEBUG:
    logger.debug('asm text sizes' + str([
      [len(s) for s in funcs_js], len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables),
      len('\n'.join(function_tables_impls)), len(function_tables_defs) + (function_tables_defs.count('\n') * len(' ')),
      len(exports), len(the_global), len(sending), len(receiving)]))
    logger.debug(' emscript: python processing: function tables and exports took %s seconds' % (time.time() - t))

  bundled_args = (funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
                  asm_global_funcs, pre_tables, final_function_tables, exports)

  return (post, function_table_data, bundled_args)
def finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG):
    """Assemble the final module text from the bundled pieces and write it,
    followed by any cyberdwarf debug data, to outfile."""
    sigs = function_table_data.keys()
    module = create_module_asmjs(sigs, metadata, *bundled_args)

    if DEBUG:
        logger.debug('emscript: python processing: finalize')
        t = time.time()

    write_output_file(outfile, post, module)
    # Drop the reference so the (potentially large) module text can be freed.
    module = None

    if DEBUG:
        logger.debug('  emscript: python processing: finalize took %s seconds' % (time.time() - t))

    write_cyberdwarf_data(outfile, metadata)
def collapse_redundant_vars(code):
    """Collapse runs of "var a = ...;\n var b = ...;" statements into a single
    "var a = ..., b = ...;" statement (asm.js output only)."""
    if shared.Settings.WASM:
        # Does not matter when targeting Wasm.
        return code
    # Adjacent var statements overlap under this regex, so a single pass is
    # not enough; iterate to a fixed point (converges in O(log(N)) passes).
    previous = None
    while code != previous:
        previous = code
        code = re.sub(r'(var [^;]*);\s*var ', r'\1,\n ', code)
    return code
def global_initializer_funcs(initializers):
    """Return the list of global-initializer entry points to use.

    With more than one ctor they are grouped behind a single 'globalCtors'
    name — except in EVAL_CTORS mode, where each ctor must stay individually
    evaluable, so the original list is kept.
    """
    should_group = len(initializers) > 1 and not shared.Settings.EVAL_CTORS
    return ['globalCtors'] if should_group else initializers
# Each .cpp file with global constructors generates a __GLOBAL__init() function
# that must be called to construct that compilation unit's globals. This emits
# a single globalCtors() function, living inside the asm.js/wasm module and
# exported to JS, that calls them all at application startup.
def create_global_initializer(initializers):
    """Return JS source for globalCtors(), or '' when no grouping applies."""
    # No dummy empty function when there are no ctors (saves code space), and
    # no grouping in EVAL_CTORS mode (ctors stay individually evaluable).
    if 'globalCtors' not in global_initializer_funcs(initializers):
        return ''
    calls = '\n '.join(i + '();' for i in initializers)
    return ''' function globalCtors() {
 %s
}''' % calls
def create_module_asmjs(function_table_sigs, metadata,
                        funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
                        asm_global_funcs, pre_tables, final_function_tables, exports):
    """Assemble all the pieces of the asm.js module into an ordered list of
    text fragments; write_output_file() writes them out in exactly this order.
    """
    receiving += create_named_globals(metadata)
    runtime_funcs = create_runtime_funcs_asmjs(exports, metadata)
    asm_start_pre = create_asm_start_pre(asm_setup, the_global, sending, metadata)
    memory_views = create_memory_views(metadata)
    asm_temp_vars = create_asm_temp_vars(metadata)
    asm_runtime_thread_local_vars = create_asm_runtime_thread_local_vars()

    # Stack variables only exist for fixed (non-relocatable) layouts; a wasm
    # side module does not emit its own stack bookkeeping.
    stack = ''
    if not shared.Settings.RELOCATABLE and not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
        if 'STACKTOP' in shared.Settings.ASM_PRIMITIVE_VARS:
            stack += apply_memory('  var STACKTOP = {{{ STACK_BASE }}};\n')
        if 'STACK_MAX' in shared.Settings.ASM_PRIMITIVE_VARS:
            stack += apply_memory('  var STACK_MAX = {{{ STACK_MAX }}};\n')

    if 'tempFloat' in shared.Settings.ASM_PRIMITIVE_VARS:
        temp_float = '  var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround() else '0.0')
    else:
        temp_float = ''
    async_state = '  var asyncState = 0;\n' if shared.Settings.EMTERPRETIFY_ASYNC else ''
    f0_fround = '  const f0 = Math_fround(0);\n' if provide_fround() else ''

    replace_memory = create_replace_memory(metadata)

    start_funcs_marker = '\n// EMSCRIPTEN_START_FUNCS\n'

    asm_end = create_asm_end(exports)

    # All var declarations are merged into one statement where possible.
    asm_variables = collapse_redundant_vars(memory_views + asm_global_vars + asm_temp_vars + asm_runtime_thread_local_vars + '\n' + asm_global_funcs + stack + temp_float + async_state + f0_fround)
    asm_global_initializer = create_global_initializer(metadata['initializers'])

    module = [
        asm_start_pre,
        asm_variables,
        replace_memory,
        start_funcs_marker,
        asm_global_initializer
    ] + runtime_funcs + funcs_js + [
        '\n  ',
        pre_tables, final_function_tables, asm_end,
        '\n', receiving, ';\n'
    ]

    if shared.Settings.SIDE_MODULE:
        # Side modules register their function-table signatures with the parent.
        module.append('''
parentModule['registerFunctions'](%s, Module);
''' % str([str(f) for f in function_table_sigs]))

    return module
def write_output_file(outfile, post, module):
    """Write each module fragment, then the postamble, to outfile.

    Fragments are normalized and written one at a time, replacing each list
    entry in place, so only one normalized copy is held in memory at once.
    """
    for idx, piece in enumerate(module):
        module[idx] = normalize_line_endings(piece)
        outfile.write(module[idx])

    post = normalize_line_endings(post)
    outfile.write(post)
def write_cyberdwarf_data(outfile, metadata):
    """Dump the backend's cyberdwarf debug metadata to a sibling '.cd' file."""
    if not shared.Settings.CYBERDWARF:
        return
    assert 'cyberdwarf_data' in metadata
    cd_file_name = outfile.name + ".cd"
    with open(cd_file_name, 'w') as f:
        json.dump({'cyberdwarf': metadata['cyberdwarf_data']}, f)
def create_backend_cmd(infile, temp_js):
    """Create asm.js backend command from settings dict.

    Builds the llvm asm.js-backend invocation, translating each relevant
    entry of shared.Settings into its corresponding backend flag.
    """
    args = [
        shared.LLVM_COMPILER, infile, '-march=js', '-filetype=asm', '-o', temp_js,
        '-emscripten-stack-size=%d' % shared.Settings.TOTAL_STACK,
        '-O%s' % shared.Settings.OPT_LEVEL,
    ]
    if shared.Settings.PRECISE_F32:
        args += ['-emscripten-precise-f32']
    if shared.Settings.USE_PTHREADS:
        args += ['-emscripten-enable-pthreads']
    if shared.Settings.WARN_UNALIGNED:
        args += ['-emscripten-warn-unaligned']
    if shared.Settings.RESERVED_FUNCTION_POINTERS > 0:
        args += ['-emscripten-reserved-function-pointers=%d' % shared.Settings.RESERVED_FUNCTION_POINTERS]
    if shared.Settings.ASSERTIONS > 0:
        args += ['-emscripten-assertions=%d' % shared.Settings.ASSERTIONS]
    if shared.Settings.ALIASING_FUNCTION_POINTERS == 0:
        args += ['-emscripten-no-aliasing-function-pointers']
    if shared.Settings.EMULATED_FUNCTION_POINTERS:
        args += ['-emscripten-emulated-function-pointers']
    if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
        args += ['-emscripten-emulate-function-pointer-casts']
    if shared.Settings.RELOCATABLE:
        args += ['-emscripten-relocatable']
        # Relocatable output always uses global base 0; the real base is
        # applied at load time.
        args += ['-emscripten-global-base=0']
    elif shared.Settings.GLOBAL_BASE >= 0:
        args += ['-emscripten-global-base=%d' % shared.Settings.GLOBAL_BASE]
    if shared.Settings.SIDE_MODULE:
        args += ['-emscripten-side-module']
    if shared.Settings.LEGALIZE_JS_FFI != 1:
        args += ['-emscripten-legalize-javascript-ffi=0']
    if shared.Settings.DISABLE_EXCEPTION_CATCHING != 1:
        args += ['-enable-emscripten-cpp-exceptions']
        if shared.Settings.DISABLE_EXCEPTION_CATCHING == 2:
            # 'fake' is a placeholder so the flag is never empty.
            args += ['-emscripten-cpp-exceptions-whitelist=' + ','.join(shared.Settings.EXCEPTION_CATCHING_WHITELIST or ['fake'])]
    if not shared.Settings.EXIT_RUNTIME:
        args += ['-emscripten-no-exit-runtime']
    if shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG:
        args += ['-emscripten-asmjs-work-around-ios-9-right-shift-bug']
    if shared.Settings.WASM:
        args += ['-emscripten-wasm']
        if shared.Building.is_wasm_only():
            args += ['-emscripten-only-wasm']
    if shared.Settings.CYBERDWARF:
        args += ['-enable-cyberdwarf']
    return args
def optimize_syscalls(declares, DEBUG):
    """Disables filesystem if only a limited subset of syscalls is used.

    Our syscalls are static, and so if we see a very limited set of them - in
    particular, no open() syscall and just simple writing - then we don't need
    full filesystem support. If FORCE_FILESYSTEM is set, we can't do this. We
    also don't do it if INCLUDE_FULL_LIBRARY, since not including the
    filesystem would mean not including the full JS libraries, and the same
    for MAIN_MODULE since a side module might need the filesystem.
    """
    for setting in ('FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE'):
        if shared.Settings[setting]:
            return

    if shared.Settings.FILESYSTEM == 0:
        # Without filesystem support, the syscall set is irrelevant.
        shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
        return

    syscall_prefixes = ('__syscall', 'fd_', '__wasi_fd_')
    used_syscalls = [d for d in declares if d.startswith(syscall_prefixes)]
    # Check whether the only filesystem syscalls used are close, ioctl,
    # llseek and write. Without open etc. nothing substantial can be done,
    # so extra filesystem support can be dropped.
    trivial_syscalls = {
        '__syscall6', '__syscall54', '__syscall140',
        'fd_seek', '__wasi_fd_seek',
        'fd_write', '__wasi_fd_write',
        'fd_close', '__wasi_fd_close',
    }
    if set(used_syscalls).issubset(trivial_syscalls):
        if DEBUG:
            logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in used_syscalls))
        shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
def is_int(x):
    """Return True if x can be interpreted as an integer by int().

    Also catches TypeError so that non-numeric, non-string inputs such as
    None return False instead of raising (int(None) raises TypeError, which
    the previous ValueError-only handler let escape).
    """
    try:
        int(x)
        return True
    except (ValueError, TypeError):
        return False
def align_memory(addr):
    """Round addr up to the next 16-byte boundary."""
    remainder = addr % 16
    if remainder == 0:
        return addr
    return addr + (16 - remainder)
def align_static_bump(metadata):
    """Align the reported static-data size to 16 bytes, updating metadata
    in place, and return the aligned value."""
    aligned = align_memory(metadata['staticBump'])
    metadata['staticBump'] = aligned
    return aligned
def update_settings_glue(metadata, DEBUG):
    """Fold the metadata reported by the backend into shared.Settings."""
    optimize_syscalls(metadata['declares'], DEBUG)

    if shared.Settings.CYBERDWARF:
        shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.append("cyberdwarf_Debugger")
        shared.Settings.EXPORTED_FUNCTIONS.append("cyberdwarf_Debugger")

    # Integrate info from backend
    if shared.Settings.SIDE_MODULE:
        # we don't need any JS library contents in side modules
        shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []

    if metadata.get('cantValidate') and shared.Settings.ASM_JS != 2:
        shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
        shared.Settings.ASM_JS = 2

    # Library functions to include = (requested + declared by compiled code)
    # minus those the compiled code itself implements, plus externs.
    all_funcs = shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE + [shared.JS.to_nice_ident(d) for d in metadata['declares']]
    implemented_funcs = [x[1:] for x in metadata['implementedFunctions']]
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = sorted(set(all_funcs).difference(implemented_funcs))
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [x[1:] for x in metadata['externs']]

    if metadata['simd']:
        shared.Settings.SIMD = 1
        if shared.Settings.ASM_JS != 2:
            shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of SIMD')
            shared.Settings.ASM_JS = 2

    shared.Settings.MAX_GLOBAL_ALIGN = metadata['maxGlobalAlign']
    shared.Settings.IMPLEMENTED_FUNCTIONS = metadata['implementedFunctions']

    # Extract the list of function signatures that MAIN_THREAD_EM_ASM blocks in
    # the compiled code have, each signature will need a proxy function invoker
    # generated for it.
    def read_proxied_function_signatures(asmConsts):
        proxied_function_signatures = set()
        for _, sigs, proxying_types in asmConsts.values():
            for sig, proxying_type in zip(sigs, proxying_types):
                if proxying_type == 'sync_on_main_thread_':
                    proxied_function_signatures.add(sig + '_sync')
                elif proxying_type == 'async_on_main_thread_':
                    proxied_function_signatures.add(sig + '_async')
        return list(proxied_function_signatures)

    shared.Settings.PROXIED_FUNCTION_SIGNATURES = read_proxied_function_signatures(metadata['asmConsts'])

    shared.Settings.STATIC_BUMP = align_static_bump(metadata)

    if shared.Settings.WASM_BACKEND:
        shared.Settings.BINARYEN_FEATURES = metadata['features']
        shared.Settings.WASM_TABLE_SIZE = metadata['tableSize']
        if shared.Settings.RELOCATABLE:
            # When building relocatable output (e.g. MAIN_MODULE) the reported table
            # size does not include the reserved slot at zero for the null pointer.
            # Instead we use __table_base to offset the elements by 1.
            shared.Settings.WASM_TABLE_SIZE += 1
        shared.Settings.MAIN_READS_PARAMS = metadata['mainReadsParams']
# static code hooks
class StaticCodeHooks:
    # JS snippets spliced into the output at the {{{ ATINITS }}},
    # {{{ ATMAINS }}} and {{{ ATEXITS }}} markers by apply_static_code_hooks().
    # Populated (as strings) by apply_forwarded_data() from the JS compiler's
    # forwarded data; the empty lists are just initial placeholders.
    atinits = []
    atmains = []
    atexits = []
def apply_static_code_hooks(code):
    """Splice the collected static code hooks into their template markers."""
    replacements = (
        ('{{{ ATINITS }}}', StaticCodeHooks.atinits),
        ('{{{ ATMAINS }}}', StaticCodeHooks.atmains),
        ('{{{ ATEXITS }}}', StaticCodeHooks.atexits),
    )
    for marker, snippet in replacements:
        code = code.replace(marker, snippet)
    return code
def apply_forwarded_data(forwarded_data):
    """Absorb the JSON blob forwarded by the JS compiler into global state."""
    forwarded_json = json.loads(forwarded_data)
    # Be aware of JS static allocations
    shared.Settings.STATIC_BUMP = forwarded_json['STATIC_BUMP']
    shared.Settings.DYNAMICTOP_PTR = forwarded_json['DYNAMICTOP_PTR']
    # Be aware of JS static code hooks (stored as plain strings, ready for
    # splicing by apply_static_code_hooks())
    StaticCodeHooks.atinits = str(forwarded_json['ATINITS'])
    StaticCodeHooks.atmains = str(forwarded_json['ATMAINS'])
    StaticCodeHooks.atexits = str(forwarded_json['ATEXITS'])
def compile_settings(compiler_engine, temp_files):
    """Run the JS compiler (src/compiler.js) and return (glue, forwarded_data).

    The compiler's stdout is split on the '//FORWARDED_DATA:' marker into the
    JS glue text and a JSON blob, which is applied to global state before
    returning.
    """
    # Save settings to a file to work around v8 issue 1579
    with temp_files.get_file('.txt') as settings_file:
        with open(settings_file, 'w') as s:
            json.dump(shared.Settings.to_dict(), s, sort_keys=True)

        # Call js compiler
        env = os.environ.copy()
        env['EMCC_BUILD_DIR'] = os.getcwd()
        out = jsrun.run_js_tool(path_from_root('src', 'compiler.js'), compiler_engine,
                                [settings_file], stdout=subprocess.PIPE, stderr=STDERR_FILE,
                                cwd=path_from_root('src'), env=env)
    assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
    glue, forwarded_data = out.split('//FORWARDED_DATA:')

    apply_forwarded_data(forwarded_data)

    return glue, forwarded_data
class Memory():
    """Computes the static memory layout: globals, then stack, then heap.

    Exposes global_base, static_bump, stack_low/high/base/max and
    dynamic_base as attributes; apply_memory() substitutes them into the JS.
    """

    def __init__(self):
        # Note: if RELOCATABLE, then only relative sizes can be computed, and we don't
        # actually write out any absolute memory locations ({{{ STACK_BASE }}}
        # does not exist, etc.)

        # Memory layout:
        #  * first the static globals
        self.global_base = shared.Settings.GLOBAL_BASE
        self.static_bump = shared.Settings.STATIC_BUMP
        #  * then the stack (up on fastcomp, down on upstream)
        self.stack_low = align_memory(self.global_base + self.static_bump)
        self.stack_high = align_memory(self.stack_low + shared.Settings.TOTAL_STACK)
        if shared.Settings.WASM_BACKEND:
            # upstream backend: stack grows downwards from stack_high
            self.stack_base = self.stack_high
            self.stack_max = self.stack_low
        else:
            # fastcomp: stack grows upwards from stack_low
            self.stack_base = self.stack_low
            self.stack_max = self.stack_high
        #  * then dynamic memory begins
        self.dynamic_base = align_memory(self.stack_high)

        if self.dynamic_base >= shared.Settings.TOTAL_MEMORY:
            exit_with_error('Memory is not large enough for static data (%d) plus the stack (%d), please increase TOTAL_MEMORY (%d) to at least %d' % (self.static_bump, shared.Settings.TOTAL_STACK, shared.Settings.TOTAL_MEMORY, self.dynamic_base))
def apply_memory(js):
    """Substitute the statically-computed memory layout values into js and
    record the dynamic base in shared.Settings."""
    memory = Memory()
    replacements = (
        ('{{{ STATIC_BUMP }}}', str(memory.static_bump)),
        ('{{{ STACK_BASE }}}', str(memory.stack_base)),
        ('{{{ STACK_MAX }}}', str(memory.stack_max)),
        ('{{{ DYNAMIC_BASE }}}', str(memory.dynamic_base)),
    )
    for marker, value in replacements:
        js = js.replace(marker, value)
    logger.debug('global_base: %d stack_base: %d, stack_max: %d, dynamic_base: %d, static bump: %d', memory.global_base, memory.stack_base, memory.stack_max, memory.dynamic_base, memory.static_bump)

    shared.Settings.DYNAMIC_BASE = memory.dynamic_base

    return js
def apply_table(js):
    """Substitute the final wasm table size into js."""
    return js.replace('{{{ WASM_TABLE_SIZE }}}', str(shared.Settings.WASM_TABLE_SIZE))
def apply_script_source(js):
    """Substitute the output target basename into js."""
    return js.replace('{{{ TARGET_BASENAME }}}', shared.Settings.TARGET_BASENAME)
def memory_and_global_initializers(pre, metadata, mem_init):
    """Patch the 'STATICTOP = STATIC_BASE + 0;' placeholder in `pre` with the
    real static bump, the global-initializer registration and the memory
    initializer, then apply memory layout and static code hooks."""
    if shared.Settings.SIMD == 1:
        # Prepend the SIMD.js polyfill.
        pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre

    staticbump = shared.Settings.STATIC_BUMP

    pthread = ''
    if shared.Settings.USE_PTHREADS:
        # Only the main thread runs the global initializers.
        pthread = 'if (!ENVIRONMENT_IS_PTHREAD)'

    global_initializers = ''
    if not shared.Settings.MINIMAL_RUNTIME:
        # In traditional runtime, global initializers are pushed to the __ATINIT__ array to be processed when runtime is loaded
        # In MINIMAL_RUNTIME global initializers are invoked directly off of the asm[''] export object, so this does not apply.
        global_initializers = global_initializer_funcs(metadata['initializers'])
        if len(global_initializers) > 0:
            global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in global_initializers)
            global_initializers = '/* global initializers */ {pthread} __ATINIT__.push({global_initializers});'.format(pthread=pthread, global_initializers=global_initializers)
        else:
            global_initializers = '/* global initializers */ /*__ATINIT__.push();*/'

    pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''\
STATICTOP = STATIC_BASE + {staticbump};
{global_initializers}
{mem_init}'''.format(staticbump=staticbump,
                     global_initializers=global_initializers,
                     mem_init=mem_init))

    if shared.Settings.SIDE_MODULE:
        # Side modules use the runtime-provided 'gb' instead of a fixed base.
        pre = pre.replace('GLOBAL_BASE', 'gb')

    pre = apply_memory(pre)
    pre = apply_static_code_hooks(pre)

    return pre
def get_js_funcs(pre, funcs):
    """Split the '// ASM_LIBRARY FUNCTIONS' section out of `pre`.

    Returns (pre_without_that_section, funcs_js) where funcs_js is a list of
    code chunks beginning with `funcs` itself.
    """
    funcs_js = [funcs]
    marker = '// ASM_LIBRARY FUNCTIONS\n'
    pieces = pre.split(marker)
    if len(pieces) > 1:
        pre = pieces[0]
        funcs_js.append(pieces[1])
    return pre, funcs_js
def get_all_exported_functions(function_table_data):
    """Collect every function name to export, from both asm.js and JS."""
    exported = set(shared.Settings.EXPORTED_FUNCTIONS)

    # Additional functions to export from asm, if they are implemented.
    for additional_export in shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE:
        exported.add('_' + additional_export)

    if shared.Settings.EXPORT_FUNCTION_TABLES:
        # Pull each '_'-prefixed entry out of every table literal
        # ('var FT = [entry, entry, ...];').
        for table in function_table_data.values():
            entries = table.split('[')[1].split(']')[0]
            for func in entries.split(','):
                if func[0] == '_':
                    exported.add(func)

    return exported
def get_all_implemented(forwarded_json, metadata):
    """Union of functions implemented in compiled code and in the JS glue."""
    compiled = set(metadata['implementedFunctions'])
    glue = forwarded_json['Functions']['implementedFunctions']
    return compiled.union(glue)
def report_missing_symbols(all_implemented, pre):
    """Warn or error about user-exported functions that were never implemented.

    all_implemented: set of function names implemented in compiled code or JS.
    pre: generated JS glue source, scanned for JS-library definitions.
    """
    # we are not checking anyway, so just skip this
    if not shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS and not shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
        return

    # the initial list of missing functions are that the user explicitly exported
    # but were not implemented in compiled code
    missing = list(set(shared.Settings.USER_EXPORTED_FUNCTIONS) - all_implemented)

    for requested in missing:
        # A JS-library implementation present in the glue counts as implemented.
        if ('function ' + asstr(requested)) in pre:
            continue
        # special-case malloc, EXPORTED by default for internal use, but we bake in a
        # trivial allocator and warn at runtime if used in ASSERTIONS
        # (bugfix: this previously compared the whole `missing` *list* to the
        # string '_malloc', which could never be true; the individual symbol
        # must be compared)
        if requested == '_malloc':
            continue
        if shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
            exit_with_error('undefined exported function: "%s"', requested)
        elif shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
            shared.warning('undefined exported function: "%s"', requested)
def get_exported_implemented_functions(all_exported_functions, all_implemented, metadata):
    """Compute the sorted list of implemented functions to export from asm."""
    funcs = set(metadata['exports'])
    export_bindings = shared.Settings.EXPORT_BINDINGS
    export_all = shared.Settings.EXPORT_ALL

    for key in all_implemented:
        wanted = (export_all
                  or key in all_exported_functions
                  or (export_bindings and key.startswith('_emscripten_bind')))
        if wanted:
            funcs.add(key)

    if not export_all:
        # Export the aliases too; otherwise a side module that imports the
        # alias could not reach the actual implementation.
        for name, alias in metadata['aliases'].items():
            if alias in all_implemented and name in all_exported_functions:
                funcs.add(alias)

    funcs = list(funcs) + global_initializer_funcs(metadata['initializers'])

    if shared.Settings.ALLOW_MEMORY_GROWTH:
        funcs.append('_emscripten_replace_memory')
    if not shared.Settings.SIDE_MODULE and not shared.Settings.MINIMAL_RUNTIME:
        funcs += ['stackAlloc', 'stackSave', 'stackRestore']
    if shared.Settings.USE_PTHREADS:
        funcs += ['establishStackSpace']
    if shared.Settings.EMTERPRETIFY:
        funcs += ['emterpret']
        if shared.Settings.EMTERPRETIFY_ASYNC:
            funcs += ['setAsyncState', 'emtStackSave', 'emtStackRestore', 'getEmtStackMax', 'setEmtStackMax']

    return sorted(set(funcs))
def get_implemented_functions(metadata):
    """Return the set of function names implemented in the compiled code."""
    implemented = metadata['implementedFunctions']
    return set(implemented)
def proxy_debug_print(sync):
    """JS snippet that logs a proxied call when PTHREADS_DEBUG is on."""
    if not shared.Settings.PTHREADS_DEBUG:
        return ''
    if sync:
        return 'warnOnce("sync proxying function " + code);'
    return 'warnOnce("async proxying function " + code);'
def include_asm_consts(pre, forwarded_json, metadata):
    """Inject the EM_ASM constant array, its invoker functions, and EM_JS
    functions into the '// === Body ===' section of the glue code."""
    if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
        if metadata['asmConsts']:
            exit_with_error('EM_ASM is not yet supported in shared wasm module (it cannot be stored in the wasm itself, need some solution)')

    asm_consts, all_sigs = all_asm_consts(metadata)
    asm_const_funcs = []
    for sig, call_type in all_sigs:
        if 'j' in sig:
            exit_with_error('emscript: EM_ASM should not receive i64s as inputs, they are not valid in JS')
        if '_emscripten_asm_const_' + call_type + sig in forwarded_json['Functions']['libraryFunctions']:
            continue  # Only one invoker needs to be emitted for each ASM_CONST (signature x call_type) item
        forwarded_json['Functions']['libraryFunctions']['_emscripten_asm_const_' + call_type + sig] = 1

        args = ['a%d' % i for i in range(len(sig) - 1)]
        all_args = ['code'] + args

        pre_asm_const = ''

        if shared.Settings.USE_PTHREADS:
            sync_proxy = call_type == 'sync_on_main_thread_'
            async_proxy = call_type == 'async_on_main_thread_'
            proxied = sync_proxy or async_proxy
            if proxied:
                # In proxied function calls, positive integers 1, 2, 3, ... denote pointers
                # to regular C compiled functions. Negative integers -1, -2, -3, ... denote
                # indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
                # ... over to the negative integers starting at -1.
                proxy_args = ['-1 - code', str(int(sync_proxy))] + args
                pre_asm_const += '  if (ENVIRONMENT_IS_PTHREAD) { ' + proxy_debug_print(sync_proxy) + 'return _emscripten_proxy_to_main_thread_js(' + ', '.join(proxy_args) + '); }\n'

        if shared.Settings.EMTERPRETIFY_ASYNC and shared.Settings.ASSERTIONS:
            # we cannot have an EM_ASM on the stack when saving/loading
            pre_asm_const += "  assert(typeof EmterpreterAsync !== 'object' || EmterpreterAsync.state !== 2, 'cannot have an EM_ASM on the stack when emterpreter pauses/resumes - the JS is not emterpreted, so we would end up running it again from the start');\n"

        asm_const_funcs.append(r'''
function _emscripten_asm_const_%s(%s) {
%s  return ASM_CONSTS[code](%s);
}''' % (call_type + asstr(sig), ', '.join(all_args), pre_asm_const, ', '.join(args)))

    asm_consts_text = '\nvar ASM_CONSTS = [' + ',\n '.join(asm_consts) + '];\n'
    asm_funcs_text = '\n'.join(asm_const_funcs) + '\n'

    em_js_funcs = create_em_js(forwarded_json, metadata)
    em_js_text = '\n'.join(em_js_funcs) + '\n'

    body_marker = '// === Body ==='
    return pre.replace(body_marker, body_marker + '\n' + asm_consts_text + asstr(asm_funcs_text) + em_js_text)
def parentheses_match(body, openIdx, closeIdx):
    """Return True iff the delimiters at body[openIdx] and body[closeIdx] are
    a matched pair (works for any open/close character pair, e.g. {} or ()).

    closeIdx may be negative, in which case it is taken relative to the end.
    """
    if closeIdx < 0:
        closeIdx += len(body)
    open_char = body[openIdx]
    close_char = body[closeIdx]
    depth = 1
    for i in range(openIdx + 1, closeIdx + 1):
        ch = body[i]
        if ch == open_char:
            depth += 1
        elif ch == close_char:
            depth -= 1
            if depth <= 0:
                # The opener closed at position i; a true pair means i is
                # exactly closeIdx.
                return i == closeIdx
    return False
def trim_asm_const_body(body):
    """Strip matched surrounding quotes, braces and parens from an EM_ASM
    body, repeating until no further enclosing pair can be removed."""
    body = body.strip()
    while True:
        before = body
        if len(body) > 1 and body[0] == '"' and body[-1] == '"':
            body = body[1:-1].replace('\\"', '"').strip()
        if len(body) > 1 and body[0] == '{' and body[-1] == '}' and parentheses_match(body, 0, -1):
            body = body[1:-1].strip()
        if len(body) > 1 and body[0] == '(' and body[-1] == ')' and parentheses_match(body, 0, -1):
            body = body[1:-1].strip()
        if body == before:
            return body
def all_asm_consts(metadata):
    """Build the ASM_CONSTS JS function-literal array and collect every
    (signature, call_type) pair used by EM_ASM blocks.

    Returns (asm_consts, all_sigs) where asm_consts is indexed by the EM_ASM
    block id.
    """
    asm_consts = [0] * len(metadata['asmConsts'])
    all_sigs = []
    for key, value in metadata['asmConsts'].items():
        const, sigs, call_types = value
        body = trim_asm_const_body(asstr(const))
        # Parameter count is the widest signature seen, minus the return slot.
        arity = max(len(s) for s in sigs) - 1
        params = ', '.join('$' + str(i) for i in range(arity))
        asm_consts[int(key)] = 'function(' + params + ') ' + '{ ' + body + ' }'
        assert len(sigs) == len(call_types)
        all_sigs.extend(zip(sigs, call_types))
    return asm_consts, all_sigs
def unfloat(s):
    """Lower a 'f' (float) signature char to 'd' (double) for ffis."""
    if s == 'f':
        return 'd'
    return s
def make_function_tables_defs(implemented_functions, all_implemented, function_table_data, metadata):
    """Emit the asm.js function-table literals plus the helper functions
    (bad-pointer stubs, import wrappers, cast-emulation wrappers) they need.

    Returns (in_table, debug_tables, function_tables_defs):
      in_table   - implemented functions referenced directly from a table
                   (only populated under EMULATED_FUNCTION_POINTERS)
      debug_tables - per-signature original table bodies (ASSERTIONS >= 2)
      function_tables_defs - the JS source: helpers, an END_FUNCS marker,
                   then the table definitions themselves
    """
    class Counter(object):
        # Mutable state shared by the nested helpers below.
        next_bad_item = 0  # numbers the generated bad-pointer stubs b0, b1, ...
        next_item = 0      # position within the current table body
        pre = []           # helper functions emitted ahead of the current table

    in_table = set()
    debug_tables = {}

    def make_params(sig):
        # 'p0,p1,...' parameter list for a signature (sig[0] is the return type).
        return ','.join('p%d' % p for p in range(len(sig) - 1))

    def make_coerced_params(sig):
        # Same list with asm.js coercions applied (floats lowered for ffi).
        return ','.join(shared.JS.make_coercion('p%d', unfloat(sig[p + 1])) % p for p in range(len(sig) - 1))

    def make_coercions(sig):
        # 'p0 = <coercion>;...' statements re-coercing each parameter on entry.
        return ';'.join('p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p + 1])) for p in range(len(sig) - 1)) + ';'

    # when emulating function pointer casts, we need to know what is the target of each pointer
    if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
        function_pointer_targets = {}
        for sig, table in function_table_data.items():
            start = table.index('[')
            end = table.rindex(']')
            body = table[start + 1:end].split(',')
            for i, parsed in enumerate(x.strip() for x in body):
                if parsed != '0':
                    assert i not in function_pointer_targets
                    function_pointer_targets[i] = [sig, str(parsed)]

    def make_table(sig, raw):
        # Build one table: returns (helpers_source, table_definition_source).
        if '[]' in raw:
            return ('', '')  # empty table
        params = make_params(sig)
        coerced_params = make_coerced_params(sig)
        coercions = make_coercions(sig)

        def make_bad(target=None):
            # Emit a stub for an invalid table slot: aborts (or silently
            # continues when 'abort' was excluded from the runtime imports).
            i = Counter.next_bad_item
            Counter.next_bad_item += 1
            if target is None:
                target = i
            name = 'b' + str(i)
            if not shared.Settings.ASSERTIONS:
                if 'abort' in shared.Settings.RUNTIME_FUNCS_TO_IMPORT:
                    code = 'abort(%s);' % target
                else:
                    # Advanced use: developers is generating code that does not include the function 'abort()'. Generate invalid
                    # function pointers to be no-op passthroughs that silently continue execution.
                    code = '\n/*execution is supposed to abort here, but you did not include "abort" in RUNTIME_FUNCS_TO_IMPORT (to save code size?). Silently trucking through, enjoy :)*/\n'
            else:
                code = 'nullFunc_' + sig + '(%d);' % target
            if sig[0] != 'v':
                code += 'return %s' % shared.JS.make_initializer(sig[0]) + ';'
            return name, make_func(name, code, params, coercions)

        bad, bad_func = make_bad()  # the default bad func
        if shared.Settings.ASSERTIONS <= 1:
            Counter.pre = [bad_func]
        else:
            Counter.pre = []
        start = raw.index('[')
        end = raw.rindex(']')
        body = raw[start + 1:end].split(',')
        if shared.Settings.EMULATED_FUNCTION_POINTERS:
            def receive(item):
                if item == '0':
                    return item
                if item not in all_implemented:
                    # this is not implemented; it would normally be wrapped, but with emulation, we just use it directly outside
                    return item
                in_table.add(item)
                return "asm['" + item + "']"
            body = [receive(b) for b in body]
        # Reserved function pointer slots (starting at index 1) dispatch to JS.
        for j in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
            curr = 'jsCall_%s_%s' % (sig, j)
            body[1 + j] = curr
            implemented_functions.add(curr)
        Counter.next_item = 0

        def fix_item(item):
            # Rewrite one table slot; relies on Counter.next_item advancing
            # exactly once per slot, in order.
            j = Counter.next_item
            Counter.next_item += 1
            newline = Counter.next_item % 30 == 29
            if item == '0':
                # emulate all non-null pointer calls, if asked to
                if j > 0 and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM and j in function_pointer_targets:
                    proper_sig, proper_target = function_pointer_targets[j]
                    if shared.Settings.EMULATED_FUNCTION_POINTERS:
                        if proper_target in all_implemented:
                            proper_target = "asm['" + proper_target + "']"

                    def make_emulated_param(i):
                        if i >= len(sig):
                            return shared.JS.make_initializer(proper_sig[i])  # extra param, just send a zero
                        return shared.JS.make_coercion('p%d' % (i - 1), proper_sig[i], convert_from=sig[i])

                    proper_code = proper_target + '(' + ','.join([make_emulated_param(i + 1) for i in range(len(proper_sig) - 1)]) + ')'
                    if proper_sig[0] != 'v':
                        # proper sig has a return, which the wrapper may or may not use
                        proper_code = shared.JS.make_coercion(proper_code, proper_sig[0])
                        if proper_sig[0] != sig[0]:
                            # first coercion ensured we call the target ok; this one ensures we return the right type in the wrapper
                            proper_code = shared.JS.make_coercion(proper_code, sig[0], convert_from=proper_sig[0])
                        if sig[0] != 'v':
                            proper_code = 'return ' + proper_code
                    else:
                        # proper sig has no return, we may need a fake return
                        if sig[0] != 'v':
                            proper_code = 'return ' + shared.JS.make_initializer(sig[0])
                    name = 'fpemu_%s_%d' % (sig, j)
                    wrapper = make_func(name, proper_code, params, coercions)
                    Counter.pre.append(wrapper)
                    return name if not newline else (name + '\n')
                if shared.Settings.ASSERTIONS <= 1:
                    return bad if not newline else (bad + '\n')
                specific_bad, specific_bad_func = make_bad(j)
                Counter.pre.append(specific_bad_func)
                return specific_bad if not newline else (specific_bad + '\n')
            clean_item = item.replace("asm['", '').replace("']", '')
            # when emulating function pointers, we don't need wrappers
            # but if relocating, then we also have the copies in-module, and do
            # in wasm we never need wrappers though
            if clean_item not in implemented_functions and not (shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.RELOCATABLE) and not shared.Settings.WASM:
                # this is imported into asm, we must wrap it
                call_ident = clean_item
                if call_ident in metadata['redirects']:
                    call_ident = metadata['redirects'][call_ident]
                if not call_ident.startswith('_') and not call_ident.startswith('Math_'):
                    call_ident = '_' + call_ident
                code = call_ident + '(' + coerced_params + ')'
                if sig[0] != 'v':
                    # ffis cannot return float
                    if sig[0] == 'f':
                        code = '+' + code
                    code = 'return ' + shared.JS.make_coercion(code, sig[0])
                code += ';'
                Counter.pre.append(make_func(clean_item + '__wrapper', code, params, coercions))
                assert not sig == 'X', 'must know the signature in order to create a wrapper for "%s" (TODO for shared wasm modules)' % item
                return clean_item + '__wrapper'
            return item if not newline else (item + '\n')

        if shared.Settings.ASSERTIONS >= 2:
            debug_tables[sig] = body
        body = ','.join(fix_item(b) for b in body)
        return ('\n'.join(Counter.pre), ''.join([raw[:start + 1], body, raw[end:]]))

    infos = [make_table(sig, raw) for sig, raw in function_table_data.items()]
    Counter.pre = []

    function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n'
    function_tables_defs += '\n// EMSCRIPTEN_END_FUNCS\n'
    function_tables_defs += '\n'.join([info[1] for info in infos])
    return in_table, debug_tables, function_tables_defs
def make_func(name, code, params, coercions):
    """Render a JS function definition string from its pieces."""
    return 'function {}({}) {{\n {} {}\n}}'.format(name, params, coercions, code)
def math_fix(g):
    """Strip a 'Math_' prefix from a name ('Math_floor' -> 'floor');
    other names pass through unchanged."""
    if g.startswith('Math_'):
        return g.split('_')[1]
    return g
# asm.js function tables have one table in each linked asm.js module, so we
# can't just dynCall into them - ftCall exists for that purpose. In wasm,
# even linked modules share the table, so it's all fine.
def asm_js_emulated_function_pointers():
    # Truthy only for asm.js (non-wasm) output with EMULATED_FUNCTION_POINTERS.
    return shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.WASM
def make_function_tables_impls(function_table_data):
    """Emit dynCall_SIG dispatchers and jsCall_SIG_N reserved-slot thunks for
    every function-table signature; returns them as a list of JS snippets."""
    function_tables_impls = []
    for sig, table in function_table_data.items():
        args = ','.join(['a' + str(i) for i in range(1, len(sig))])
        arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
        coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i]) for i in range(1, len(sig))])
        # The comma count of the table literal (entries - 1) is used as the
        # index mask — assumes tables are padded to a power-of-two size
        # (TODO confirm against the table generator).
        sig_mask = str(table.count(','))
        if not (shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS):
            ret = 'FUNCTION_TABLE_%s[index&%s](%s)' % (sig, sig_mask, coerced_args)
        else:
            # for wasm with emulated function pointers, emit an mft_SIG(..) call, we avoid asm.js function tables there.
            ret = 'mftCall_%s(index%s%s)' % (sig, ',' if len(sig) > 1 else '', coerced_args)
        ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion(ret, sig[0])
        if not asm_js_emulated_function_pointers():
            function_tables_impls.append('''
function dynCall_%s(index%s%s) {
  index = index|0;
  %s
  %s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
        else:
            # With emulated pointers on asm.js, dynCall is just an alias of ftCall.
            function_tables_impls.append('''
var dynCall_%s = ftCall_%s;
''' % (sig, sig))

        # Thunks occupying the RESERVED_FUNCTION_POINTERS slots; they forward
        # into JS via jsCall_SIG with ffi-safe coercions.
        ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], ffi_arg=True) for i in range(1, len(sig))])
        for i in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
            jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall_%s(%d%s%s)' % (sig, i, ',' if ffi_args else '', ffi_args), sig[0], ffi_result=True)
            function_tables_impls.append('''
function jsCall_%s_%s(%s) {
  %s
  %s;
}
''' % (sig, i, args, arg_coercions, jsret))
    return function_tables_impls
def create_mftCall_funcs(function_table_data):
  """Generate mftCall_SIG helpers for asm.js emulated function pointers.

  Only relevant for relocatable asm.js with emulated function pointers;
  returns [] otherwise. Each helper dispatches a pointer either into this
  module's own table (fast path) or to ftCall_SIG for other modules.
  """
  if not asm_js_emulated_function_pointers():
    return []
  if shared.Settings.WASM or not shared.Settings.RELOCATABLE:
    return []

  mftCall_funcs = []
  # in wasm, emulated function pointers are just simple table calls
  for sig, table in function_table_data.items():
    return_type, sig_args = sig[0], sig[1:]
    num_args = len(sig_args)
    params = ','.join(['ptr'] + ['p%d' % i for i in range(num_args)])
    # unfloat() is used at the ffi boundary: f32 args travel as doubles.
    coerced_params = ','.join([shared.JS.make_coercion('ptr', 'i')] + [shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i])) for i in range(num_args)])
    coercions = ';'.join(['ptr = ptr | 0'] + ['p%d = %s' % (i, shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i]))) for i in range(num_args)]) + ';'
    mini_coerced_params = ','.join([shared.JS.make_coercion('p%d' % i, sig_args[i]) for i in range(num_args)])
    maybe_return = '' if return_type == 'v' else 'return'
    # slow path: forward to ftCall_SIG, which knows about other modules' tables.
    final_return = maybe_return + ' ' + shared.JS.make_coercion('ftCall_' + sig + '(' + coerced_params + ')', unfloat(return_type)) + ';'
    if shared.Settings.EMULATED_FUNCTION_POINTERS == 1:
      body = final_return
    else:
      # mode 2: check whether the pointer falls inside our own table segment
      # ([fb, fb+mask]) and call directly if so, avoiding the ftCall overhead.
      sig_mask = str(table.count(','))
      body = ('if (((ptr|0) >= (fb|0)) & ((ptr|0) < (fb + ' + sig_mask + ' | 0))) { ' + maybe_return + ' ' +
              shared.JS.make_coercion(
                'FUNCTION_TABLE_' + sig + '[(ptr-fb)&' + sig_mask + '](' +
                mini_coerced_params + ')', return_type, ffi_arg=True
              ) + '; ' + ('return;' if return_type == 'v' else '') + ' }' + final_return)
    mftCall_funcs.append(make_func('mftCall_' + sig, body, params, coercions) + '\n')
  return mftCall_funcs
def get_function_pointer_error(sig, function_table_sigs):
  """Return the JS statement executed when a bad function pointer of
  signature `sig` is called.

  `function_table_sigs` is accepted for interface compatibility.
  """
  if shared.Settings.ASSERTIONS == 0:
    # Release build: do the most minimal sized abort possible.
    return "abort();"
  # Assertions-enabled build: report the pointer value (x) and the signature.
  return "abortFnPtrError(x, '%s');" % sig
def signature_sort_key(sig):
  """Return a scoring closure ranking candidate signatures by closeness to `sig`.

  The closure maps another signature string to a numeric score; lower is a
  better match. The heuristic strongly rewards shared prefixes (possible
  dropped parameters), rewards textual similarity and identically-placed
  parameter types, and penalizes overall length and length differences.
  """
  def closure(other):
    ret = 0
    minlen = min(len(other), len(sig))
    # Fix: this was `min(...)`, contradicting the variable's name; that
    # over-weighted the length-difference and per-character bonuses whenever
    # the candidate was shorter than `sig`.
    maxlen = max(len(other), len(sig))
    if other.startswith(sig) or sig.startswith(other):
      ret -= 1000  # prioritize prefixes, could be dropped params
    ret -= 133 * difflib.SequenceMatcher(a=other, b=sig).ratio()  # prioritize on diff similarity
    ret += 15 * abs(len(other) - len(sig)) / float(maxlen)  # deprioritize the bigger the length difference is
    for i in range(minlen):
      if other[i] == sig[i]:
        ret -= 5 / float(maxlen)  # prioritize on identically-placed params
    ret += 20 * len(other)  # deprioritize on length
    return ret
  return closure
def asm_backend_uses(metadata, symbol):
  """Whether the compiled code references `symbol` (per metadata['externUses']).

  A `symbol` ending in '.' queries a whole namespace, e.g. 'Math.' asks
  whether any Math.* member is used.
  """
  # When dynamically linking (or with SAFE_HEAP / Emterpreter) we cannot know
  # up front what linked-in modules will need, so report everything as used.
  if shared.Settings.MAIN_MODULE or shared.Settings.SIDE_MODULE or shared.Settings.SAFE_HEAP or shared.Settings.EMTERPRETIFY:
    return True
  uses = metadata['externUses']
  if symbol.endswith('.'):
    # namespace query
    return any(use.startswith(symbol) for use in uses)
  # single-symbol query
  return symbol in uses
def create_asm_global_funcs(bg_funcs, metadata):
  """Emit the `var X=global.Y;`/`var X=env.Y;` imports at the top of the asm
  module: used Math.* members, library functions from `bg_funcs`
  ((minified, unminified) pairs), SIMD globals, and pthreads Atomics.
  """
  maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'max', 'clz32']]
  if provide_fround():
    maths += ['Math.fround']

  asm_global_funcs = ''
  # only import the Math members the compiled code actually uses.
  for math in maths:
    if asm_backend_uses(metadata, math):
      asm_global_funcs += '  var ' + math.replace('.', '_') + '=global' + access_quote(math) + ';\n'

  # library functions come in from the env object under their minified names.
  asm_global_funcs += ''.join(['  var ' + unminified + '=env' + access_quote(math_fix(minified)) + ';\n' for (minified, unminified) in bg_funcs])
  asm_global_funcs += global_simd_funcs(access_quote, metadata)
  if shared.Settings.USE_PTHREADS:
    asm_global_funcs += ''.join(['  var Atomics_' + ty + '=global' + access_quote('Atomics') + access_quote(ty) + ';\n' for ty in ['load', 'store', 'exchange', 'compareExchange', 'add', 'sub', 'and', 'or', 'xor']])
  return asm_global_funcs
def create_asm_global_vars(bg_vars):
  """Emit `var X=env.Y|0;` declarations for the integer globals in `bg_vars`
  ((minified, unminified) pairs)."""
  decls = ['  var ' + unminified + '=env' + access_quote(minified) + '|0;\n'
           for minified, unminified in bg_vars]
  asm_global_vars = ''.join(decls)
  if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
    # wasm side modules internally define their stack; the real values are
    # filled in at module startup time.
    asm_global_vars += '\n  var STACKTOP = 0, STACK_MAX = 0;\n'
  return asm_global_vars
def global_simd_funcs(access_quote, metadata):
  """Emit `var SIMD_*` imports for the SIMD.js types/operations in use.

  Returns '' when the build does not use SIMD at all.
  """
  # Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
  if not (metadata['simd'] or shared.Settings.SIMD):
    return ''

  def string_contains_any(s, str_list):
    return any(sub in s for sub in str_list)

  # symbols make_simd_types() would list but that do not exist in SIMD.js;
  # they are filtered out of the generated imports below.
  nonexisting_simd_symbols = ['Int8x16_fromInt8x16', 'Uint8x16_fromUint8x16', 'Int16x8_fromInt16x8', 'Uint16x8_fromUint16x8', 'Int32x4_fromInt32x4', 'Uint32x4_fromUint32x4', 'Float32x4_fromFloat32x4', 'Float64x2_fromFloat64x2']
  nonexisting_simd_symbols += ['Int32x4_addSaturate', 'Int32x4_subSaturate', 'Uint32x4_addSaturate', 'Uint32x4_subSaturate']
  nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8', 'Float64x2'] for y in ['load2', 'store2']]
  nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8'] for y in ['load1', 'store1']]

  simd = make_simd_types(metadata)

  simd_func_text = ''
  simd_func_text += ''.join(['  var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simd['types']])

  def generate_symbols(types, funcs):
    # one import line per (type, function) pair, minus nonexistent symbols.
    symbols = ['  var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in types for g in funcs]
    symbols = [x for x in symbols if not string_contains_any(x, nonexisting_simd_symbols)]
    return ''.join(symbols)

  simd_func_text += generate_symbols(simd['int_types'], simd['int_funcs'])
  simd_func_text += generate_symbols(simd['float_types'], simd['float_funcs'])
  simd_func_text += generate_symbols(simd['bool_types'], simd['bool_funcs'])

  # SIMD conversions (not bitcasts) between same lane sizes:
  def add_simd_cast(dst, src):
    return '  var SIMD_' + dst + '_from' + src + '=SIMD_' + dst + '.from' + src + ';\n'

  def add_simd_casts(t1, t2):
    return add_simd_cast(t1, t2) + add_simd_cast(t2, t1)

  # Bug: Skip importing conversions for int<->uint for now, they don't validate
  # as asm.js. https://bugzilla.mozilla.org/show_bug.cgi?id=1313512
  # This is not an issue when building SSEx code, because it doesn't use these.
  # (but it will be an issue if using SIMD.js intrinsics from vector.h to
  # explicitly call these)
  # if metadata['simdInt8x16'] and metadata['simdUint8x16']:
  #   simd_func_text += add_simd_casts('Int8x16', 'Uint8x16')
  # if metadata['simdInt16x8'] and metadata['simdUint16x8']:
  #   simd_func_text += add_simd_casts('Int16x8', 'Uint16x8')
  # if metadata['simdInt32x4'] and metadata['simdUint32x4']:
  #   simd_func_text += add_simd_casts('Int32x4', 'Uint32x4')

  if metadata['simdInt32x4'] and metadata['simdFloat32x4']:
    simd_func_text += add_simd_casts('Int32x4', 'Float32x4')
  if metadata['simdUint32x4'] and metadata['simdFloat32x4']:
    simd_func_text += add_simd_casts('Uint32x4', 'Float32x4')
  if metadata['simdInt32x4'] and metadata['simdFloat64x2']:
    simd_func_text += add_simd_cast('Int32x4', 'Float64x2') # Unofficial, needed for emscripten_int32x4_fromFloat64x2
  if metadata['simdUint32x4'] and metadata['simdFloat64x2']:
    simd_func_text += add_simd_cast('Uint32x4', 'Float64x2') # Unofficial, needed for emscripten_uint32x4_fromFloat64x2

  # Unofficial, Bool64x2 does not yet exist, but needed for Float64x2 comparisons.
  if metadata['simdFloat64x2']:
    simd_func_text += '  var SIMD_Int32x4_fromBool64x2Bits = global.SIMD.Int32x4.fromBool64x2Bits;\n'
  return simd_func_text
def make_simd_types(metadata):
  """Collect the SIMD.js type names and per-category operation name lists
  implied by the compiler `metadata` flags (simdInt8x16, simdFloat32x4, ...).

  Returns a dict with 'types', '{float,int,bool}_types' and the matching
  '*_funcs' lists; ordering matches what the import emitter expects.
  """
  base_funcs = ['splat', 'check', 'extractLane', 'replaceLane']
  intfloat_funcs = ['add', 'sub', 'neg', 'mul',
                    'equal', 'lessThan', 'greaterThan',
                    'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
                    'select', 'swizzle', 'shuffle',
                    'load', 'store', 'load1', 'store1', 'load2', 'store2']
  intbool_funcs = ['and', 'xor', 'or', 'not']

  int_types = []
  float_types = []
  bool_types = []
  # Each present integer type contributes itself plus a bitcast-from helper.
  for flag, ty in [('simdUint8x16', 'Uint8x16'),
                   ('simdInt8x16', 'Int8x16'),
                   ('simdUint16x8', 'Uint16x8'),
                   ('simdInt16x8', 'Int16x8'),
                   ('simdUint32x4', 'Uint32x4')]:
    if metadata[flag]:
      int_types.append(ty)
      intfloat_funcs.append('from%sBits' % ty)
  # Always import Int32x4 when building with -s SIMD=1, since memcpy is SIMD optimized.
  if metadata['simdInt32x4'] or shared.Settings.SIMD:
    int_types.append('Int32x4')
    intfloat_funcs.append('fromInt32x4Bits')
  for flag, ty in [('simdFloat32x4', 'Float32x4'), ('simdFloat64x2', 'Float64x2')]:
    if metadata[flag]:
      float_types.append(ty)
      intfloat_funcs.append('from%sBits' % ty)
  for flag, ty in [('simdBool8x16', 'Bool8x16'), ('simdBool16x8', 'Bool16x8'),
                   ('simdBool32x4', 'Bool32x4'), ('simdBool64x2', 'Bool64x2')]:
    if metadata[flag]:
      bool_types.append(ty)

  float_funcs = base_funcs + intfloat_funcs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
                                               'abs', 'reciprocalApproximation', 'reciprocalSqrtApproximation']
  int_funcs = base_funcs + intfloat_funcs + intbool_funcs + ['shiftLeftByScalar', 'shiftRightByScalar', 'addSaturate', 'subSaturate']
  bool_funcs = base_funcs + intbool_funcs + ['anyTrue', 'allTrue']

  return {
    'types': float_types + int_types + bool_types,
    'float_types': float_types,
    'int_types': int_types,
    'bool_types': bool_types,
    'funcs': base_funcs,
    'float_funcs': float_funcs,
    'int_funcs': int_funcs,
    'bool_funcs': bool_funcs,
    'intfloat_funcs': intfloat_funcs,
    'intbool_funcs': intbool_funcs,
  }
def asm_safe_heap():
  """optimized safe heap in asm, when we can"""
  if not shared.Settings.SAFE_HEAP:
    return False
  # logging and relocatable builds must use the slower JS implementation.
  return not shared.Settings.SAFE_HEAP_LOG and not shared.Settings.RELOCATABLE
def provide_fround():
  """Whether Math.fround should be made available to the compiled code
  (precise-f32 or SIMD builds)."""
  settings = shared.Settings
  return settings.PRECISE_F32 or settings.SIMD
def create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata):
  """Emit the JS that must exist before the asm/wasm module: debug tables,
  nullFunc error handlers, relocatable-linking accessors (g$.../fp$...),
  invoke wrappers, and (for emulated pointers) the dynCall implementations.
  """
  function_table_sigs = function_table_data.keys()

  asm_setup = ''

  # ASSERTIONS >= 2: emit per-signature tables of function names, so a bad
  # call can report which function the pointer actually refers to.
  if shared.Settings.ASSERTIONS >= 2:
    debug_tables_map = 'var debug_tables = {\n'
    for sig in function_table_data:
      # if the table is empty, debug_tables will not contain it
      body = debug_tables.get(sig, [])
      asm_setup += 'var debug_table_' + sig + ' = [' + ','.join(['0' if x == '0' else "'" + x.replace("'", '"') + "'" for x in body]) + '];\n'
      debug_tables_map += "  '" + sig + "': debug_table_" + sig + ',\n'
    asm_setup += debug_tables_map + '};\n'

  if shared.Settings.ASSERTIONS:
    for sig in function_table_sigs:
      asm_setup += 'function nullFunc_' + sig + '(x) { ' + get_function_pointer_error(sig, function_table_sigs) + ' }\n'

  if shared.Settings.RELOCATABLE:
    if not shared.Settings.SIDE_MODULE:
      asm_setup += 'var gb = GLOBAL_BASE, fb = 0;\n'
    # side modules resolve external symbols through the parent module.
    side = 'parent' if shared.Settings.SIDE_MODULE else ''

    def check(extern):
      # JS assertion text verifying an external symbol actually arrived.
      if shared.Settings.ASSERTIONS:
        return ('\n  assert(%sModule["%s"] || %s, "external symbol `%s` is missing.' % (side, extern, extern, extern) +
                'perhaps a side module was not linked in? if this symbol was expected to arrive '
                'from a system library, try to build the MAIN_MODULE with '
                'EMCC_FORCE_STDLIBS=1 in the environment");')
      return ''

    # g$NAME accessors: return the address of an external data symbol.
    for extern in metadata['externs']:
      asm_setup += 'var g$' + extern + ' = function() {' + check(extern) + '\n  return ' + side + 'Module["' + extern + '"];\n}\n'
    # fp$NAME$SIG accessors: lazily add an external function to the table and
    # cache its pointer on the Module object.
    for extern in metadata['externFunctions']:
      barename, sig = extern.split('$')
      fullname = "fp$" + extern
      key = '%sModule["%s"]' % (side, fullname)
      asm_setup += '''\
var %s = function() {
  if (!%s) { %s
    var fid = addFunction(%sModule["%s"] || %s, "%s");
    %s = fid;
  }
  return %s;
}
''' % (fullname, key, check(barename), side, barename, barename, sig, key, key)

  asm_setup += create_invoke_wrappers(invoke_function_names)
  asm_setup += setup_function_pointers(function_table_sigs)

  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    function_tables_impls = make_function_tables_impls(function_table_data)
    asm_setup += '\n' + '\n'.join(function_tables_impls) + '\n'

  return asm_setup
def setup_function_pointers(function_table_sigs):
  """Emit jsCall dispatchers (reserved function pointers) and, for asm.js
  emulated function pointers, the ftCall_SIG helpers that index the table
  from outside the asm module."""
  asm_setup = ''
  for sig in function_table_sigs:
    if shared.Settings.RESERVED_FUNCTION_POINTERS:
      asm_setup += '\n' + shared.JS.make_jscall(sig) + '\n'
    # nothing special to do here for wasm, we just use dynCalls
    if not shared.Settings.WASM:
      if shared.Settings.EMULATED_FUNCTION_POINTERS:
        # ftCall_SIG(x, a0..aN): call table entry x with the given args.
        args = ['a%d' % i for i in range(len(sig) - 1)]
        full_args = ['x'] + args
        table_access = 'FUNCTION_TABLE_' + sig
        if shared.Settings.SIDE_MODULE:
          table_access = 'parentModule["' + table_access + '"]' # side module tables were merged into the parent, we need to access the global one
        table_read = table_access + '[x]'
        prelude = ''
        if shared.Settings.ASSERTIONS:
          # range-check the pointer before indexing the table.
          prelude = '''
  if (x < 0 || x >= %s.length) { err("Function table mask error (out of range)"); %s ; abort(x) }''' % (table_access, get_function_pointer_error(sig, function_table_sigs))
        asm_setup += '''
function ftCall_%s(%s) {%s
  return %s(%s);
}
''' % (sig, ', '.join(full_args), prelude, table_read, ', '.join(args))
  return asm_setup
def create_basic_funcs(function_table_sigs, invoke_function_names):
  """Compute the list of basic runtime functions to import into the module.

  Includes the configured runtime imports, stack-overflow/safe-heap helpers,
  nullFunc/invoke wrappers, and per-signature jsCall/ftCall entries.
  """
  # Fix: copy the Settings list. The previous code aliased it and then used
  # `+=`, which extends a list in place — mutating
  # shared.Settings.RUNTIME_FUNCS_TO_IMPORT and accumulating entries if this
  # function runs more than once.
  basic_funcs = list(shared.Settings.RUNTIME_FUNCS_TO_IMPORT)
  if shared.Settings.STACK_OVERFLOW_CHECK and not shared.Settings.MINIMAL_RUNTIME:
    basic_funcs += ['abortStackOverflow']
  if shared.Settings.EMTERPRETIFY:
    basic_funcs += ['abortStackOverflowEmterpreter']
  if shared.Settings.SAFE_HEAP:
    if asm_safe_heap():
      # optimized asm.js safe heap only needs the fault handlers.
      basic_funcs += ['segfault', 'alignfault', 'ftfault']
    else:
      # Binaryen generates calls to these two so they are always needed with wasm
      if shared.Settings.WASM:
        basic_funcs += ['segfault', 'alignfault']
      basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_LOAD_D', 'SAFE_HEAP_STORE', 'SAFE_HEAP_STORE_D', 'SAFE_FT_MASK']
  if shared.Settings.ASSERTIONS:
    # one bad-pointer handler per table signature.
    for sig in function_table_sigs:
      basic_funcs += ['nullFunc_' + sig]

  basic_funcs += invoke_function_names

  for sig in function_table_sigs:
    if shared.Settings.RESERVED_FUNCTION_POINTERS:
      basic_funcs.append('jsCall_%s' % sig)
    if asm_js_emulated_function_pointers():
      basic_funcs.append('ftCall_%s' % sig)
  return basic_funcs
def create_basic_vars(exported_implemented_functions, forwarded_json, metadata):
  """List the basic runtime variables imported into the asm/wasm module.

  The extra parameters are accepted for interface compatibility.
  """
  basic_vars = []
  if 'tempDoublePtr' in shared.Settings.ASM_PRIMITIVE_VARS:
    basic_vars.append('tempDoublePtr')
  if shared.Settings.RELOCATABLE:
    if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
      # wasm side modules have a specific convention for these
      basic_vars.extend(['__memory_base', '__table_base'])
    else:
      basic_vars.extend(['gb', 'fb', 'STACKTOP', 'STACK_MAX'])
  if shared.Settings.EMTERPRETIFY:
    basic_vars.extend(['EMTSTACKTOP', 'EMT_STACK_MAX', 'eb'])
  return basic_vars
def create_exports(exported_implemented_functions, in_table, function_table_data, metadata):
  """Build the JS object literal returned/exported by the asm/wasm module."""
  asm_runtime_funcs = create_asm_runtime_funcs()
  all_exported = exported_implemented_functions + asm_runtime_funcs + function_tables(function_table_data)
  # In asm.js + emulated function pointers, export all the table because we use
  # JS to add the asm.js module's functions to the table (which is external
  # in this mode). In wasm, we don't need that since wasm modules can
  # directly add functions to the imported Table.
  if not shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS:
    all_exported += in_table
  exports = []
  # sorted(set(...)) deduplicates and gives deterministic output ordering.
  for export in sorted(set(all_exported)):
    exports.append(quote(export) + ": " + export)
  if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
    # named globals in side wasm modules are exported globals from asm/wasm
    for k, v in metadata['namedGlobals'].items():
      exports.append(quote('_' + str(k)) + ': ' + str(v))
    # aliases become additional exports
    for k, v in metadata['aliases'].items():
      exports.append(quote(str(k)) + ': ' + str(v))
  # shared wasm emulated function pointer mode requires us to know the function pointer for
  # each function. export fp$func => function pointer for func
  if shared.Settings.WASM and shared.Settings.RELOCATABLE and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
    for k, v in metadata['functionPointers'].items():
      exports.append(quote('fp$' + str(k)) + ': ' + str(v))
  return '{ ' + ', '.join(exports) + ' }'
def create_asm_runtime_funcs():
  """Names of the stack-management helpers defined inside the asm module."""
  funcs = []
  # wasm side modules manage their own stack, and MINIMAL_RUNTIME supplies
  # these from the JS library instead.
  wasm_side_module = shared.Settings.WASM and shared.Settings.SIDE_MODULE
  if not wasm_side_module and not shared.Settings.MINIMAL_RUNTIME:
    funcs.extend(['stackAlloc', 'stackSave', 'stackRestore'])
  if shared.Settings.USE_PTHREADS:
    funcs.append('establishStackSpace')
  return funcs
def function_tables(function_table_data):
  """Names of the per-signature dynCall entry points to export.

  With asm.js emulated function pointers there are no dynCall exports (the
  tables themselves are exposed instead), so the list is empty.
  """
  if asm_js_emulated_function_pointers():
    return []
  return ['dynCall_' + table for table in function_table_data]
def create_the_global(metadata):
  """Build the JS literal passed as the asm.js `global` argument.

  Wasm reads nothing from it, so it is just '{}' there.
  """
  if shared.Settings.WASM:
    return '{}'
  fundamentals = []
  if asm_backend_uses(metadata, 'Math.'):
    fundamentals.append('Math')
  for f in ['Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']:
    if asm_backend_uses(metadata, f):
      fundamentals.append(f)

  if metadata['simd'] or shared.Settings.SIMD:
    # Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
    fundamentals.append('SIMD')

  entries = ['"' + math_fix(s) + '": ' + s for s in fundamentals]
  return '{ ' + ', '.join(entries) + ' }'
RUNTIME_ASSERTIONS = '''
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');'''
def create_receiving(function_table_data, function_tables_defs, exported_implemented_functions, initializers):
  """Emit the JS that receives the asm/wasm exports into the outer scope:
  assertion wrappers, `var _foo = ...` export declarations (or a generic
  export-copying loop), exported function tables, and dynCall definitions
  for emulated function pointers.
  """
  receiving = ''
  if not shared.Settings.ASSERTIONS or shared.Settings.MINIMAL_RUNTIME:
    runtime_assertions = ''
  else:
    runtime_assertions = RUNTIME_ASSERTIONS
    # assert on the runtime being in a valid state when calling into compiled code. The only exceptions are some support code.
    # WASM=1 already inserts runtime assertions, so no need to do it again here (see create_receiving_wasm)
    if not shared.Settings.WASM:
      receiving_functions = [f for f in exported_implemented_functions if f not in ('_memcpy', '_memset', '_emscripten_replace_memory', '__start_module')]

      wrappers = []
      for name in receiving_functions:
        wrappers.append('''\
var real_%(name)s = asm["%(name)s"];
asm["%(name)s"] = function() {%(runtime_assertions)s
  return real_%(name)s.apply(null, arguments);
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
      receiving = '\n'.join(wrappers)

  module_exports = exported_implemented_functions + function_tables(function_table_data)
  # record the exports for later build stages (e.g. minification).
  shared.Settings.MODULE_EXPORTS = [(f, f) for f in module_exports]

  if not shared.Settings.SWAPPABLE_ASM_MODULE:
    if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
      # global initializers are called before exports are assigned, so they
      # are excluded from the (lazy-assigned) import list.
      imported_exports = [s for s in module_exports if s not in initializers]

      if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
        # In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
        # var _main;
        # WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
        #   var asm = output.instance.exports;
        #   _main = asm["_main"];
        receiving += '\n'.join([s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
      else:
        if shared.Settings.MINIMAL_RUNTIME:
          # In asm.js exports can be directly processed at top level, i.e.
          #   var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
          #   var _main = asm["_main"];
          receiving += '\n'.join(['var ' + s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
        else:
          receiving += '\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"];' for s in module_exports]) + '\n'
    else:
      # no declared exports: copy everything off `asm` in a loop. Choose the
      # JS global object expression based on the target environment(s).
      if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
        global_object = '(typeof process !== "undefined" ? global : this)'
      elif shared.Settings.target_environment_may_be('node'):
        global_object = 'global'
      else:
        global_object = 'this'

      if shared.Settings.MINIMAL_RUNTIME:
        module_assign = ''
      else:
        module_assign = 'Module[__exportedFunc] = '

      receiving += 'for(var __exportedFunc in asm) ' + global_object + '[__exportedFunc] = ' + module_assign + 'asm[__exportedFunc];\n'
  else:
    # swappable module: route every call through Module["asm"] so the module
    # can be replaced at runtime.
    receiving += 'Module["asm"] = asm;\n'
    wrappers = []
    for name in module_exports:
      wrappers.append('''\
var %(name)s = Module["%(name)s"] = function() {%(runtime_assertions)s
  return Module["asm"]["%(name)s"].apply(null, arguments)
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
    receiving += '\n'.join(wrappers)

  if shared.Settings.EXPORT_FUNCTION_TABLES and not shared.Settings.WASM:
    for table in function_table_data.values():
      tableName = table.split()[1]
      table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
      receiving += table + '\n'

  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    # in asm.js emulated function tables, emit the table on the outside, where
    # JS can manage it (for wasm, a native wasm Table is used directly, and we
    # don't need this)
    if not shared.Settings.WASM:
      receiving += '\n' + function_tables_defs.replace('// EMSCRIPTEN_END_FUNCS\n', '')
    # wasm still needs definitions for dyncalls on the outside, for JS
    receiving += '\n' + ''.join(['Module["dynCall_%s"] = dynCall_%s\n' % (sig, sig) for sig in function_table_data])
    if not shared.Settings.WASM:
      for sig in function_table_data.keys():
        name = 'FUNCTION_TABLE_' + sig
        fullname = name if not shared.Settings.SIDE_MODULE else ('SIDE_' + name)
        receiving += 'Module["' + name + '"] = ' + fullname + ';\n'

  return receiving
def create_fp_accessors(metadata):
  """Create `fp$XXX` handlers for determining function pointers (table
  addresses) at runtime, for relocatable builds.

  For SIDE_MODULEs these are generated by the proxyHandler at runtime.
  """
  if not shared.Settings.RELOCATABLE:
    return ''

  accessors = []
  for fullname in metadata['declares']:
    if not fullname.startswith('fp$'):
      continue
    _, name, sig = fullname.split('$')
    mangled = asmjs_mangle(name)
    side = 'parent' if shared.Settings.SIDE_MODULE else ''
    assertion = ('\n  assert(%sModule["%s"] || typeof %s !== "undefined", "external function `%s` is missing.' % (side, mangled, mangled, name) +
                 'perhaps a side module was not linked in? if this symbol was expected to arrive '
                 'from a system library, try to build the MAIN_MODULE with '
                 'EMCC_FORCE_STDLIBS=XX in the environment");')
    # the name of the original function is generally the normal function
    # name, unless it is legalized, in which case the export is the legalized
    # version, and the original provided by orig$X
    if shared.Settings.LEGALIZE_JS_FFI and not shared.JS.is_legal_sig(sig):
      name = 'orig$' + name

    # the accessor looks the function up (in priority order), adds it to the
    # table once, then replaces itself with a closure returning the cached fp.
    accessors.append('''
Module['%(full)s'] = function() {
  %(assert)s
  // Use the original wasm function itself, for the table, from the main module.
  var func = Module['asm']['%(original)s'];
  // Try an original version from a side module.
  if (!func) func = Module['_%(original)s'];
  // Otherwise, look for a regular function or JS library function.
  if (!func) func = Module['%(mangled)s'];
  if (!func) func = %(mangled)s;
  var fp = addFunction(func, '%(sig)s');
  Module['%(full)s'] = function() { return fp };
  return fp;
}
''' % {'full': asmjs_mangle(fullname), 'mangled': mangled, 'original': name, 'assert': assertion, 'sig': sig})

  return '\n'.join(accessors)
def create_named_globals(metadata):
  """Emit JS exposing named globals (Module['_name'] = gb + offset) and
  aliases for relocatable builds; returns '' otherwise."""
  if not shared.Settings.RELOCATABLE:
    return ''

  named_globals = '''
var NAMED_GLOBALS = {
  %s
};
for (var named in NAMED_GLOBALS) {
  Module['_' + named] = gb + NAMED_GLOBALS[named];
}
Module['NAMED_GLOBALS'] = NAMED_GLOBALS;
''' % ',\n  '.join('"' + k + '": ' + str(v) for k, v in metadata['namedGlobals'].items())

  if shared.Settings.WASM:
    # wasm side modules are pure wasm, and cannot create their g$..() methods, so we help them out
    # TODO: this works if we are the main module, but if the supplying module is later, it won't, so
    #       we'll need another solution for that. one option is to scan the module imports, if/when
    #       wasm supports that, then the loader can do this.
    named_globals += '''
for (var named in NAMED_GLOBALS) {
  (function(named) {
    var addr = Module['_' + named];
    Module['g$_' + named] = function() { return addr };
  })(named);
}
'''

  named_globals += ''.join(["Module['%s'] = Module['%s']\n" % (k, v) for k, v in metadata['aliases'].items()])
  return named_globals
def create_runtime_funcs_asmjs(exports, metadata):
  """Generate the runtime helper functions that live inside the asm module:
  stack management, pthreads stack setup, emterpreter support, and the
  optimized asm.js SAFE_HEAP accessors. Returns a list of JS strings.

  `exports` is accepted for interface compatibility.
  """
  if shared.Settings.ASSERTIONS or shared.Settings.STACK_OVERFLOW_CHECK >= 2:
    stack_check = '  if ((STACKTOP|0) >= (STACK_MAX|0)) abortStackOverflow(size|0);\n'
  else:
    stack_check = ''

  funcs = ['''
function stackAlloc(size) {
  size = size|0;
  var ret = 0;
  ret = STACKTOP;
  STACKTOP = (STACKTOP + size)|0;
  STACKTOP = (STACKTOP + 15)&-16;
  %s
  return ret|0;
}
function stackSave() {
  return STACKTOP|0;
}
function stackRestore(top) {
  top = top|0;
  STACKTOP = top;
}
''' % stack_check]

  if shared.Settings.USE_PTHREADS:
    funcs.append('''
function establishStackSpace(stackBase, stackMax) {
  stackBase = stackBase|0;
  stackMax = stackMax|0;
  STACKTOP = stackBase;
  STACK_MAX = stackMax;
  tempDoublePtr = STACKTOP;
  STACKTOP = (STACKTOP + 8)|0;
}
''')

  if shared.Settings.MINIMAL_RUNTIME:
    # MINIMAL_RUNTIME moves stack functions to library.
    funcs = []

  if shared.Settings.EMTERPRETIFY:
    funcs.append('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
  pc = pc | 0;
  assert(0);
}''')
    if shared.Settings.EMTERPRETIFY_ASYNC:
      funcs.append('''
function setAsyncState(x) {
  x = x | 0;
  asyncState = x;
}
function emtStackSave() {
  return EMTSTACKTOP|0;
}
function emtStackRestore(x) {
  x = x | 0;
  EMTSTACKTOP = x;
}
function getEmtStackMax() {
  return EMT_STACK_MAX | 0;
}
function setEmtStackMax(x) {
  x = x | 0;
  EMT_STACK_MAX = x;
}
''')

  if asm_safe_heap():
    # stores/loads beyond the current sbrk position are faults, but only if
    # sbrk was actually linked in.
    if '_sbrk' in metadata['implementedFunctions']:
      brk_check = 'if ((dest + bytes|0) > (HEAP32[(_emscripten_get_sbrk_ptr()|0)>>2]|0)) segfault();'
    else:
      # sbrk and malloc were not linked in, but SAFE_HEAP is used - so safe heap
      # can ignore the sbrk location.
      brk_check = ''
    funcs.append('''
function SAFE_HEAP_STORE(dest, value, bytes) {
  dest = dest | 0;
  value = value | 0;
  bytes = bytes | 0;
  if ((dest|0) <= 0) segfault();
  %(brk_check)s
  if ((bytes|0) == 4) {
    if ((dest&3)) alignfault();
    HEAP32[dest>>2] = value;
  } else if ((bytes|0) == 1) {
    HEAP8[dest>>0] = value;
  } else {
    if ((dest&1)) alignfault();
    HEAP16[dest>>1] = value;
  }
}
function SAFE_HEAP_STORE_D(dest, value, bytes) {
  dest = dest | 0;
  value = +value;
  bytes = bytes | 0;
  if ((dest|0) <= 0) segfault();
  %(brk_check)s
  if ((bytes|0) == 8) {
    if ((dest&7)) alignfault();
    HEAPF64[dest>>3] = value;
  } else {
    if ((dest&3)) alignfault();
    HEAPF32[dest>>2] = value;
  }
}
function SAFE_HEAP_LOAD(dest, bytes, unsigned) {
  dest = dest | 0;
  bytes = bytes | 0;
  unsigned = unsigned | 0;
  if ((dest|0) <= 0) segfault();
  %(brk_check)s
  if ((bytes|0) == 4) {
    if ((dest&3)) alignfault();
    return HEAP32[dest>>2] | 0;
  } else if ((bytes|0) == 1) {
    if (unsigned) {
      return HEAPU8[dest>>0] | 0;
    } else {
      return HEAP8[dest>>0] | 0;
    }
  }
  if ((dest&1)) alignfault();
  if (unsigned) return HEAPU16[dest>>1] | 0;
  return HEAP16[dest>>1] | 0;
}
function SAFE_HEAP_LOAD_D(dest, bytes) {
  dest = dest | 0;
  bytes = bytes | 0;
  if ((dest|0) <= 0) segfault();
  %(brk_check)s
  if ((bytes|0) == 8) {
    if ((dest&7)) alignfault();
    return +HEAPF64[dest>>3];
  }
  if ((dest&3)) alignfault();
  return +HEAPF32[dest>>2];
}
function SAFE_FT_MASK(value, mask) {
  value = value | 0;
  mask = mask | 0;
  var ret = 0;
  ret = value & mask;
  if ((ret|0) != (value|0)) ftfault();
  return ret | 0;
}
''' % {'brk_check': brk_check})

  return funcs
def create_asm_start_pre(asm_setup, the_global, sending, metadata):
  """Emit the JS preceding the asm/wasm module body: the setup code, the
  global/library argument objects, the module factory header and the
  use-asm pragma. `metadata` is accepted for interface compatibility.
  """
  # asm.js pthreads builds must hand Atomics in via the global argument.
  needs_atomics = shared.Settings.USE_PTHREADS and not shared.Settings.WASM
  shared_array_buffer = "asmGlobalArg['Atomics'] = Atomics;" if needs_atomics else ''

  # 'almost asm' disables validation while keeping the asm.js code shape.
  use_asm = "'use asm';" if shared.Settings.ASM_JS == 1 else "'almost asm';"

  parts = [
    asm_setup,
    'var asmGlobalArg = ' + the_global + ';',
    shared_array_buffer,
    'var asmLibraryArg = ' + sending + ';',
    '// EMSCRIPTEN_START_ASM\n'
    'var asm = (/** @suppress {uselessCode} */ function(global, env, buffer) {',
    use_asm,
    create_first_in_asm(),
  ]
  return '\n'.join(parts)
def create_asm_temp_vars(metadata):
  """Declare the scratch int/double variables used inside the asm module,
  plus nan/inf pulled from the global object when the code uses them."""
  rtn = ''
  # integer temps first, then the double temp, matching the emitted order.
  for name, init in (('__THREW__', '0'), ('threwValue', '0'), ('setjmpId', '0'),
                     ('tempInt', '0'), ('tempBigInt', '0'), ('tempBigIntS', '0'),
                     ('tempValue', '0'), ('tempDouble', '0.0')):
    if name in shared.Settings.ASM_PRIMITIVE_VARS:
      rtn += 'var ' + name + ' = ' + init + ';\n'
  if asm_backend_uses(metadata, 'NaN'):
    rtn += 'var nan = global' + access_quote('NaN') + ';\n'
  if asm_backend_uses(metadata, 'Infinity'):
    rtn += 'var inf = global' + access_quote('Infinity') + ';\n'
  return rtn
def create_asm_runtime_thread_local_vars():
  """Declare the per-thread pthread bookkeeping vars (pthreads builds only)."""
  if shared.Settings.USE_PTHREADS:
    return '''
  var __pthread_ptr = 0;
  var __pthread_is_main_runtime_thread = 0;
  var __pthread_is_main_browser_thread = 0;
'''
  return ''
def create_replace_memory(metadata):
  """Emit _emscripten_replace_memory, which rebinds the heap views to a new
  ArrayBuffer when memory grows. Returns '' if memory growth is disabled."""
  if not shared.Settings.ALLOW_MEMORY_GROWTH:
    return ''

  emscripten_replace_memory = '''
function _emscripten_replace_memory(newBuffer) {
'''
  # only recreate the views the compiled code actually uses.
  for heap, view in [
    ('HEAP8', 'Int8Array'),
    ('HEAPU8', 'Uint8Array'),
    ('HEAP16', 'Int16Array'),
    ('HEAPU16', 'Uint16Array'),
    ('HEAP32', 'Int32Array'),
    ('HEAPU32', 'Uint32Array'),
    ('HEAPF32', 'Float32Array'),
    ('HEAPF64', 'Float64Array')]:
    if asm_backend_uses(metadata, view):
      emscripten_replace_memory += '  %s = new %s(newBuffer);\n' % (heap, view)
  emscripten_replace_memory += '''
  buffer = newBuffer;
  return true;
}
'''
  return emscripten_replace_memory
def create_asm_end(exports):
  """Close the asm module function: return the exports object and, except in
  MINIMAL_RUNTIME wasm builds, invoke the factory immediately."""
  tail = ''
  if not (shared.Settings.MINIMAL_RUNTIME and shared.Settings.WASM):
    # immediately call the module factory with its arguments.
    tail = '(asmGlobalArg, asmLibraryArg, buffer);\n'
  return ('''
  return %s;
})
// EMSCRIPTEN_END_ASM
''' % (exports)) + tail
def create_first_in_asm():
  """Hook for content placed at the very start of the asm module body.

  Currently nothing is emitted here; kept for structural symmetry.
  """
  return ''
def create_memory_views(metadata):
  """Emit the heap view declarations used inside the asm module.

  For each HEAP_TYPE_INFOS entry whose typed-array type the compiled code
  uses, emits `  var HEAPxx = new global.XxxArray(buffer);`.
  """
  lines = ['\n']
  for info in HEAP_TYPE_INFOS:
    view_name = info.long_name + 'Array'
    if asm_backend_uses(metadata, view_name):
      lines.append('  var %s = new global%s(buffer);\n'
                   % (info.heap_name, access_quote(view_name)))
  return ''.join(lines)
class HeapTypeInfo(object):
  """Struct that holds data for a type of HEAP* views."""

  def __init__(self, heap_name, long_name, shift_amount):
    # every view variable follows the HEAP<short> naming convention.
    assert heap_name.startswith('HEAP')
    self.heap_name = heap_name
    self.long_name = long_name
    self.shift_amount = shift_amount

  def short_name(self):
    """The unique part of the heap name for this type.

    Derive this from heap_name instead of the other way around so that
    searching, e.g. for HEAP8, from the generated JS code leads back here.
    """
    return self.heap_name[4:]  # drop the 'HEAP' prefix

  def is_int(self):
    """Whether this heap type is an integer type or not."""
    return self.short_name()[0] != 'F'

  def coerce(self, expression):
    """Adds asm.js type coercion to a string expression."""
    return (expression + '| 0') if self.is_int() else ('+' + expression)
# Canonical list of heap views: the JS view variable name, the typed-array
# type name minus the 'Array' suffix, and log2 of the element size (the
# pointer shift used when indexing the view).
HEAP_TYPE_INFOS = [
  HeapTypeInfo(heap_name='HEAP8', long_name='Int8', shift_amount=0),
  HeapTypeInfo(heap_name='HEAP16', long_name='Int16', shift_amount=1),
  HeapTypeInfo(heap_name='HEAP32', long_name='Int32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPU8', long_name='Uint8', shift_amount=0),
  HeapTypeInfo(heap_name='HEAPU16', long_name='Uint16', shift_amount=1),
  HeapTypeInfo(heap_name='HEAPU32', long_name='Uint32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPF32', long_name='Float32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPF64', long_name='Float64', shift_amount=3),
]
def emscript_wasm_backend(infile, outfile, memfile, compiler_engine,
                          temp_files, DEBUG):
  """Emit the JS glue for a wasm produced by the LLVM wasm backend.

  Overview:
    * Run wasm-emscripten-finalize to extract metadata and modify the binary
      to use emscripten's wasm<->JS ABI
    * Use the metadata to generate the JS glue that goes with the wasm
  """
  metadata = finalize_wasm(temp_files, infile, outfile, memfile, DEBUG)

  update_settings_glue(metadata, DEBUG)

  if shared.Settings.SIDE_MODULE:
    # side modules are pure wasm; no JS glue is written for them
    return

  if DEBUG:
    logger.debug('emscript: js compiler glue')
  if DEBUG:
    t = time.time()
  glue, forwarded_data = compile_settings(compiler_engine, temp_files)
  if DEBUG:
    logger.debug('  emscript: glue took %s seconds' % (time.time() - t))
    t = time.time()

  forwarded_json = json.loads(forwarded_data)
  # For the wasm backend the implementedFunctions from compiler.js should
  # always be empty. This only gets populated for __asm function when using
  # the JS backend.
  assert not forwarded_json['Functions']['implementedFunctions']

  pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')

  # memory and global initializers
  global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in metadata['initializers'])

  staticbump = shared.Settings.STATIC_BUMP

  if shared.Settings.MINIMAL_RUNTIME:
    # In minimal runtime, global initializers are run after the Wasm Module instantiation has finished.
    global_initializers = ''
  else:
    # In regular runtime, global initializers are recorded in an __ATINIT__ array.
    global_initializers = '''/* global initializers */ %s __ATINIT__.push(%s);
''' % ('if (!ENVIRONMENT_IS_PTHREAD)' if shared.Settings.USE_PTHREADS else '',
       global_initializers)

  pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
%s
''' % (staticbump, global_initializers))

  pre = apply_memory(pre)
  pre = apply_static_code_hooks(pre) # In regular runtime, atinits etc. exist in the preamble part
  post = apply_static_code_hooks(post) # In MINIMAL_RUNTIME, atinit exists in the postamble part

  if shared.Settings.RELOCATABLE and not shared.Settings.SIDE_MODULE:
    pre += 'var gb = GLOBAL_BASE, fb = 0;\n'

  # merge forwarded data
  shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']

  exports = metadata['exports']

  # Store exports for Closure compiler to be able to track these as globals in
  # -s DECLARE_ASM_MODULE_EXPORTS=0 builds.
  shared.Settings.MODULE_EXPORTS = [(asmjs_mangle(f), f) for f in exports]

  if shared.Settings.ASYNCIFY:
    exports += ['asyncify_start_unwind', 'asyncify_stop_unwind', 'asyncify_start_rewind', 'asyncify_stop_rewind']

  report_missing_symbols(set([asmjs_mangle(f) for f in exports]), pre)

  asm_consts, asm_const_funcs = create_asm_consts_wasm(forwarded_json, metadata)
  em_js_funcs = create_em_js(forwarded_json, metadata)
  asm_const_pairs = ['%s: %s' % (key, value) for key, value in asm_consts]
  asm_const_map = 'var ASM_CONSTS = {\n  ' + ', \n '.join(asm_const_pairs) + '\n};\n'
  pre = pre.replace(
    '// === Body ===',
    ('// === Body ===\n\n' + asm_const_map +
     asstr('\n'.join(asm_const_funcs)) +
     '\n'.join(em_js_funcs) + '\n'))
  pre = apply_table(pre)
  outfile.write(pre)
  pre = None

  invoke_funcs = metadata['invokeFuncs']
  if shared.Settings.RELOCATABLE:
    invoke_funcs.append('invoke_X')

  try:
    del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
  except KeyError:
    pass

  sending = create_sending_wasm(invoke_funcs, forwarded_json, metadata)
  receiving = create_receiving_wasm(exports, metadata['initializers'])

  if shared.Settings.MINIMAL_RUNTIME:
    post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], exports, receiving)

  module = create_module_wasm(sending, receiving, invoke_funcs, metadata)

  write_output_file(outfile, post, module)
  module = None

  outfile.close()
def remove_trailing_zeros(memfile):
  """Strip all trailing zero bytes from the given file, rewriting it in place."""
  with open(memfile, 'rb') as f:
    contents = f.read()
  end = len(contents)
  # Both comparisons are kept deliberately: indexing a bytes object yields an
  # int on python 3 and a one-byte string on python 2.
  while end and (contents[end - 1] == 0 or contents[end - 1] == b'\0'):
    end -= 1
  with open(memfile, 'wb') as f:
    f.write(contents[:end])
def finalize_wasm(temp_files, infile, outfile, memfile, DEBUG):
  """Run wasm-emscripten-finalize on the linked wasm and return its metadata.

  The binary is rewritten (to <outfile basename>.wasm) to use emscripten's
  wasm<->JS ABI, and the JSON metadata the tool prints is parsed and returned.
  """
  basename = shared.unsuffixed(outfile.name)
  wasm = basename + '.wasm'
  base_wasm = infile
  shared.Building.save_intermediate(infile, 'base.wasm')

  args = ['--detect-features']

  write_source_map = shared.Settings.DEBUG_LEVEL >= 4
  if write_source_map:
    shared.Building.emit_wasm_source_map(base_wasm, base_wasm + '.map')
    shared.Building.save_intermediate(base_wasm + '.map', 'base_wasm.map')
    args += ['--output-source-map-url=' + shared.Settings.SOURCE_MAP_BASE + os.path.basename(shared.Settings.WASM_BINARY_FILE) + '.map']

  # tell binaryen to look at the features section, and if there isn't one, to use MVP
  # (which matches what llvm+lld has given us)
  if shared.Settings.DEBUG_LEVEL >= 2 or shared.Settings.PROFILING_FUNCS or shared.Settings.EMIT_SYMBOL_MAP or shared.Settings.ASYNCIFY_WHITELIST or shared.Settings.ASYNCIFY_BLACKLIST:
    args.append('-g')
  if shared.Settings.LEGALIZE_JS_FFI != 1:
    args.append('--no-legalize-javascript-ffi')
  if not shared.Settings.MEM_INIT_IN_WASM:
    args.append('--separate-data-segments=' + memfile)
  if shared.Settings.SIDE_MODULE:
    args.append('--side-module')
  else:
    # --global-base is used by wasm-emscripten-finalize to calculate the size
    # of the static data used.  The argument we supply here needs to match the
    # global based used by lld (see Building.link_lld).  For relocatable this is
    # zero for the global base although at runtime __memory_base is used.
    # For non-relocatable output we used shared.Settings.GLOBAL_BASE.
    # TODO(sbc): Can we remove this argument infer this from the segment
    # initializer?
    if shared.Settings.RELOCATABLE:
      args.append('--global-base=0')
    else:
      args.append('--global-base=%s' % shared.Settings.GLOBAL_BASE)
  if shared.Settings.WASM_BACKEND and shared.Settings.STACK_OVERFLOW_CHECK >= 2:
    args.append('--check-stack-overflow')
  if shared.Settings.STANDALONE_WASM:
    args.append('--standalone-wasm')
  # When we dynamically link our JS loader adds functions from wasm modules to
  # the table. It must add the original versions of them, not legalized ones,
  # so that indirect calls have the right type, so export those.
  if shared.Settings.RELOCATABLE:
    args.append('--pass-arg=legalize-js-interface-export-originals')
  if shared.Settings.FULL_DWARF:
    args.append('--dwarf')
  stdout = shared.Building.run_binaryen_command('wasm-emscripten-finalize',
                                                infile=base_wasm,
                                                outfile=wasm,
                                                args=args,
                                                stdout=subprocess.PIPE)
  if write_source_map:
    shared.Building.save_intermediate(wasm + '.map', 'post_finalize.map')
  shared.Building.save_intermediate(wasm, 'post_finalize.wasm')

  if not shared.Settings.MEM_INIT_IN_WASM:
    # we have a separate .mem file. binaryen did not strip any trailing zeros,
    # because it's an ABI question as to whether it is valid to do so or not.
    # we can do so here, since we make sure to zero out that memory (even in
    # the dynamic linking case, our loader zeros it out)
    remove_trailing_zeros(memfile)

  return load_metadata_wasm(stdout, DEBUG)
def create_asm_consts_wasm(forwarded_json, metadata):
  """Build the EM_ASM() support code for the wasm backend.

  Returns (asm_consts, asm_const_funcs): a sorted list of (id, JS function
  source) pairs for the ASM_CONSTS table, and the JS helper/trampoline
  functions that read arguments and dispatch into that table.
  """
  asm_consts = {}
  all_sigs = []
  for k, v in metadata['asmConsts'].items():
    const, sigs, call_types = v
    const = asstr(const)
    const = trim_asm_const_body(const)
    args = []
    max_arity = 16
    arity = 0
    # arity is one more than the highest $N placeholder used in the body
    for i in range(max_arity):
      if ('$' + str(i)) in const:
        arity = i + 1
    for i in range(arity):
      args.append('$' + str(i))
    const = 'function(' + ', '.join(args) + ') {' + const + '}'
    asm_consts[int(k)] = const
    for sig, call_type in zip(sigs, call_types):
      all_sigs.append((sig, call_type))

  asm_const_funcs = []

  if all_sigs:
    # emit the signature-reading helper function only if we have any EM_ASM
    # functions in the module
    check_int = ''
    check = ''
    if shared.Settings.ASSERTIONS:
      check_int = "if (ch === 105 /*'i'*/)"
      check = ' else abort("unexpected char in asm const signature " + ch);'
    asm_const_funcs.append(r'''
// Avoid creating a new array
var _readAsmConstArgsArray = [];
function readAsmConstArgs(sigPtr, buf) {
  var args = _readAsmConstArgsArray;
  args.length = 0;
  var ch;
  while (ch = HEAPU8[sigPtr++]) {
    if (ch === 100/*'d'*/ || ch === 102/*'f'*/) {
      buf = (buf + 7) & ~7;
      args.push(HEAPF64[(buf >> 3)]);
      buf += 8;
    } else %s {
      buf = (buf + 3) & ~3;
      args.push(HEAP32[(buf >> 2)]);
      buf += 4;
    }%s
  }
  return args;
}
''' % (check_int, check))

  # one trampoline per unique (signature, call type) pair
  for sig, call_type in set(all_sigs):
    const_name = '_emscripten_asm_const_' + call_type + sig
    forwarded_json['Functions']['libraryFunctions'][const_name] = 1

    preamble = ''
    if shared.Settings.USE_PTHREADS:
      sync_proxy = call_type == 'sync_on_main_thread_'
      async_proxy = call_type == 'async_on_main_thread_'
      proxied = sync_proxy or async_proxy
      if proxied:
        # In proxied function calls, positive integers 1, 2, 3, ... denote pointers
        # to regular C compiled functions. Negative integers -1, -2, -3, ... denote
        # indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
        # ... over to the negative integers starting at -1.
        preamble += ('\n  if (ENVIRONMENT_IS_PTHREAD) { ' +
                     proxy_debug_print(sync_proxy) +
                     'return _emscripten_proxy_to_main_thread_js(-1 - code, ' +
                     str(int(sync_proxy)) +
                     ', code, sigPtr, argbuf); }')

    if shared.Settings.RELOCATABLE:
      preamble += '\n  code -= %s;\n' % shared.Settings.GLOBAL_BASE

    asm_const_funcs.append(r'''
function %s(code, sigPtr, argbuf) {%s
  var args = readAsmConstArgs(sigPtr, argbuf);
  return ASM_CONSTS[code].apply(null, args);
}''' % (const_name, preamble))
  asm_consts = [(key, value) for key, value in asm_consts.items()]
  asm_consts.sort()
  return asm_consts, asm_const_funcs
def create_em_js(forwarded_json, metadata):
  """Turn EM_JS metadata entries into JS function definitions.

  Also registers each function as a library function in forwarded_json.
  """
  separator = '<::>'
  funcs = []
  for name, raw in metadata.get('emJsFuncs', {}).items():
    assert separator in raw
    arg_spec, body = raw.split(separator, 1)
    # strip the surrounding parentheses from the C parameter list
    arg_spec = arg_spec[1:-1]
    if arg_spec == 'void':
      declared = []
    else:
      declared = arg_spec.split(',')
    # keep only the parameter name from each C declaration, dropping pointers
    names = []
    for decl in declared:
      if decl:
        names.append(decl.split()[-1].replace("*", ""))
    funcs.append('function {}({}){}'.format(name, ','.join(names), asstr(body)))
    forwarded_json['Functions']['libraryFunctions'][name] = 1
  return funcs
def add_standard_wasm_imports(send_items_map):
  """Add the standard imports the runtime always sends to the wasm module.

  Mutates `send_items_map` (import name -> JS expression) in place.
  """
  # Normally we import these into the wasm (so that JS could use them even
  # before the wasm loads), while in standalone mode we do not depend
  # on JS to create them, but create them in the wasm and export them.
  if not shared.Settings.STANDALONE_WASM:
    memory_import = 'wasmMemory'
    if shared.Settings.MODULARIZE and shared.Settings.USE_PTHREADS:
      # Pthreads assign wasmMemory in their worker startup. In MODULARIZE mode, they cannot assign inside the
      # Module scope, so lookup via Module as well.
      memory_import += " || Module['wasmMemory']"
    send_items_map['memory'] = memory_import

    send_items_map['table'] = 'wasmTable'

  # With the wasm backend __memory_base and __table_base and only needed for
  # relocatable output.
  if shared.Settings.RELOCATABLE or not shared.Settings.WASM_BACKEND: # FIXME
    send_items_map['__memory_base'] = str(shared.Settings.GLOBAL_BASE) # tell the memory segments where to place themselves
    # the wasm backend reserves slot 0 for the NULL function pointer
    table_base = '1' if shared.Settings.WASM_BACKEND else '0'
    send_items_map['__table_base'] = table_base
  if shared.Settings.RELOCATABLE and shared.Settings.WASM_BACKEND: # FIXME
    send_items_map['__stack_pointer'] = 'STACK_BASE'

  if shared.Settings.MAYBE_WASM2JS or shared.Settings.AUTODEBUG or shared.Settings.LINKABLE:
    # legalization of i64 support code may require these in some modes
    send_items_map['setTempRet0'] = 'setTempRet0'
    send_items_map['getTempRet0'] = 'getTempRet0'

  if shared.Settings.AUTODEBUG:
    # instrumentation callbacks injected by the autodebugger; each one logs
    # the event and passes the value through unchanged
    send_items_map['log_execution'] = '''function(loc) {
      console.log('log_execution ' + loc);
    }'''
    send_items_map['get_i32'] = '''function(loc, index, value) {
      console.log('get_i32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_i64'] = '''function(loc, index, low, high) {
      console.log('get_i64 ' + [loc, index, low, high]);
      setTempRet0(high);
      return low;
    }'''
    send_items_map['get_f32'] = '''function(loc, index, value) {
      console.log('get_f32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_f64'] = '''function(loc, index, value) {
      console.log('get_f64 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_anyref'] = '''function(loc, index, value) {
      console.log('get_anyref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_exnref'] = '''function(loc, index, value) {
      console.log('get_exnref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_i32'] = '''function(loc, index, value) {
      console.log('set_i32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_i64'] = '''function(loc, index, low, high) {
      console.log('set_i64 ' + [loc, index, low, high]);
      setTempRet0(high);
      return low;
    }'''
    send_items_map['set_f32'] = '''function(loc, index, value) {
      console.log('set_f32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_f64'] = '''function(loc, index, value) {
      console.log('set_f64 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_anyref'] = '''function(loc, index, value) {
      console.log('set_anyref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_exnref'] = '''function(loc, index, value) {
      console.log('set_exnref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['load_ptr'] = '''function(loc, bytes, offset, ptr) {
      console.log('load_ptr ' + [loc, bytes, offset, ptr]);
      return ptr;
    }'''
    send_items_map['load_val_i32'] = '''function(loc, value) {
      console.log('load_val_i32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['load_val_i64'] = '''function(loc, low, high) {
      console.log('load_val_i64 ' + [loc, low, high]);
      setTempRet0(high);
      return low;
    }'''
    # fixed: the log label used to be the garbled copy-paste 'loaload_val_i32d_ptr'
    send_items_map['load_val_f32'] = '''function(loc, value) {
      console.log('load_val_f32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['load_val_f64'] = '''function(loc, value) {
      console.log('load_val_f64 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_ptr'] = '''function(loc, bytes, offset, ptr) {
      console.log('store_ptr ' + [loc, bytes, offset, ptr]);
      return ptr;
    }'''
    send_items_map['store_val_i32'] = '''function(loc, value) {
      console.log('store_val_i32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_val_i64'] = '''function(loc, low, high) {
      console.log('store_val_i64 ' + [loc, low, high]);
      setTempRet0(high);
      return low;
    }'''
    # fixed: the log label used to be the garbled copy-paste 'loastore_val_i32d_ptr'
    send_items_map['store_val_f32'] = '''function(loc, value) {
      console.log('store_val_f32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_val_f64'] = '''function(loc, value) {
      console.log('store_val_f64 ' + [loc, value]);
      return value;
    }'''
def create_sending_wasm(invoke_funcs, forwarded_json, metadata):
  """Build the JS object literal of imports sent to the wasm module."""
  basic_funcs = []
  if shared.Settings.SAFE_HEAP:
    basic_funcs += ['segfault', 'alignfault']

  # one import per (signature, call type) pair of every EM_ASM block
  em_asm_funcs = []
  for _, sigs, call_types in metadata['asmConsts'].values():
    for sig, call_type in zip(sigs, call_types):
      em_asm_funcs.append('_emscripten_asm_const_' + call_type + sig)

  em_js_funcs = list(metadata['emJsFuncs'].keys())
  declared_items = ['_' + item for item in metadata['declares']]
  send_items = set(basic_funcs + invoke_funcs + em_asm_funcs + em_js_funcs + declared_items)

  def fix_import_name(g):
    # Math_* imports go out under their bare Math member name.
    if g.startswith('Math_'):
      return g.split('_')[1]
    # Unlike fastcomp the wasm backend doesn't use the '_' prefix for native
    # symbols. Emscripten currently expects symbols to start with '_' so we
    # artificially add them to the output of emscripten-wasm-finalize and them
    # strip them again here.
    # note that we don't do this for EM_JS functions (which, rarely, may have
    # a '_' prefix)
    if g.startswith('_') and g not in metadata['emJsFuncs']:
      return g[1:]
    return g

  send_items_map = OrderedDict()
  for name in send_items:
    internal_name = fix_import_name(name)
    if internal_name in send_items_map:
      exit_with_error('duplicate symbol in exports to wasm: %s', name)
    send_items_map[internal_name] = name

  add_standard_wasm_imports(send_items_map)

  entries = ['"' + k + '": ' + send_items_map[k] for k in sorted(send_items_map.keys())]
  return '{ ' + ', '.join(entries) + ' }'
def create_receiving_wasm(exports, initializers):
  """Build the JS that receives the wasm exports into JS-visible variables.

  The exact shape depends on ASSERTIONS, SWAPPABLE_ASM_MODULE,
  DECLARE_ASM_MODULE_EXPORTS and MINIMAL_RUNTIME.
  """
  exports_that_are_not_initializers = [x for x in exports if x not in initializers]

  receiving = []
  runtime_assertions = ''
  if shared.Settings.ASSERTIONS and not shared.Settings.MINIMAL_RUNTIME:
    runtime_assertions = RUNTIME_ASSERTIONS
    # assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
    # some support code
    for e in exports:
      receiving.append('''\
var real_%(mangled)s = asm["%(e)s"];
asm["%(e)s"] = function() {%(assertions)s
  return real_%(mangled)s.apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})

  if not shared.Settings.SWAPPABLE_ASM_MODULE:
    if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
      if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
        # In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
        # var _main;
        # WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
        #   var asm = output.instance.exports;
        #   _main = asm["_main"];
        receiving += [asmjs_mangle(s) + ' = asm["' + s + '"];' for s in exports_that_are_not_initializers]
      else:
        if shared.Settings.MINIMAL_RUNTIME:
          # In wasm2js exports can be directly processed at top level, i.e.
          # var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
          # var _main = asm["_main"];
          receiving += ['var ' + asmjs_mangle(s) + ' = asm["' + asmjs_mangle(s) + '"];' for s in exports_that_are_not_initializers]
        else:
          receiving += ['var ' + asmjs_mangle(s) + ' = Module["' + asmjs_mangle(s) + '"] = asm["' + s + '"];' for s in exports]
    else:
      # exports are not declared individually; copy everything off `asm`
      # onto the global object (and Module, unless MINIMAL_RUNTIME)
      if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
        global_object = '(typeof process !== "undefined" ? global : this)'
      elif shared.Settings.target_environment_may_be('node'):
        global_object = 'global'
      else:
        global_object = 'this'

      if shared.Settings.MINIMAL_RUNTIME:
        module_assign = ''
      else:
        module_assign = 'Module[asmjs_mangle(__exportedFunc)] = '

      receiving.append('''
function asmjs_mangle(x) {
  var unmangledSymbols = %s;
  return x.indexOf('dynCall_') == 0 || unmangledSymbols.indexOf(x) != -1 ? x : '_' + x;
}
''' % shared.Settings.WASM_FUNCTIONS_THAT_ARE_NOT_NAME_MANGLED)

      receiving.append('for(var __exportedFunc in asm) ' + global_object + '[asmjs_mangle(__exportedFunc)] = ' + module_assign + 'asm[__exportedFunc];')
  else:
    receiving.append('Module["asm"] = asm;')
    for e in exports:
      if shared.Settings.ASSERTIONS:
        # With assertions on, don't hot-swap implementation.
        receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
  return Module["asm"]["%(e)s"].apply(null, arguments)
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
      else:
        # With assertions off, hot-swap implementation to avoid garbage via
        # arguments keyword.
        receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
  return (%(mangled)s = Module["%(mangled)s"] = Module["asm"]["%(e)s"]).apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})

  return '\n'.join(receiving) + '\n'
def create_module_wasm(sending, receiving, invoke_funcs, metadata):
  """Assemble the JS module pieces (imports object, receiving code, invokes)."""
  invoke_wrappers = create_invoke_wrappers(invoke_funcs)
  receiving += create_named_globals(metadata)
  receiving += create_fp_accessors(metadata)

  parts = ['var asmGlobalArg = {};\n']
  if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
    parts.append("if (typeof SharedArrayBuffer !== 'undefined') asmGlobalArg['Atomics'] = Atomics;\n")

  parts.append('var asmLibraryArg = %s;\n' % (sending))
  if shared.Settings.ASYNCIFY and shared.Settings.ASSERTIONS:
    parts.append('Asyncify.instrumentWasmImports(asmLibraryArg);\n')

  if not shared.Settings.MINIMAL_RUNTIME:
    parts.append("var asm = createWasm();\n")

  parts.append(receiving)
  parts.append(invoke_wrappers)
  return parts
def load_metadata_wasm(metadata_raw, DEBUG):
  """Parse the JSON metadata emitted by wasm-emscripten-finalize.

  Returns a dict with a fixed key set (unknown keys are a hard error), and
  records unexpected exports as user-requested.
  """
  try:
    metadata_json = json.loads(metadata_raw)
  except Exception:
    logger.error('emscript: failure to parse metadata output from wasm-emscripten-finalize. raw output is: \n' + metadata_raw)
    raise

  # defaults for every key the rest of this file may read
  metadata = {
    'aliases': {},
    'declares': [],
    'implementedFunctions': [],
    'externs': [],
    'simd': False,
    'maxGlobalAlign': 0,
    'staticBump': 0,
    'tableSize': 0,
    'initializers': [],
    'exports': [],
    'namedGlobals': {},
    'emJsFuncs': {},
    'asmConsts': {},
    'invokeFuncs': [],
    'features': [],
    'mainReadsParams': 1,
  }

  assert 'tableSize' in metadata_json.keys()

  for key, value in metadata_json.items():
    # json.loads returns `unicode` for strings but other code in this file
    # generally works with utf8 encoded `str` objects, and they don't always
    # mix well. e.g. s.replace(x, y) will blow up if `s` is a utf8 str containing
    # non-ascii and either x or y are unicode objects.
    # TODO(sbc): Remove this encoding if we switch to unicode elsewhere
    # (specifically the glue returned from compile_settings)
    if type(value) == list:
      value = [asstr(v) for v in value]
    if key not in metadata:
      exit_with_error('unexpected metadata key received from wasm-emscripten-finalize: %s', key)
    metadata[key] = value

  if not shared.Settings.MINIMAL_RUNTIME:
    # In regular runtime initializers call the global var version of the export, so they get the mangled name.
    # In MINIMAL_RUNTIME, the initializers are called directly off the export object for minimal code size.
    metadata['initializers'] = [asmjs_mangle(i) for i in metadata['initializers']]

  if DEBUG:
    logger.debug("Metadata parsed: " + pprint.pformat(metadata))

  # Calculate the subset of exports that were explicitly marked with llvm.used.
  # These are any exports that were not requested on the command line and are
  # not known auto-generated system functions.
  unexpected_exports = [e for e in metadata['exports'] if treat_as_user_function(e)]
  unexpected_exports = [asmjs_mangle(e) for e in unexpected_exports]
  unexpected_exports = [e for e in unexpected_exports if e not in shared.Settings.EXPORTED_FUNCTIONS]
  shared.Building.user_requested_exports += unexpected_exports

  return metadata
def create_invoke_wrappers(invoke_funcs):
  """Asm.js-style exception handling: invoke wrapper generation."""
  # each name is 'invoke_<sig>'; generate one wrapper per signature
  sigs = [name[len('invoke_'):] for name in invoke_funcs]
  return ''.join('\n' + shared.JS.make_invoke(sig) + '\n' for sig in sigs)
def normalize_line_endings(text):
  """Normalize to UNIX line endings.

  On Windows, writing to text file will duplicate \r\n to \r\r\n otherwise.
  """
  if not WINDOWS:
    return text
  return text.replace('\r\n', '\n')
def run(infile, outfile, memfile):
  """Entry point: compile `infile` to JS written to `outfile`.

  Ensures struct info is available, then dispatches to the wasm-backend or
  fastcomp pipeline depending on settings.
  """
  temp_files = get_configuration().get_temp_files()
  infile, outfile = substitute_response_files([infile, outfile])

  if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO:
    generated_struct_info_name = 'generated_struct_info.json'

    def generate_struct_info():
      # generate (and cache) the C struct layout info used by the JS glue
      with ToolchainProfiler.profile_block('gen_struct_info'):
        out = shared.Cache.get_path(generated_struct_info_name)
        gen_struct_info.main(['-q', '-c', '-o', out])
        return out

    shared.Settings.STRUCT_INFO = shared.Cache.get(generated_struct_info_name, generate_struct_info)
  # do we need an else, to define it for the bootstrap case?

  outfile_obj = open(outfile, 'w')

  emscripter = emscript_wasm_backend if shared.Settings.WASM_BACKEND else emscript_fastcomp
  return temp_files.run_and_clean(lambda: emscripter(
      infile, outfile_obj, memfile, shared.NODE_JS, temp_files, shared.DEBUG)
  )
| 39.97543 | 256 | 0.684643 |
from __future__ import print_function
import difflib
import os
import json
import subprocess
import re
import time
import logging
import pprint
from collections import OrderedDict
from tools import shared
from tools import gen_struct_info
from tools import jsrun
from tools.response_file import substitute_response_files
from tools.shared import WINDOWS, asstr, path_from_root, exit_with_error, asmjs_mangle, treat_as_user_function
from tools.toolchain_profiler import ToolchainProfiler
from tools.minified_js_name_generator import MinifiedJsNameGenerator
logger = logging.getLogger('emscripten')

# Optionally mirror the JS compiler phase's stderr into the file named by the
# EMCC_STDERR_FILE environment variable.
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
  STDERR_FILE = os.path.abspath(STDERR_FILE)
  logger.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
  # from here on, STDERR_FILE holds an open file object rather than a path
  STDERR_FILE = open(STDERR_FILE, 'w')
def get_configuration():
  """Return the process-wide Configuration, creating it on first use."""
  # memoize on the function object itself
  config = getattr(get_configuration, 'configuration', None)
  if config is None:
    config = shared.Configuration(environ=os.environ)
    get_configuration.configuration = config
  return config
def quote(prop):
  """Quote `prop` per closure-compiler level 2 conventions, else pass through."""
  if shared.Settings.USE_CLOSURE_COMPILER != 2:
    return prop
  return ''.join("'" + part + "'" for part in prop.split('.'))
def access_quote(prop):
  """Emit a property access for `prop`: bracketed under closure level 2, dotted otherwise."""
  if shared.Settings.USE_CLOSURE_COMPILER != 2:
    return '.' + prop
  return ''.join("['" + part + "']" for part in prop.split('.'))
def emscript_fastcomp(infile, outfile, memfile, compiler_engine,
                      temp_files, DEBUG):
  """Run the fastcomp (asm.js) compilation pipeline.

  Args:
    infile: input file for the LLVM backend.
    outfile: open file object the JS output is written to (always closed).
    memfile: path for the memory initializer file.
    compiler_engine: JS engine command used to run the compiler glue.
    temp_files: temp-file manager for intermediates.
    DEBUG: whether to emit timing/debug logging.
  """
  assert shared.Settings.ASM_JS, 'fastcomp is asm.js-only (mode 1 or 2)'
  success = False

  try:
    with ToolchainProfiler.profile_block('get_and_parse_backend'):
      backend_output = compile_js(infile, temp_files, DEBUG)
      funcs, metadata, mem_init = parse_fastcomp_output(backend_output, DEBUG)
      fixup_metadata_tables(metadata)
      funcs = fixup_functions(funcs, metadata)
    with ToolchainProfiler.profile_block('compiler_glue'):
      glue, forwarded_data = compiler_glue(metadata, compiler_engine, temp_files, DEBUG)

    with ToolchainProfiler.profile_block('function_tables_and_exports'):
      (post, function_table_data, bundled_args) = (
          function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG))
    with ToolchainProfiler.profile_block('write_output_file'):
      finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG)
    success = True
  finally:
    outfile.close()
    # on failure, remove the partial output rather than leave a broken file
    if not success:
      shared.try_delete(outfile.name)
def compile_js(infile, temp_files, DEBUG):
  """Run the fastcomp LLVM backend on `infile` and return its raw JS output."""
  with temp_files.get_file('.4.js') as temp_js:
    backend_cmd = create_backend_cmd(infile, temp_js)

    if DEBUG:
      logger.debug('emscript: llvm backend: ' + ' '.join(backend_cmd))
      t = time.time()
    shared.print_compiler_stage(backend_cmd)
    with ToolchainProfiler.profile_block('emscript_llvm_backend'):
      shared.check_call(backend_cmd)
    if DEBUG:
      logger.debug('  emscript: llvm backend took %s seconds' % (time.time() - t))

    # read the output while the temp file still exists
    backend_output = open(temp_js).read()
  return backend_output
def parse_fastcomp_output(backend_output, DEBUG):
  """Split the backend output into (funcs, metadata, mem_init) sections."""
  start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
  end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
  metadata_split_marker = '// EMSCRIPTEN_METADATA'

  start_funcs = backend_output.index(start_funcs_marker)
  end_funcs = backend_output.rindex(end_funcs_marker)
  metadata_split = backend_output.rindex(metadata_split_marker)

  funcs = backend_output[start_funcs + len(start_funcs_marker):end_funcs]
  metadata_raw = backend_output[metadata_split + len(metadata_split_marker):]
  mem_init = backend_output[end_funcs + len(end_funcs_marker):metadata_split]

  # drop references to the legacy "Runtime" object
  mem_init = mem_init.replace('Runtime.', '')

  try:
    metadata = json.loads(metadata_raw, object_pairs_hook=OrderedDict)
  except ValueError:
    logger.error('emscript: failure to parse metadata output from compiler backend. raw output is: \n' + metadata_raw)
    raise

  # 'externFunctions' may be missing from the output of an older fastcomp
  # version.
  metadata.setdefault('externFunctions', [])

  if 'externUses' not in metadata:
    exit_with_error('Your fastcomp compiler is out of date, please update! (need >= 1.38.26)')

  # JS optimizer turns some heap accesses to others as an optimization, so make HEAP8 imply HEAPU8, HEAP16->HEAPU16, and HEAPF64->HEAPF32.
  if 'Int8Array' in metadata['externUses']:
    metadata['externUses'] += ['Uint8Array']
  if 'Int16Array' in metadata['externUses']:
    metadata['externUses'] += ['Uint16Array']
  if 'Float64Array' in metadata['externUses']:
    metadata['externUses'] += ['Float32Array']

  # If we are generating references to Math.fround() from here in emscripten.py, declare it used as well.
  if provide_fround() or metadata['simd']:
    metadata['externUses'] += ['Math.fround']

  # functions marked llvm.used in the code are exports requested by the user
  shared.Building.user_requested_exports += metadata['exports']

  # In MINIMAL_RUNTIME stackSave() and stackRestore are JS library functions. If LLVM backend generated
  # calls to invoke_*() functions that save and restore the stack, we must include the stack functions
  # explicitly into the build. (In traditional runtime the stack functions are always present, so this
  # tracking is not needed)
  if shared.Settings.MINIMAL_RUNTIME and (len(metadata['invokeFuncs']) > 0 or shared.Settings.LINKABLE):
    shared.Settings.EXPORTED_FUNCTIONS += ['stackSave', 'stackRestore']
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$stackSave', '$stackRestore']

  return funcs, metadata, mem_init
def fixup_metadata_tables(metadata):
  """Normalize metadata['tables'] in place (padding and side-module renames)."""
  # if emulating pointer casts, force all tables to the size of the largest
  # (for wasm, we use binaryen's fpcast-emu pass, we don't need to do anything
  # here)
  if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
    max_size = 0
    for k, v in metadata['tables'].items():
      max_size = max(max_size, v.count(',') + 1)
    for k, v in metadata['tables'].items():
      curr = v.count(',') + 1
      if curr < max_size:
        # pad with zero entries up to max_size; an empty table ('[]') must not
        # gain a leading comma
        if v.count('[]') == 1:
          metadata['tables'][k] = v.replace(']', (','.join(['0'] * (max_size - curr)) + ']'))
        else:
          metadata['tables'][k] = v.replace(']', (',0' * (max_size - curr)) + ']')

  if shared.Settings.SIDE_MODULE:
    # side modules declare their tables under SIDE_FUNCTION_TABLE_* names
    for k in metadata['tables'].keys():
      metadata['tables'][k] = metadata['tables'][k].replace('var FUNCTION_TABLE_', 'var SIDE_FUNCTION_TABLE_')
def fixup_functions(funcs, metadata):
  """Rewrite backend function output: fill table masks, fix float literals."""
  # function table masks
  table_sizes = {}
  for k, v in metadata['tables'].items():
    # undercounts by one, but that is what we want
    table_sizes[k] = str(v.count(','))
    # if shared.Settings.ASSERTIONS >= 2 and table_sizes[k] == 0:
    #   shared.warning('no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
  funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)

  # fix +float into float.0, if not running js opts
  if not shared.Settings.RUNNING_JS_OPTS:
    def fix_dot_zero(m):
      num = m.group(3)
      # TODO: handle 0x floats?
      if num.find('.') < 0:
        e = num.find('e')
        if e < 0:
          num += '.0'
        else:
          # insert '.0' before the exponent part
          num = num[:e] + '.0' + num[e:]
      return m.group(1) + m.group(2) + num

    funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', fix_dot_zero, funcs)

  return funcs
def compiler_glue(metadata, compiler_engine, temp_files, DEBUG):
  """Run the JS compiler to produce the runtime glue; returns (glue, forwarded_data)."""
  if DEBUG:
    logger.debug('emscript: js compiler glue')
    t = time.time()

  # FIXME: do these one by one as normal js lib funcs
  metadata['declares'] = [i64_func for i64_func in metadata['declares'] if i64_func not in ['getHigh32', 'setHigh32']]

  update_settings_glue(metadata, DEBUG)

  assert not (metadata['simd'] and shared.Settings.WASM), 'SIMD is used, but not supported in WASM mode yet'
  assert not (shared.Settings.SIMD and shared.Settings.WASM), 'SIMD is requested, but not supported in WASM mode yet'

  glue, forwarded_data = compile_settings(compiler_engine, temp_files)
  if DEBUG:
    logger.debug('  emscript: glue took %s seconds' % (time.time() - t))

  return glue, forwarded_data
def analyze_table(function_table_data):
  """Set WASM_TABLE_SIZE to a minimal estimate of the combined table sizes."""
  def entries_in(table_js):
    contents = table_js[table_js.index('[') + 1: table_js.index(']')]
    if not contents: # empty table
      return 0
    return contents.count(',') + 1
  # note that this is a minimal estimate, as when asm2wasm lays out tables it adds padding
  shared.Settings.WASM_TABLE_SIZE = sum(entries_in(t) for t in function_table_data.values())
# Extracts from JS library code dependencies to runtime primitives.
def get_asm_extern_primitives(pre):
primitives = re.search(r'\/\/ ASM_LIBRARY EXTERN PRIMITIVES: ([^\n]*)', pre)
if primitives:
return [x.strip().replace('Math_', 'Math.') for x in primitives.group(1).split(',')]
else:
return []
def compute_minimal_runtime_initializer_and_exports(post, initializers, exports, receiving):
  """Fill MINIMAL_RUNTIME placeholders for ctor invocation and export declarations.

  Returns the updated (post, receiving) pair; receiving is consumed (emptied)
  in WASM mode because it is spliced directly into post.
  """
  # Generate invocations for all global initializers directly off the asm export object, e.g. asm['__GLOBAL__INIT']();
  ctor_calls = '\n'.join("asm['" + func + "']();" for func in global_initializer_funcs(initializers))
  post = post.replace('/*** RUN_GLOBAL_INITIALIZERS(); ***/', ctor_calls)

  if shared.Settings.WASM:
    # Declare all exports out to global JS scope so that JS library functions can access them in a way that minifies well with Closure
    # e.g. var a,b,c,d,e,f;
    non_initializer_exports = [export for export in exports if export not in initializers]
    if shared.Settings.WASM_BACKEND:
      # In Wasm backend the exports are still unmangled at this point, so mangle the names here
      non_initializer_exports = [asmjs_mangle(export) for export in non_initializer_exports]
    post = post.replace('/*** ASM_MODULE_EXPORTS_DECLARES ***/', 'var ' + ','.join(non_initializer_exports) + ';')

    # Generate assignments from all asm.js/wasm exports out to the JS variables above: e.g. a = asm['a']; b = asm['b'];
    post = post.replace('/*** ASM_MODULE_EXPORTS ***/', receiving)
    receiving = ''

  return post, receiving
def function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG):
  """Core asm.js assembly step: merge backend output with JS compiler glue.

  Splits the glue into pre/post around EMSCRIPTEN_END_FUNCS, fills in memory
  layout, EM_ASM consts and function tables, writes the finished preamble to
  outfile, and returns (post, function_table_data, bundled_args) where
  bundled_args carries the remaining pieces for create_module_asmjs().
  """
  if DEBUG:
    logger.debug('emscript: python processing: function tables and exports')
    t = time.time()

  forwarded_json = json.loads(forwarded_data)

  # merge in information from llvm backend
  function_table_data = metadata['tables']
  if shared.Settings.WASM:
    analyze_table(function_table_data)

  # merge forwarded data
  shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']

  pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')

  pre = apply_script_source(pre)
  asm_extern_primitives = get_asm_extern_primitives(pre)
  metadata['externUses'] += asm_extern_primitives

  pre = memory_and_global_initializers(pre, metadata, mem_init)
  pre, funcs_js = get_js_funcs(pre, funcs)
  all_exported_functions = get_all_exported_functions(function_table_data)
  all_implemented = get_all_implemented(forwarded_json, metadata)
  report_missing_symbols(all_implemented, pre)
  implemented_functions = get_implemented_functions(metadata)
  pre = include_asm_consts(pre, forwarded_json, metadata)
  pre = apply_table(pre)
  outfile.write(pre)
  # The preamble can be very large; drop the reference once written.
  pre = None

  # Move preAsms to their right place
  def move_preasm(m):
    contents = m.groups(0)[0]
    outfile.write(contents + '\n')
    return ''

  if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO and len(funcs_js) > 1:
    funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', move_preasm, funcs_js[1])

  if 'pre' in function_table_data:
    pre_tables = function_table_data['pre']
    del function_table_data['pre']
  else:
    pre_tables = ''

  function_table_sigs = list(function_table_data.keys())

  in_table, debug_tables, function_tables_defs = make_function_tables_defs(
    implemented_functions, all_implemented, function_table_data, metadata)

  exported_implemented_functions = get_exported_implemented_functions(
    all_exported_functions, all_implemented, metadata)

  # List of function signatures of used 'invoke_xxx()' functions in the application
  # For backwards compatibility if one might be using a mismatching Emscripten compiler version, if 'invokeFuncs' is not present in metadata,
  # use the full list of signatures in function table and generate invoke_() functions for all signatures in the program (producing excessive code size)
  # we must also emit the full list if we are emitting code that can be linked later
  if 'invokeFuncs' in metadata and not shared.Settings.LINKABLE:
    invoke_function_names = metadata['invokeFuncs']
  else:
    invoke_function_names = ['invoke_' + x for x in function_table_sigs]

  asm_setup = create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata)
  basic_funcs = create_basic_funcs(function_table_sigs, invoke_function_names)
  basic_vars = create_basic_vars(exported_implemented_functions, forwarded_json, metadata)

  funcs_js += create_mftCall_funcs(function_table_data)

  exports = create_exports(exported_implemented_functions, in_table, function_table_data, metadata)

  # calculate globals
  try:
    del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
  except KeyError:
    pass
  if not shared.Settings.RELOCATABLE:
    global_vars = metadata['externs']
  else:
    global_vars = [] # linkable code accesses globals through function calls
  # Library functions with value 2 are variables, not functions; exclude them here.
  global_funcs = set(key for key, value in forwarded_json['Functions']['libraryFunctions'].items() if value != 2)
  global_funcs = sorted(global_funcs.difference(set(global_vars)).difference(implemented_functions))
  if shared.Settings.RELOCATABLE:
    global_funcs += ['g$' + extern for extern in metadata['externs']]
    global_funcs += ['fp$' + extern for extern in metadata['externFunctions']]

  # Tracks the set of used (minified) function names in
  # JS symbols imported to asm.js module.
  minified_js_names = MinifiedJsNameGenerator()

  # Converts list of imports ['foo', 'bar', ...] to a dictionary of
  # name mappings in form { 'minified': 'unminified', ... }
  def define_asmjs_import_names(imports):
    if shared.Settings.MINIFY_ASMJS_IMPORT_NAMES:
      return [(minified_js_names.generate(), i) for i in imports]
    else:
      return [(i, i) for i in imports]

  basic_funcs = define_asmjs_import_names(basic_funcs)
  global_funcs = define_asmjs_import_names(global_funcs)
  basic_vars = define_asmjs_import_names(basic_vars)
  global_vars = define_asmjs_import_names(global_vars)

  bg_funcs = basic_funcs + global_funcs
  bg_vars = basic_vars + global_vars
  asm_global_funcs = create_asm_global_funcs(bg_funcs, metadata)
  asm_global_vars = create_asm_global_vars(bg_vars)

  the_global = create_the_global(metadata)
  sending_vars = bg_funcs + bg_vars
  sending = OrderedDict([(math_fix(minified), unminified) for (minified, unminified) in sending_vars])
  if shared.Settings.WASM:
    add_standard_wasm_imports(sending)
  # Sort keys so the emitted import object is deterministic.
  sorted_sending_keys = sorted(sending.keys())
  sending = '{ ' + ', '.join('"' + k + '": ' + sending[k] for k in sorted_sending_keys) + ' }'

  receiving = create_receiving(function_table_data, function_tables_defs,
                               exported_implemented_functions, metadata['initializers'])

  post = apply_table(post)
  post = apply_static_code_hooks(post)

  if shared.Settings.MINIMAL_RUNTIME:
    post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], [mangled for mangled, unmangled in shared.Settings.MODULE_EXPORTS], receiving)

  function_tables_impls = make_function_tables_impls(function_table_data)
  final_function_tables = '\n'.join(function_tables_impls) + '\n' + function_tables_defs
  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    # With emulated function pointers the tables live outside the asm module,
    # so strip the asm-export indirection and disable the dynCall wrappers.
    final_function_tables = (
      final_function_tables
      .replace("asm['", '')
      .replace("']", '')
      .replace('var SIDE_FUNCTION_TABLE_', 'var FUNCTION_TABLE_')
      .replace('var dynCall_', '//')
    )

  if DEBUG:
    logger.debug('asm text sizes' + str([
      [len(s) for s in funcs_js], len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables),
      len('\n'.join(function_tables_impls)), len(function_tables_defs) + (function_tables_defs.count('\n') * len(' ')),
      len(exports), len(the_global), len(sending), len(receiving)]))
    logger.debug(' emscript: python processing: function tables and exports took %s seconds' % (time.time() - t))

  bundled_args = (funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
                  asm_global_funcs, pre_tables, final_function_tables, exports)

  return (post, function_table_data, bundled_args)
def finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG):
  """Assemble the asm.js module pieces, write them plus the postamble, then emit optional CyberDWARF data."""
  t = time.time()
  module = create_module_asmjs(function_table_data.keys(), metadata, *bundled_args)

  if DEBUG:
    logger.debug('emscript: python processing: finalize')

  write_output_file(outfile, post, module)
  # The module list can be very large; release it before the cyberdwarf step.
  del module

  if DEBUG:
    logger.debug(' emscript: python processing: finalize took %s seconds' % (time.time() - t))

  write_cyberdwarf_data(outfile, metadata)
# Given JS code that consists only exactly of a series of "var a = ...;\n var b = ...;" statements,
# this function collapses the redundant 'var ' statements at the beginning of each line to a
# single var a =..., b=..., c=...; statement.
def collapse_redundant_vars(code):
  """Collapse a run of 'var a = ...; var b = ...;' statements into one comma-joined var statement.

  Assumes `code` consists only of such statements; skipped entirely for wasm
  output where the collapse is irrelevant.
  """
  if shared.Settings.WASM:
    return code  # targeting Wasm: this does not matter there
  # Repeated vars overlap, so one regex pass is not enough; iterate until a
  # fixpoint is reached (runs in O(log(N)) passes).
  while True:
    collapsed = re.sub(r'(var [^;]*);\s*var ', r'\1,\n ', code)
    if collapsed == code:
      return collapsed
    code = collapsed
def global_initializer_funcs(initializers):
  """Return the initializer functions to call at startup.

  Multiple initializers are grouped behind a single 'globalCtors' entry,
  except under EVAL_CTORS where each must stay individually evaluable.
  """
  if len(initializers) > 1 and not shared.Settings.EVAL_CTORS:
    return ['globalCtors']
  return initializers
def create_global_initializer(initializers):
  """Return JS source for a globalCtors() function calling each initializer, or '' when ctors are not grouped."""
  # Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
  # we do not group ctors into one.
  if 'globalCtors' not in global_initializer_funcs(initializers):
    return ''
  global_initializer = ''' function globalCtors() {
%s
}''' % '\n '.join(i + '();' for i in initializers)
  return global_initializer
def create_module_asmjs(function_table_sigs, metadata,
                        funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
                        asm_global_funcs, pre_tables, final_function_tables, exports):
  """Assemble the complete asm.js module as an ordered list of source chunks.

  The order of the returned list is the output order; write_output_file()
  concatenates it verbatim.
  """
  receiving += create_named_globals(metadata)
  runtime_funcs = create_runtime_funcs_asmjs(exports, metadata)

  asm_start_pre = create_asm_start_pre(asm_setup, the_global, sending, metadata)
  memory_views = create_memory_views(metadata)
  asm_temp_vars = create_asm_temp_vars(metadata)
  asm_runtime_thread_local_vars = create_asm_runtime_thread_local_vars()

  # Stack pointer variables are only emitted when the layout is absolute
  # (not relocatable, not a wasm side module).
  stack = ''
  if not shared.Settings.RELOCATABLE and not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
    if 'STACKTOP' in shared.Settings.ASM_PRIMITIVE_VARS:
      stack += apply_memory(' var STACKTOP = {{{ STACK_BASE }}};\n')
    if 'STACK_MAX' in shared.Settings.ASM_PRIMITIVE_VARS:
      stack += apply_memory(' var STACK_MAX = {{{ STACK_MAX }}};\n')

  if 'tempFloat' in shared.Settings.ASM_PRIMITIVE_VARS:
    temp_float = ' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround() else '0.0')
  else:
    temp_float = ''
  async_state = ' var asyncState = 0;\n' if shared.Settings.EMTERPRETIFY_ASYNC else ''
  f0_fround = ' const f0 = Math_fround(0);\n' if provide_fround() else ''

  replace_memory = create_replace_memory(metadata)

  start_funcs_marker = '\n// EMSCRIPTEN_START_FUNCS\n'

  asm_end = create_asm_end(exports)

  asm_variables = collapse_redundant_vars(memory_views + asm_global_vars + asm_temp_vars + asm_runtime_thread_local_vars + '\n' + asm_global_funcs + stack + temp_float + async_state + f0_fround)
  asm_global_initializer = create_global_initializer(metadata['initializers'])

  module = [
    asm_start_pre,
    asm_variables,
    replace_memory,
    start_funcs_marker,
    asm_global_initializer
  ] + runtime_funcs + funcs_js + [
    '\n ',
    pre_tables, final_function_tables, asm_end,
    '\n', receiving, ';\n'
  ]

  if shared.Settings.SIDE_MODULE:
    # Side modules register their function-table signatures with the parent.
    module.append('''
parentModule['registerFunctions'](%s, Module);
''' % str([str(f) for f in function_table_sigs]))

  return module
def write_output_file(outfile, post, module):
  """Write the module chunks followed by the postamble, normalizing line endings.

  Pieces are normalized and written one at a time to keep peak memory low for
  very large outputs.
  """
  for idx, piece in enumerate(module):
    module[idx] = normalize_line_endings(piece)
    outfile.write(module[idx])

  outfile.write(normalize_line_endings(post))
def write_cyberdwarf_data(outfile, metadata):
  """Emit the CyberDWARF debug-info sidecar file (<output>.cd) when CYBERDWARF is enabled."""
  if not shared.Settings.CYBERDWARF:
    return

  assert 'cyberdwarf_data' in metadata
  cd_file_name = outfile.name + ".cd"
  with open(cd_file_name, 'w') as f:
    json.dump({'cyberdwarf': metadata['cyberdwarf_data']}, f)
def create_backend_cmd(infile, temp_js):
  """Build the command line for the LLVM asm.js backend compiling infile to temp_js.

  Each enabled Setting maps to one backend flag; the base command always
  selects the JS target and asm output.
  """
  args = [
    shared.LLVM_COMPILER, infile, '-march=js', '-filetype=asm', '-o', temp_js,
    '-emscripten-stack-size=%d' % shared.Settings.TOTAL_STACK,
    '-O%s' % shared.Settings.OPT_LEVEL,
  ]
  if shared.Settings.PRECISE_F32:
    args += ['-emscripten-precise-f32']
  if shared.Settings.USE_PTHREADS:
    args += ['-emscripten-enable-pthreads']
  if shared.Settings.WARN_UNALIGNED:
    args += ['-emscripten-warn-unaligned']
  if shared.Settings.RESERVED_FUNCTION_POINTERS > 0:
    args += ['-emscripten-reserved-function-pointers=%d' % shared.Settings.RESERVED_FUNCTION_POINTERS]
  if shared.Settings.ASSERTIONS > 0:
    args += ['-emscripten-assertions=%d' % shared.Settings.ASSERTIONS]
  if shared.Settings.ALIASING_FUNCTION_POINTERS == 0:
    args += ['-emscripten-no-aliasing-function-pointers']
  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    args += ['-emscripten-emulated-function-pointers']
  if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
    args += ['-emscripten-emulate-function-pointer-casts']
  if shared.Settings.RELOCATABLE:
    args += ['-emscripten-relocatable']
    # Relocatable code is laid out relative to base 0; the real base is applied at load time.
    args += ['-emscripten-global-base=0']
  elif shared.Settings.GLOBAL_BASE >= 0:
    args += ['-emscripten-global-base=%d' % shared.Settings.GLOBAL_BASE]
  if shared.Settings.SIDE_MODULE:
    args += ['-emscripten-side-module']
  if shared.Settings.LEGALIZE_JS_FFI != 1:
    args += ['-emscripten-legalize-javascript-ffi=0']
  if shared.Settings.DISABLE_EXCEPTION_CATCHING != 1:
    args += ['-enable-emscripten-cpp-exceptions']
    if shared.Settings.DISABLE_EXCEPTION_CATCHING == 2:
      args += ['-emscripten-cpp-exceptions-whitelist=' + ','.join(shared.Settings.EXCEPTION_CATCHING_WHITELIST or ['fake'])]
  if not shared.Settings.EXIT_RUNTIME:
    args += ['-emscripten-no-exit-runtime']
  if shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG:
    args += ['-emscripten-asmjs-work-around-ios-9-right-shift-bug']
  if shared.Settings.WASM:
    args += ['-emscripten-wasm']
    if shared.Building.is_wasm_only():
      args += ['-emscripten-only-wasm']
  if shared.Settings.CYBERDWARF:
    args += ['-enable-cyberdwarf']
  return args
def optimize_syscalls(declares, DEBUG):
  """Disable full filesystem support when only trivial syscalls are used.

  Sets Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0 either when FILESYSTEM is
  explicitly off, or when the program's declared syscalls are all in the
  known-trivial set. Skipped entirely when a setting forces the filesystem in.
  """
  # These settings require the full filesystem regardless of which syscalls appear.
  for setting in ('FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE'):
    if shared.Settings[setting]:
      return

  if shared.Settings.FILESYSTEM == 0:
    # without filesystem support, it doesn't matter what syscalls need
    shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
    return

  # Syscalls that a filesystem-less runtime can still service.
  trivial_syscalls = {
    '__syscall6', '__syscall54', '__syscall140',
    'fd_seek', '__wasi_fd_seek',
    'fd_write', '__wasi_fd_write',
    'fd_close', '__wasi_fd_close',
  }
  used = [d for d in declares if d.startswith(('__syscall', 'fd_', '__wasi_fd_'))]
  if set(used).issubset(trivial_syscalls):
    if DEBUG:
      logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in used))
    shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
def is_int(x):
  """Return True when int(x) succeeds, False on a ValueError (e.g. '3.5', 'abc')."""
  try:
    int(x)
  except ValueError:
    return False
  return True
def align_memory(addr):
  """Round addr up to the next multiple of 16 (memory allocations are 16-byte aligned)."""
  return (addr + 15) // 16 * 16
def align_static_bump(metadata):
  """Align the static-data size ('staticBump') to 16 bytes in place, and return it."""
  aligned = align_memory(metadata['staticBump'])
  metadata['staticBump'] = aligned
  return aligned
def update_settings_glue(metadata, DEBUG):
  """Merge backend metadata into the global Settings before running the JS compiler.

  Mutates shared.Settings in place: library-function lists, SIMD/asm.js
  validation mode, table sizes, static bump, and proxied EM_ASM signatures.
  """
  optimize_syscalls(metadata['declares'], DEBUG)

  if shared.Settings.CYBERDWARF:
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.append("cyberdwarf_Debugger")
    shared.Settings.EXPORTED_FUNCTIONS.append("cyberdwarf_Debugger")

  if shared.Settings.SIDE_MODULE:
    # Side modules rely on the main module's library functions.
    shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []

  if metadata.get('cantValidate') and shared.Settings.ASM_JS != 2:
    shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
    shared.Settings.ASM_JS = 2

  # Library functions to include: everything requested plus everything
  # declared by compiled code, minus what the compiled code itself implements.
  all_funcs = shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE + [shared.JS.to_nice_ident(d) for d in metadata['declares']]
  implemented_funcs = [x[1:] for x in metadata['implementedFunctions']]
  shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = sorted(set(all_funcs).difference(implemented_funcs))
  shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [x[1:] for x in metadata['externs']]

  if metadata['simd']:
    shared.Settings.SIMD = 1
    if shared.Settings.ASM_JS != 2:
      shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of SIMD')
      shared.Settings.ASM_JS = 2

  shared.Settings.MAX_GLOBAL_ALIGN = metadata['maxGlobalAlign']
  shared.Settings.IMPLEMENTED_FUNCTIONS = metadata['implementedFunctions']

  # Extract the list of function signatures that MAIN_THREAD_EM_ASM blocks in
  # the compiled code have, each signature will need a proxy function invoker
  # generated for it.
  def read_proxied_function_signatures(asmConsts):
    proxied_function_signatures = set()
    for _, sigs, proxying_types in asmConsts.values():
      for sig, proxying_type in zip(sigs, proxying_types):
        if proxying_type == 'sync_on_main_thread_':
          proxied_function_signatures.add(sig + '_sync')
        elif proxying_type == 'async_on_main_thread_':
          proxied_function_signatures.add(sig + '_async')
    return list(proxied_function_signatures)

  shared.Settings.PROXIED_FUNCTION_SIGNATURES = read_proxied_function_signatures(metadata['asmConsts'])

  shared.Settings.STATIC_BUMP = align_static_bump(metadata)

  if shared.Settings.WASM_BACKEND:
    shared.Settings.BINARYEN_FEATURES = metadata['features']
    shared.Settings.WASM_TABLE_SIZE = metadata['tableSize']
    if shared.Settings.RELOCATABLE:
      # When building relocatable output (e.g. MAIN_MODULE) the reported table
      # size does not include the reserved slot at zero for the null pointer.
      # Instead we use __table_base to offset the elements by 1.
      shared.Settings.WASM_TABLE_SIZE += 1
    shared.Settings.MAIN_READS_PARAMS = metadata['mainReadsParams']
# static code hooks
class StaticCodeHooks:
  """Module-level holder for JS snippets spliced into the {{{ ATINITS }}} /
  {{{ ATMAINS }}} / {{{ ATEXITS }}} placeholders by apply_static_code_hooks().

  The list defaults are replaced with strings by apply_forwarded_data()
  before any substitution happens.
  """
  # JS to run at init time, main() entry, and exit, respectively.
  atinits = []
  atmains = []
  atexits = []
def apply_static_code_hooks(code):
  """Substitute the collected static code hooks into their template placeholders."""
  substitutions = (
    ('{{{ ATINITS }}}', StaticCodeHooks.atinits),
    ('{{{ ATMAINS }}}', StaticCodeHooks.atmains),
    ('{{{ ATEXITS }}}', StaticCodeHooks.atexits),
  )
  for placeholder, snippet in substitutions:
    code = code.replace(placeholder, snippet)
  return code
def apply_forwarded_data(forwarded_data):
  """Apply state forwarded from the JS compiler to Settings and StaticCodeHooks."""
  forwarded_json = json.loads(forwarded_data)
  # Be aware of JS static allocations
  shared.Settings.STATIC_BUMP = forwarded_json['STATIC_BUMP']
  shared.Settings.DYNAMICTOP_PTR = forwarded_json['DYNAMICTOP_PTR']
  # Be aware of JS static code hooks
  for attr, key in (('atinits', 'ATINITS'), ('atmains', 'ATMAINS'), ('atexits', 'ATEXITS')):
    setattr(StaticCodeHooks, attr, str(forwarded_json[key]))
def compile_settings(compiler_engine, temp_files):
  """Invoke src/compiler.js with the current Settings and return (glue, forwarded_data).

  Settings are passed via a temp file rather than the command line; the JS
  compiler's stdout carries the glue followed by a //FORWARDED_DATA: marker
  and a JSON payload, which is applied to global state before returning.
  """
  # Save settings to a file to work around v8 issue 1579
  with temp_files.get_file('.txt') as settings_file:
    with open(settings_file, 'w') as s:
      json.dump(shared.Settings.to_dict(), s, sort_keys=True)

    # Call js compiler
    env = os.environ.copy()
    env['EMCC_BUILD_DIR'] = os.getcwd()
    out = jsrun.run_js_tool(path_from_root('src', 'compiler.js'), compiler_engine,
                            [settings_file], stdout=subprocess.PIPE, stderr=STDERR_FILE,
                            cwd=path_from_root('src'), env=env)
  assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
  glue, forwarded_data = out.split('//FORWARDED_DATA:')
  apply_forwarded_data(forwarded_data)
  return glue, forwarded_data
class Memory():
  """Computes the static memory layout: globals, then stack, then heap.

  Attributes (all 16-byte aligned where applicable):
    global_base  -- start of static data
    static_bump  -- size of static data
    stack_low / stack_high -- stack region bounds
    stack_base / stack_max -- stack start / limit; swapped between backends
                              since the wasm backend's stack grows downward
                              (base above max) while fastcomp's grows upward
    dynamic_base -- start of the heap, just past the stack
  """
  def __init__(self):
    # Note: if RELOCATABLE, then only relative sizes can be computed, and we don't
    self.global_base = shared.Settings.GLOBAL_BASE
    self.static_bump = shared.Settings.STATIC_BUMP

    self.stack_low = align_memory(self.global_base + self.static_bump)
    self.stack_high = align_memory(self.stack_low + shared.Settings.TOTAL_STACK)
    if shared.Settings.WASM_BACKEND:
      self.stack_base = self.stack_high
      self.stack_max = self.stack_low
    else:
      self.stack_base = self.stack_low
      self.stack_max = self.stack_high
    self.dynamic_base = align_memory(self.stack_high)

    if self.dynamic_base >= shared.Settings.TOTAL_MEMORY:
      exit_with_error('Memory is not large enough for static data (%d) plus the stack (%d), please increase TOTAL_MEMORY (%d) to at least %d' % (self.static_bump, shared.Settings.TOTAL_STACK, shared.Settings.TOTAL_MEMORY, self.dynamic_base))
def apply_memory(js):
  """Fill memory-layout placeholders in js and record DYNAMIC_BASE in Settings."""
  memory = Memory()
  replacements = {
    '{{{ STATIC_BUMP }}}': str(memory.static_bump),
    '{{{ STACK_BASE }}}': str(memory.stack_base),
    '{{{ STACK_MAX }}}': str(memory.stack_max),
    '{{{ DYNAMIC_BASE }}}': str(memory.dynamic_base),
  }
  for placeholder, value in replacements.items():
    js = js.replace(placeholder, value)
  logger.debug('global_base: %d stack_base: %d, stack_max: %d, dynamic_base: %d, static bump: %d', memory.global_base, memory.stack_base, memory.stack_max, memory.dynamic_base, memory.static_bump)

  shared.Settings.DYNAMIC_BASE = memory.dynamic_base

  return js
def apply_table(js):
  """Substitute the computed wasm table size into the {{{ WASM_TABLE_SIZE }}} placeholder."""
  return js.replace('{{{ WASM_TABLE_SIZE }}}', str(shared.Settings.WASM_TABLE_SIZE))
def apply_script_source(js):
  """Substitute the output basename into the {{{ TARGET_BASENAME }}} placeholder."""
  return js.replace('{{{ TARGET_BASENAME }}}', shared.Settings.TARGET_BASENAME)
def memory_and_global_initializers(pre, metadata, mem_init):
  """Inject the memory initializer and global-ctor registration into the preamble.

  Replaces the 'STATICTOP = STATIC_BASE + 0;' stub with the real static bump,
  an __ATINIT__.push(...) of the global initializers (non-minimal runtime
  only), and the memory-init code; then applies memory placeholders and
  static code hooks.
  """
  if shared.Settings.SIMD == 1:
    # The SIMD polyfill must be loaded before any compiled code runs.
    pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre

  staticbump = shared.Settings.STATIC_BUMP

  pthread = ''
  if shared.Settings.USE_PTHREADS:
    # Pthread workers must not re-run the global initializers.
    pthread = 'if (!ENVIRONMENT_IS_PTHREAD)'

  global_initializers = ''
  if not shared.Settings.MINIMAL_RUNTIME:
    global_initializers = global_initializer_funcs(metadata['initializers'])
    if len(global_initializers) > 0:
      global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in global_initializers)
      global_initializers = '/* global initializers */ {pthread} __ATINIT__.push({global_initializers});'.format(pthread=pthread, global_initializers=global_initializers)
    else:
      global_initializers = '/* global initializers */ /*__ATINIT__.push();*/'

  pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''\
STATICTOP = STATIC_BASE + {staticbump};
{global_initializers}
{mem_init}'''.format(staticbump=staticbump,
                     global_initializers=global_initializers,
                     mem_init=mem_init))

  if shared.Settings.SIDE_MODULE:
    pre = pre.replace('GLOBAL_BASE', 'gb')

  pre = apply_memory(pre)
  pre = apply_static_code_hooks(pre)

  return pre
def get_js_funcs(pre, funcs):
  """Split JS-library function bodies out of the preamble.

  Returns (pre, funcs_js): funcs_js always starts with the compiled funcs,
  and additionally contains whatever followed the '// ASM_LIBRARY FUNCTIONS'
  marker in the preamble, if present.
  """
  pieces = pre.split('// ASM_LIBRARY FUNCTIONS\n')
  if len(pieces) == 1:
    return pre, [funcs]
  return pieces[0], [funcs, pieces[1]]
def get_all_exported_functions(function_table_data):
  """Collect every function name that must be exported from the asm module."""
  exported = set(shared.Settings.EXPORTED_FUNCTIONS)

  # Forced-in library functions are exported under their '_'-prefixed name.
  exported.update('_' + name for name in shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE)

  if shared.Settings.EXPORT_FUNCTION_TABLES:
    # Every compiled ('_'-prefixed) entry in every table is exported too.
    for table in function_table_data.values():
      entries = table.split('[')[1].split(']')[0].split(',')
      for entry in entries:
        if entry[0] == '_':
          exported.add(entry)

  return exported
def get_all_implemented(forwarded_json, metadata):
  """Return the set of functions implemented either in compiled code or in JS libraries."""
  implemented = set(metadata['implementedFunctions'])
  implemented.update(forwarded_json['Functions']['implementedFunctions'])
  return implemented
def report_missing_symbols(all_implemented, pre):
  """Warn or error about user-exported functions that are not implemented anywhere.

  all_implemented: set of implemented function names (leading '_').
  pre: generated JS preamble; a symbol already defined there as a function
       is not considered missing.
  """
  if not shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS and not shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
    return

  missing = list(set(shared.Settings.USER_EXPORTED_FUNCTIONS) - all_implemented)

  for requested in missing:
    # A JS library function may already define this symbol in the preamble.
    if ('function ' + asstr(requested)) in pre:
      continue
    # Exempt malloc from the report. BUGFIX: this previously read
    # `if missing == '_malloc'`, comparing the whole *list* to a string,
    # which is never true — the exemption never fired.
    if requested == '_malloc':
      continue
    if shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
      exit_with_error('undefined exported function: "%s"', requested)
    elif shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
      shared.warning('undefined exported function: "%s"', requested)
def get_exported_implemented_functions(all_exported_functions, all_implemented, metadata):
  """Return the sorted list of implemented functions that should be exported.

  Starts from the backend's declared exports, adds implemented functions that
  were requested (or everything under EXPORT_ALL, or embind bindings under
  EXPORT_BINDINGS), resolves aliases, and appends the runtime helpers the
  current settings require.
  """
  export_all = shared.Settings.EXPORT_ALL
  export_bindings = shared.Settings.EXPORT_BINDINGS

  funcs = set(metadata['exports'])
  for name in all_implemented:
    if export_all or name in all_exported_functions or (export_bindings and name.startswith('_emscripten_bind')):
      funcs.add(name)

  if not export_all:
    for name, alias in metadata['aliases'].items():
      # An alias is exported when its target is implemented and the alias name was requested.
      if alias in all_implemented and name in all_exported_functions:
        funcs.add(alias)

  funcs.update(global_initializer_funcs(metadata['initializers']))

  if shared.Settings.ALLOW_MEMORY_GROWTH:
    funcs.add('_emscripten_replace_memory')
  if not shared.Settings.SIDE_MODULE and not shared.Settings.MINIMAL_RUNTIME:
    funcs.update(('stackAlloc', 'stackSave', 'stackRestore'))
  if shared.Settings.USE_PTHREADS:
    funcs.add('establishStackSpace')
  if shared.Settings.EMTERPRETIFY:
    funcs.add('emterpret')
    if shared.Settings.EMTERPRETIFY_ASYNC:
      funcs.update(('setAsyncState', 'emtStackSave', 'emtStackRestore', 'getEmtStackMax', 'setEmtStackMax'))

  return sorted(funcs)
def get_implemented_functions(metadata):
  """Return the names of the functions implemented in compiled code, as a set."""
  implemented = metadata['implementedFunctions']
  return set(implemented)
def proxy_debug_print(sync):
  """Return JS debug-logging for proxied EM_ASM calls; empty unless PTHREADS_DEBUG."""
  if not shared.Settings.PTHREADS_DEBUG:
    return ''
  kind = 'sync' if sync else 'async'
  return 'warnOnce("' + kind + ' proxying function " + code);'
def include_asm_consts(pre, forwarded_json, metadata):
  """Generate the ASM_CONSTS array plus the _emscripten_asm_const_* invokers
  and splice them (with any EM_JS functions) into the preamble at '// === Body ==='."""
  if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
    if metadata['asmConsts']:
      exit_with_error('EM_ASM is not yet supported in shared wasm module (it cannot be stored in the wasm itself, need some solution)')

  asm_consts, all_sigs = all_asm_consts(metadata)
  asm_const_funcs = []
  for sig, call_type in all_sigs:
    if 'j' in sig:
      exit_with_error('emscript: EM_ASM should not receive i64s as inputs, they are not valid in JS')
    # Only one invoker is needed per (call type, signature) pair.
    if '_emscripten_asm_const_' + call_type + sig in forwarded_json['Functions']['libraryFunctions']:
      continue
    forwarded_json['Functions']['libraryFunctions']['_emscripten_asm_const_' + call_type + sig] = 1

    # First signature char is the return type; the rest are the arguments.
    args = ['a%d' % i for i in range(len(sig) - 1)]
    all_args = ['code'] + args

    pre_asm_const = ''

    if shared.Settings.USE_PTHREADS:
      sync_proxy = call_type == 'sync_on_main_thread_'
      async_proxy = call_type == 'async_on_main_thread_'
      proxied = sync_proxy or async_proxy
      if proxied:
        # Calls made from a pthread are forwarded to the main thread.
        proxy_args = ['-1 - code', str(int(sync_proxy))] + args
        pre_asm_const += ' if (ENVIRONMENT_IS_PTHREAD) { ' + proxy_debug_print(sync_proxy) + 'return _emscripten_proxy_to_main_thread_js(' + ', '.join(proxy_args) + '); }\n'

    if shared.Settings.EMTERPRETIFY_ASYNC and shared.Settings.ASSERTIONS:
      pre_asm_const += " assert(typeof EmterpreterAsync !== 'object' || EmterpreterAsync.state !== 2, 'cannot have an EM_ASM on the stack when emterpreter pauses/resumes - the JS is not emterpreted, so we would end up running it again from the start');\n"

    asm_const_funcs.append(r'''
function _emscripten_asm_const_%s(%s) {
%s return ASM_CONSTS[code](%s);
}''' % (call_type + asstr(sig), ', '.join(all_args), pre_asm_const, ', '.join(args)))

  asm_consts_text = '\nvar ASM_CONSTS = [' + ',\n '.join(asm_consts) + '];\n'
  asm_funcs_text = '\n'.join(asm_const_funcs) + '\n'

  em_js_funcs = create_em_js(forwarded_json, metadata)
  em_js_text = '\n'.join(em_js_funcs) + '\n'

  body_marker = '// === Body ==='
  return pre.replace(body_marker, body_marker + '\n' + asm_consts_text + asstr(asm_funcs_text) + em_js_text)
def parentheses_match(body, openIdx, closeIdx):
  """Return True if the bracket at openIdx closes exactly at closeIdx.

  The bracket characters are taken from body itself (body[openIdx] opens,
  body[closeIdx] closes); a negative closeIdx counts from the end.
  """
  if closeIdx < 0:
    closeIdx += len(body)
  open_ch = body[openIdx]
  close_ch = body[closeIdx]
  depth = 1
  for pos in range(openIdx + 1, closeIdx + 1):
    ch = body[pos]
    if ch == open_ch:
      depth += 1
    elif ch == close_ch:
      depth -= 1
      if depth <= 0:
        # First return to depth zero: matched iff it happens at closeIdx itself.
        return pos == closeIdx
  return False
def trim_asm_const_body(body):
  """Strip redundant quoting and bracketing from an EM_ASM body, repeating until stable."""
  body = body.strip()
  while True:
    before = body
    # Unwrap a fully-quoted body, un-escaping embedded quotes.
    if len(body) > 1 and body.startswith('"') and body.endswith('"'):
      body = body[1:-1].replace('\\"', '"').strip()
    # Unwrap braces/parens only when the leading bracket really closes at the very end.
    for open_ch, close_ch in (('{', '}'), ('(', ')')):
      if len(body) > 1 and body[0] == open_ch and body[-1] == close_ch and parentheses_match(body, 0, -1):
        body = body[1:-1].strip()
    if body == before:
      return body
def all_asm_consts(metadata):
  """Build the JS function source for every EM_ASM block.

  Returns (asm_consts, all_sigs): asm_consts is a list of 'function(...) {...}'
  strings indexed by the EM_ASM id, and all_sigs collects every
  (signature, call_type) pair seen.
  """
  asm_consts = [0] * len(metadata['asmConsts'])
  all_sigs = []
  for key, (const, sigs, call_types) in metadata['asmConsts'].items():
    body = trim_asm_const_body(asstr(const))
    # The widest signature determines how many formal parameters the wrapper takes
    # (first signature char is the return type).
    arity = max(len(s) for s in sigs) - 1
    params = ', '.join('$' + str(i) for i in range(arity))
    asm_consts[int(key)] = 'function(' + params + ') { ' + body + ' }'
    assert len(sigs) == len(call_types)
    all_sigs.extend(zip(sigs, call_types))
  return asm_consts, all_sigs
def unfloat(s):
  """Map the signature char 'f' (float) to 'd' (double); pass every other char through."""
  if s == 'f':
    return 'd'
  return s
def make_function_tables_defs(implemented_functions, all_implemented, function_table_data, metadata):
  """Build the asm.js function-table definitions.

  For every signature's table this fills null slots with 'bad' abort stubs,
  wraps imported functions so they can live in asm tables, and (optionally)
  emits cast-emulation trampolines. Returns (in_table, debug_tables,
  function_tables_defs): the set of implemented functions referenced from
  tables, the pre-fixup table bodies kept for ASSERTIONS>=2 diagnostics, and
  the concatenated JS source.
  """
  class Counter(object):
    # Mutable state shared by the nested helpers below (closures cannot
    # rebind outer locals here, so a class attribute is used instead).
    next_bad_item = 0
    next_item = 0
    pre = []

  in_table = set()
  debug_tables = {}

  def make_params(sig):
    # 'p0,p1,...' — one formal per argument char of the signature.
    return ','.join('p%d' % p for p in range(len(sig) - 1))

  def make_coerced_params(sig):
    # Parameters with asm.js type coercions applied, for call sites.
    return ','.join(shared.JS.make_coercion('p%d', unfloat(sig[p + 1])) % p for p in range(len(sig) - 1))

  def make_coercions(sig):
    # 'p0 = p0|0;...' — parameter coercion statements for function bodies.
    return ';'.join('p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p + 1])) for p in range(len(sig) - 1)) + ';'

  if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
    # Map each table slot index to its (signature, target) so calls through a
    # wrongly-typed pointer can be redirected to the real target.
    function_pointer_targets = {}
    for sig, table in function_table_data.items():
      start = table.index('[')
      end = table.rindex(']')
      body = table[start + 1:end].split(',')
      for i, parsed in enumerate(x.strip() for x in body):
        if parsed != '0':
          assert i not in function_pointer_targets
          function_pointer_targets[i] = [sig, str(parsed)]

  def make_table(sig, raw):
    # Returns (helper function defs, fixed-up table source) for one signature.
    if '[]' in raw:
      return ('', '')

    params = make_params(sig)
    coerced_params = make_coerced_params(sig)
    coercions = make_coercions(sig)

    def make_bad(target=None):
      # Create a stub for a null table slot that aborts (or silently
      # continues when 'abort' is unavailable and assertions are off).
      i = Counter.next_bad_item
      Counter.next_bad_item += 1
      if target is None:
        target = i
      name = 'b' + str(i)
      if not shared.Settings.ASSERTIONS:
        if 'abort' in shared.Settings.RUNTIME_FUNCS_TO_IMPORT:
          code = 'abort(%s);' % target
        else:
          code = '\n/*execution is supposed to abort here, but you did not include "abort" in RUNTIME_FUNCS_TO_IMPORT (to save code size?). Silently trucking through, enjoy :)*/\n'
      else:
        code = 'nullFunc_' + sig + '(%d);' % target
      if sig[0] != 'v':
        code += 'return %s' % shared.JS.make_initializer(sig[0]) + ';'
      return name, make_func(name, code, params, coercions)

    bad, bad_func = make_bad()
    if shared.Settings.ASSERTIONS <= 1:
      # One shared stub suffices; with ASSERTIONS > 1 each slot gets its own
      # stub so the reported index identifies the bad call site.
      Counter.pre = [bad_func]
    else:
      Counter.pre = []
    start = raw.index('[')
    end = raw.rindex(']')
    body = raw[start + 1:end].split(',')
    if shared.Settings.EMULATED_FUNCTION_POINTERS:
      def receive(item):
        if item == '0':
          return item
        if item not in all_implemented:
          return item
        in_table.add(item)
        return "asm['" + item + "']"
      body = [receive(b) for b in body]
    for j in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
      # Reserved slots (after the null slot) host the jsCall trampolines.
      curr = 'jsCall_%s_%s' % (sig, j)
      body[1 + j] = curr
      implemented_functions.add(curr)
    Counter.next_item = 0

    def fix_item(item):
      # Replace one table entry: null slots become bad/emulation stubs,
      # imported functions get coercion wrappers.
      j = Counter.next_item
      Counter.next_item += 1
      newline = Counter.next_item % 30 == 29
      if item == '0':
        if j > 0 and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM and j in function_pointer_targets:
          # This slot is null in this table but has a real target in another
          # (mis-signatured) table; emit a trampoline that coerces and calls it.
          proper_sig, proper_target = function_pointer_targets[j]
          if shared.Settings.EMULATED_FUNCTION_POINTERS:
            if proper_target in all_implemented:
              proper_target = "asm['" + proper_target + "']"

          def make_emulated_param(i):
            if i >= len(sig):
              return shared.JS.make_initializer(proper_sig[i])
            return shared.JS.make_coercion('p%d' % (i - 1), proper_sig[i], convert_from=sig[i])

          proper_code = proper_target + '(' + ','.join([make_emulated_param(i + 1) for i in range(len(proper_sig) - 1)]) + ')'
          if proper_sig[0] != 'v':
            proper_code = shared.JS.make_coercion(proper_code, proper_sig[0])
            if proper_sig[0] != sig[0]:
              proper_code = shared.JS.make_coercion(proper_code, sig[0], convert_from=proper_sig[0])
            if sig[0] != 'v':
              proper_code = 'return ' + proper_code
          else:
            if sig[0] != 'v':
              proper_code = 'return ' + shared.JS.make_initializer(sig[0])
          name = 'fpemu_%s_%d' % (sig, j)
          wrapper = make_func(name, proper_code, params, coercions)
          Counter.pre.append(wrapper)
          return name if not newline else (name + '\n')
        if shared.Settings.ASSERTIONS <= 1:
          return bad if not newline else (bad + '\n')
        specific_bad, specific_bad_func = make_bad(j)
        Counter.pre.append(specific_bad_func)
        return specific_bad if not newline else (specific_bad + '\n')

      clean_item = item.replace("asm['", '').replace("']", '')
      # but if relocating, then we also have the copies in-module, and do
      # in wasm we never need wrappers though
      if clean_item not in implemented_functions and not (shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.RELOCATABLE) and not shared.Settings.WASM:
        # this is imported into asm, we must wrap it
        call_ident = clean_item
        if call_ident in metadata['redirects']:
          call_ident = metadata['redirects'][call_ident]
        if not call_ident.startswith('_') and not call_ident.startswith('Math_'):
          call_ident = '_' + call_ident
        code = call_ident + '(' + coerced_params + ')'
        if sig[0] != 'v':
          # ffis cannot return float
          if sig[0] == 'f':
            code = '+' + code
          code = 'return ' + shared.JS.make_coercion(code, sig[0])
        code += ';'
        Counter.pre.append(make_func(clean_item + '__wrapper', code, params, coercions))
        assert not sig == 'X', 'must know the signature in order to create a wrapper for "%s" (TODO for shared wasm modules)' % item
        return clean_item + '__wrapper'
      return item if not newline else (item + '\n')

    if shared.Settings.ASSERTIONS >= 2:
      # Keep the original (pre-fixup) table for runtime debug output.
      debug_tables[sig] = body
    body = ','.join(fix_item(b) for b in body)
    return ('\n'.join(Counter.pre), ''.join([raw[:start + 1], body, raw[end:]]))

  infos = [make_table(sig, raw) for sig, raw in function_table_data.items()]
  Counter.pre = []

  function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n'
  function_tables_defs += '\n// EMSCRIPTEN_END_FUNCS\n'
  function_tables_defs += '\n'.join([info[1] for info in infos])
  return in_table, debug_tables, function_tables_defs
def make_func(name, code, params, coercions):
  """Render the text of an asm.js function: coercions first, then the body."""
  body = '%s %s' % (coercions, code)
  return 'function %s(%s) {\n %s\n}' % (name, params, body)
def math_fix(g):
  """Strip a 'Math_' prefix (e.g. 'Math_floor' -> 'floor'); other names pass through."""
  if g.startswith('Math_'):
    return g.split('_')[1]
  return g
# asm.js function tables have one table in each linked asm.js module, so we
# can't just dynCall into them - ftCall exists for that purpose. In wasm,
# even linked modules share a single table, so ftCall is not needed there.
def asm_js_emulated_function_pointers():
  """Whether emulated function pointers are active for asm.js output (never for wasm)."""
  if shared.Settings.WASM:
    return False
  return shared.Settings.EMULATED_FUNCTION_POINTERS
def make_function_tables_impls(function_table_data):
  """Create the dynCall_SIG (and jsCall_SIG_N) function definitions.

  function_table_data maps a signature string like 'iii' (return type first)
  to the JS text of its function table.  Returns a list of JS source strings.
  """
  function_tables_impls = []
  for sig, table in function_table_data.items():
    # a1..aN argument names; sig[0] is the return type, so args start at 1
    args = ','.join(['a' + str(i) for i in range(1, len(sig))])
    arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
    coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i]) for i in range(1, len(sig))])
    # table size minus one, used to mask the call index into range
    sig_mask = str(table.count(','))
    if not (shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS):
      ret = 'FUNCTION_TABLE_%s[index&%s](%s)' % (sig, sig_mask, coerced_args)
    else:
      # for wasm with emulated function pointers, emit an mft_SIG(..) call, we avoid asm.js function tables there.
      ret = 'mftCall_%s(index%s%s)' % (sig, ',' if len(sig) > 1 else '', coerced_args)
    ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion(ret, sig[0])
    if not asm_js_emulated_function_pointers():
      function_tables_impls.append('''
function dynCall_%s(index%s%s) {
  index = index|0;
  %s
  %s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
    else:
      # with asm.js emulated function pointers, dynCall simply aliases ftCall
      function_tables_impls.append('''
var dynCall_%s = ftCall_%s;
''' % (sig, sig))
    ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], ffi_arg=True) for i in range(1, len(sig))])
    # reserved function pointer slots dispatch out to JS through jsCall_SIG_N
    for i in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
      jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall_%s(%d%s%s)' % (sig, i, ',' if ffi_args else '', ffi_args), sig[0], ffi_result=True)
      function_tables_impls.append('''
function jsCall_%s_%s(%s) {
  %s
  %s;
}
''' % (sig, i, args, arg_coercions, jsret))
  return function_tables_impls
def create_mftCall_funcs(function_table_data):
  """Create mftCall_SIG helpers for asm.js relocatable emulated-pointer builds.

  An mftCall either calls directly into this module's own table (when the
  pointer lands inside it) or forwards through ftCall_SIG to another module.
  Returns a list of JS function definition strings.
  """
  if not asm_js_emulated_function_pointers():
    return []
  # NOTE(review): the WASM half of this check is redundant — the guard above
  # already returns [] for wasm builds.
  if shared.Settings.WASM or not shared.Settings.RELOCATABLE:
    return []
  mftCall_funcs = []
  # in wasm, emulated function pointers are just simple table calls
  for sig, table in function_table_data.items():
    return_type, sig_args = sig[0], sig[1:]
    num_args = len(sig_args)
    params = ','.join(['ptr'] + ['p%d' % i for i in range(num_args)])
    coerced_params = ','.join([shared.JS.make_coercion('ptr', 'i')] + [shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i])) for i in range(num_args)])
    coercions = ';'.join(['ptr = ptr | 0'] + ['p%d = %s' % (i, shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i]))) for i in range(num_args)]) + ';'
    mini_coerced_params = ','.join([shared.JS.make_coercion('p%d' % i, sig_args[i]) for i in range(num_args)])
    maybe_return = '' if return_type == 'v' else 'return'
    final_return = maybe_return + ' ' + shared.JS.make_coercion('ftCall_' + sig + '(' + coerced_params + ')', unfloat(return_type)) + ';'
    if shared.Settings.EMULATED_FUNCTION_POINTERS == 1:
      # mode 1: always forward through ftCall
      body = final_return
    else:
      # mode 2: fast path into our own table when fb <= ptr < fb + table size
      sig_mask = str(table.count(','))
      body = ('if (((ptr|0) >= (fb|0)) & ((ptr|0) < (fb + ' + sig_mask + ' | 0))) { ' + maybe_return + ' ' +
              shared.JS.make_coercion(
                'FUNCTION_TABLE_' + sig + '[(ptr-fb)&' + sig_mask + '](' +
                mini_coerced_params + ')', return_type, ffi_arg=True
              ) + '; ' + ('return;' if return_type == 'v' else '') + ' }' + final_return)
    mftCall_funcs.append(make_func('mftCall_' + sig, body, params, coercions) + '\n')
  return mftCall_funcs
def get_function_pointer_error(sig, function_table_sigs):
  """JS snippet aborting on a bad function-pointer call of signature *sig*."""
  if shared.Settings.ASSERTIONS == 0:
    # Release build: do the most minimal sized abort possible
    return "abort();"
  # ASSERTIONS-enabled build, identify the pointer and the failing signature.
  return "abortFnPtrError(x, '" + sig + "');"
def signature_sort_key(sig):
  """Return a sort-key function scoring how close another signature is to *sig*.

  Lower scores indicate a better match; used to pick the most plausible
  signature when resolving function pointers across mismatched call sites.
  """
  def closure(other):
    ret = 0
    minlen = min(len(other), len(sig))
    # BUGFIX: was min() — the normalizations below divide by the longer of
    # the two lengths, as the variable name and comments intend.
    maxlen = max(len(other), len(sig))
    if other.startswith(sig) or sig.startswith(other):
      ret -= 1000 # prioritize prefixes, could be dropped params
    ret -= 133 * difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
    ret += 15 * abs(len(other) - len(sig)) / float(maxlen) # deprioritize the bigger the length difference is
    for i in range(minlen):
      if other[i] == sig[i]:
        ret -= 5 / float(maxlen) # prioritize on identically-placed params
    ret += 20 * len(other) # deprioritize on length
    return ret
  return closure
def asm_backend_uses(metadata, symbol):
  """Check whether the compiled output references extern *symbol*.

  A symbol ending in '.' (e.g. 'Math.') is a prefix query matching any
  member under that object.
  """
  settings = shared.Settings
  # Dynamic linking, SAFE_HEAP and Emterpreter cannot know up front what a
  # later-linked module will need, so report everything as used.
  if settings.MAIN_MODULE or settings.SIDE_MODULE or settings.SAFE_HEAP or settings.EMTERPRETIFY:
    return True
  uses = metadata['externUses']
  if symbol.endswith('.'):
    return any(used.startswith(symbol) for used in uses)
  return symbol in uses
def create_asm_global_funcs(bg_funcs, metadata):
  """Emit the `var X=global...` / `var X=env...` function imports for the asm module.

  bg_funcs is a list of (minified, unminified) basic-global function name pairs.
  """
  maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'max', 'clz32']]
  if provide_fround():
    # Math.fround is needed for PRECISE_F32 / SIMD builds
    maths += ['Math.fround']
  asm_global_funcs = ''
  for math in maths:
    if asm_backend_uses(metadata, math):
      asm_global_funcs += ' var ' + math.replace('.', '_') + '=global' + access_quote(math) + ';\n'
  asm_global_funcs += ''.join([' var ' + unminified + '=env' + access_quote(math_fix(minified)) + ';\n' for (minified, unminified) in bg_funcs])
  asm_global_funcs += global_simd_funcs(access_quote, metadata)
  if shared.Settings.USE_PTHREADS:
    # pthreads builds need the Atomics primitives inside the asm module
    asm_global_funcs += ''.join([' var Atomics_' + ty + '=global' + access_quote('Atomics') + access_quote(ty) + ';\n' for ty in ['load', 'store', 'exchange', 'compareExchange', 'add', 'sub', 'and', 'or', 'xor']])
  return asm_global_funcs
def create_asm_global_vars(bg_vars):
  """Emit the asm.js global-variable imports for (minified, unminified) pairs."""
  lines = []
  for minified, unminified in bg_vars:
    lines.append(' var ' + unminified + '=env' + access_quote(minified) + '|0;\n')
  asm_global_vars = ''.join(lines)
  if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
    # wasm side modules internally define their stack, these are set at module startup time
    asm_global_vars += '\n var STACKTOP = 0, STACK_MAX = 0;\n'
  return asm_global_vars
def global_simd_funcs(access_quote, metadata):
  """Emit the SIMD.js type and per-type function imports, or '' when unused."""
  # Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
  if not (metadata['simd'] or shared.Settings.SIMD):
    return ''

  def string_contains_any(s, str_list):
    # True if any substring in str_list occurs in s
    return any(sub in s for sub in str_list)

  # Symbols the SIMD.js API never actually defined; filtered from the imports.
  nonexisting_simd_symbols = ['Int8x16_fromInt8x16', 'Uint8x16_fromUint8x16', 'Int16x8_fromInt16x8', 'Uint16x8_fromUint16x8', 'Int32x4_fromInt32x4', 'Uint32x4_fromUint32x4', 'Float32x4_fromFloat32x4', 'Float64x2_fromFloat64x2']
  nonexisting_simd_symbols += ['Int32x4_addSaturate', 'Int32x4_subSaturate', 'Uint32x4_addSaturate', 'Uint32x4_subSaturate']
  nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8', 'Float64x2'] for y in ['load2', 'store2']]
  nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8'] for y in ['load1', 'store1']]

  simd = make_simd_types(metadata)

  simd_func_text = ''
  simd_func_text += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simd['types']])

  def generate_symbols(types, funcs):
    # emit `var SIMD_TYPE_fn=SIMD_TYPE.fn;` for each type/function pair
    symbols = [' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in types for g in funcs]
    symbols = [x for x in symbols if not string_contains_any(x, nonexisting_simd_symbols)]
    return ''.join(symbols)

  simd_func_text += generate_symbols(simd['int_types'], simd['int_funcs'])
  simd_func_text += generate_symbols(simd['float_types'], simd['float_funcs'])
  simd_func_text += generate_symbols(simd['bool_types'], simd['bool_funcs'])

  # SIMD conversions (not bitcasts) between same lane sizes:
  def add_simd_cast(dst, src):
    return ' var SIMD_' + dst + '_from' + src + '=SIMD_' + dst + '.from' + src + ';\n'

  def add_simd_casts(t1, t2):
    return add_simd_cast(t1, t2) + add_simd_cast(t2, t1)

  # Bug: Skip importing conversions for int<->uint for now, they don't validate
  # (but it will be an issue if using SIMD.js intrinsics from vector.h to
  # explicitly call these)
  # if metadata['simdInt8x16'] and metadata['simdUint8x16']:
  #   simd_func_text += add_simd_casts('Int8x16', 'Uint8x16')
  # if metadata['simdInt16x8'] and metadata['simdUint16x8']:
  #   simd_func_text += add_simd_casts('Int16x8', 'Uint16x8')
  # if metadata['simdInt32x4'] and metadata['simdUint32x4']:
  #   simd_func_text += add_simd_casts('Int32x4', 'Uint32x4')

  if metadata['simdInt32x4'] and metadata['simdFloat32x4']:
    simd_func_text += add_simd_casts('Int32x4', 'Float32x4')
  if metadata['simdUint32x4'] and metadata['simdFloat32x4']:
    simd_func_text += add_simd_casts('Uint32x4', 'Float32x4')
  if metadata['simdInt32x4'] and metadata['simdFloat64x2']:
    simd_func_text += add_simd_cast('Int32x4', 'Float64x2') # Unofficial, needed for emscripten_int32x4_fromFloat64x2
  if metadata['simdUint32x4'] and metadata['simdFloat64x2']:
    simd_func_text += add_simd_cast('Uint32x4', 'Float64x2') # Unofficial, needed for emscripten_uint32x4_fromFloat64x2

  # Unofficial, Bool64x2 does not yet exist, but needed for Float64x2 comparisons.
  if metadata['simdFloat64x2']:
    simd_func_text += ' var SIMD_Int32x4_fromBool64x2Bits = global.SIMD.Int32x4.fromBool64x2Bits;\n'
  return simd_func_text
def make_simd_types(metadata):
  """Collect the SIMD.js types in use plus the function lists for each type class."""
  base_funcs = ['splat', 'check', 'extractLane', 'replaceLane']
  intfloat_funcs = ['add', 'sub', 'neg', 'mul',
                    'equal', 'lessThan', 'greaterThan',
                    'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
                    'select', 'swizzle', 'shuffle',
                    'load', 'store', 'load1', 'store1', 'load2', 'store2']
  intbool_funcs = ['and', 'xor', 'or', 'not']
  int_types = []
  float_types = []
  # (metadata flag, type name) in emission order; each used int/float type
  # also contributes its bitcast conversion helper to intfloat_funcs.
  for key, ty in [('simdUint8x16', 'Uint8x16'), ('simdInt8x16', 'Int8x16'),
                  ('simdUint16x8', 'Uint16x8'), ('simdInt16x8', 'Int16x8'),
                  ('simdUint32x4', 'Uint32x4')]:
    if metadata[key]:
      int_types.append(ty)
      intfloat_funcs.append('from%sBits' % ty)
  # Always import Int32x4 when building with -s SIMD=1, since memcpy is SIMD optimized.
  if metadata['simdInt32x4'] or shared.Settings.SIMD:
    int_types.append('Int32x4')
    intfloat_funcs.append('fromInt32x4Bits')
  for key, ty in [('simdFloat32x4', 'Float32x4'), ('simdFloat64x2', 'Float64x2')]:
    if metadata[key]:
      float_types.append(ty)
      intfloat_funcs.append('from%sBits' % ty)
  bool_types = [ty for key, ty in [('simdBool8x16', 'Bool8x16'), ('simdBool16x8', 'Bool16x8'),
                                   ('simdBool32x4', 'Bool32x4'), ('simdBool64x2', 'Bool64x2')]
                if metadata[key]]
  float_funcs = base_funcs + intfloat_funcs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
                                               'abs', 'reciprocalApproximation', 'reciprocalSqrtApproximation']
  int_funcs = base_funcs + intfloat_funcs + intbool_funcs + ['shiftLeftByScalar', 'shiftRightByScalar', 'addSaturate', 'subSaturate']
  bool_funcs = base_funcs + intbool_funcs + ['anyTrue', 'allTrue']
  return {
    'types': float_types + int_types + bool_types,
    'float_types': float_types,
    'int_types': int_types,
    'bool_types': bool_types,
    'funcs': base_funcs,
    'float_funcs': float_funcs,
    'int_funcs': int_funcs,
    'bool_funcs': bool_funcs,
    'intfloat_funcs': intfloat_funcs,
    'intbool_funcs': intbool_funcs,
  }
def asm_safe_heap():
  """Whether SAFE_HEAP checks can live inside asm.js (not logging, not relocatable)."""
  settings = shared.Settings
  return settings.SAFE_HEAP and not (settings.SAFE_HEAP_LOG or settings.RELOCATABLE)
def provide_fround():
  """Math.fround is required for PRECISE_F32 and for SIMD builds."""
  if shared.Settings.PRECISE_F32:
    return shared.Settings.PRECISE_F32
  return shared.Settings.SIMD
def create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata):
  """Build the JS placed before the asm module: debug tables, nullFunc aborts,
  relocatable-mode accessors (g$SYM / fp$SYM), invoke wrappers and function
  pointer plumbing."""
  function_table_sigs = function_table_data.keys()

  asm_setup = ''
  if shared.Settings.ASSERTIONS >= 2:
    debug_tables_map = 'var debug_tables = {\n'
    for sig in function_table_data:
      # if the table is empty, debug_tables will not contain it
      body = debug_tables.get(sig, [])
      asm_setup += 'var debug_table_' + sig + ' = [' + ','.join(['0' if x == '0' else "'" + x.replace("'", '"') + "'" for x in body]) + '];\n'
      debug_tables_map += " '" + sig + "': debug_table_" + sig + ',\n'
    asm_setup += debug_tables_map + '};\n'

  if shared.Settings.ASSERTIONS:
    # aborting stubs placed in unused table slots
    for sig in function_table_sigs:
      asm_setup += 'function nullFunc_' + sig + '(x) { ' + get_function_pointer_error(sig, function_table_sigs) + ' }\n'

  if shared.Settings.RELOCATABLE:
    if not shared.Settings.SIDE_MODULE:
      asm_setup += 'var gb = GLOBAL_BASE, fb = 0;\n'
    side = 'parent' if shared.Settings.SIDE_MODULE else ''

    def check(extern):
      # optional runtime assertion that an external symbol actually arrived
      if shared.Settings.ASSERTIONS:
        return ('\n assert(%sModule["%s"] || %s, "external symbol `%s` is missing.' % (side, extern, extern, extern) +
                'perhaps a side module was not linked in? if this symbol was expected to arrive '
                'from a system library, try to build the MAIN_MODULE with '
                'EMCC_FORCE_STDLIBS=1 in the environment");')
      return ''

    for extern in metadata['externs']:
      # g$SYM returns the runtime address of external global SYM
      asm_setup += 'var g$' + extern + ' = function() {' + check(extern) + '\n return ' + side + 'Module["' + extern + '"];\n}\n'
    for extern in metadata['externFunctions']:
      barename, sig = extern.split('$')
      fullname = "fp$" + extern
      key = '%sModule["%s"]' % (side, fullname)
      # fp$SYM lazily adds SYM to the function table and caches its index
      asm_setup += '''\
var %s = function() {
if (!%s) { %s
var fid = addFunction(%sModule["%s"] || %s, "%s");
%s = fid;
}
return %s;
}
''' % (fullname, key, check(barename), side, barename, barename, sig, key, key)

  asm_setup += create_invoke_wrappers(invoke_function_names)
  asm_setup += setup_function_pointers(function_table_sigs)

  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    function_tables_impls = make_function_tables_impls(function_table_data)
    asm_setup += '\n' + '\n'.join(function_tables_impls) + '\n'
  return asm_setup
def setup_function_pointers(function_table_sigs):
  """Emit jsCall helpers and, for asm.js emulated pointers, ftCall_SIG trampolines."""
  asm_setup = ''
  for sig in function_table_sigs:
    if shared.Settings.RESERVED_FUNCTION_POINTERS:
      asm_setup += '\n' + shared.JS.make_jscall(sig) + '\n'
    # nothing special to do here for wasm, we just use dynCalls
    if not shared.Settings.WASM:
      if shared.Settings.EMULATED_FUNCTION_POINTERS:
        args = ['a%d' % i for i in range(len(sig) - 1)]
        full_args = ['x'] + args
        table_access = 'FUNCTION_TABLE_' + sig
        if shared.Settings.SIDE_MODULE:
          table_access = 'parentModule["' + table_access + '"]' # side module tables were merged into the parent, we need to access the global one
        table_read = table_access + '[x]'
        prelude = ''
        if shared.Settings.ASSERTIONS:
          # range-check the pointer before indexing the table
          prelude = '''
if (x < 0 || x >= %s.length) { err("Function table mask error (out of range)"); %s ; abort(x) }''' % (table_access, get_function_pointer_error(sig, function_table_sigs))
        asm_setup += '''
function ftCall_%s(%s) {%s
return %s(%s);
}
''' % (sig, ', '.join(full_args), prelude, table_read, ', '.join(args))
  return asm_setup
def create_basic_funcs(function_table_sigs, invoke_function_names):
  """List the function names the asm module imports from JS (the env object).

  Returns a new list; the settings value is never mutated.
  """
  # BUGFIX: copy the settings list. The original aliased it, so the `+=`
  # below extended shared.Settings.RUNTIME_FUNCS_TO_IMPORT in place and
  # repeated calls accumulated duplicates into global settings.
  basic_funcs = list(shared.Settings.RUNTIME_FUNCS_TO_IMPORT)
  if shared.Settings.STACK_OVERFLOW_CHECK and not shared.Settings.MINIMAL_RUNTIME:
    basic_funcs += ['abortStackOverflow']
  if shared.Settings.EMTERPRETIFY:
    basic_funcs += ['abortStackOverflowEmterpreter']
  if shared.Settings.SAFE_HEAP:
    if asm_safe_heap():
      basic_funcs += ['segfault', 'alignfault', 'ftfault']
    else:
      # Binaryen generates calls to these two so they are always needed with wasm
      if shared.Settings.WASM:
        basic_funcs += ['segfault', 'alignfault']
      basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_LOAD_D', 'SAFE_HEAP_STORE', 'SAFE_HEAP_STORE_D', 'SAFE_FT_MASK']

  if shared.Settings.ASSERTIONS:
    # nullFunc_SIG aborts are imported so unused table slots can point at them
    for sig in function_table_sigs:
      basic_funcs += ['nullFunc_' + sig]

  basic_funcs += invoke_function_names

  for sig in function_table_sigs:
    if shared.Settings.RESERVED_FUNCTION_POINTERS:
      basic_funcs.append('jsCall_%s' % sig)
    if asm_js_emulated_function_pointers():
      basic_funcs.append('ftCall_%s' % sig)
  return basic_funcs
def create_basic_vars(exported_implemented_functions, forwarded_json, metadata):
  """List the numeric variables imported into the asm module.

  Only shared.Settings is consulted; the parameters are kept for interface
  compatibility with the other create_* helpers.
  """
  basic_vars = []
  if 'tempDoublePtr' in shared.Settings.ASM_PRIMITIVE_VARS:
    basic_vars.append('tempDoublePtr')
  if shared.Settings.RELOCATABLE:
    if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
      # wasm side modules have a specific convention for these
      basic_vars.extend(['__memory_base', '__table_base'])
    else:
      basic_vars.extend(['gb', 'fb', 'STACKTOP', 'STACK_MAX'])
  if shared.Settings.EMTERPRETIFY:
    basic_vars.extend(['EMTSTACKTOP', 'EMT_STACK_MAX', 'eb'])
  return basic_vars
def create_exports(exported_implemented_functions, in_table, function_table_data, metadata):
  """Build the JS object literal the asm/wasm module returns as its exports."""
  asm_runtime_funcs = create_asm_runtime_funcs()
  all_exported = exported_implemented_functions + asm_runtime_funcs + function_tables(function_table_data)
  # In asm.js + emulated function pointers, export all the table because we use
  # JS to add the asm.js module's functions to the table (which is external
  # in this mode). In wasm, we don't need that since wasm modules can
  # directly add functions to the imported Table.
  if not shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS:
    all_exported += in_table
  exports = []
  # sorted(set(...)) deduplicates and gives deterministic output
  for export in sorted(set(all_exported)):
    exports.append(quote(export) + ": " + export)
  if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
    # named globals in side wasm modules are exported globals from asm/wasm
    for k, v in metadata['namedGlobals'].items():
      exports.append(quote('_' + str(k)) + ': ' + str(v))
    # aliases become additional exports
    for k, v in metadata['aliases'].items():
      exports.append(quote(str(k)) + ': ' + str(v))
  # shared wasm emulated function pointer mode requires us to know the function pointer for
  # each function. export fp$func => function pointer for func
  if shared.Settings.WASM and shared.Settings.RELOCATABLE and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
    for k, v in metadata['functionPointers'].items():
      exports.append(quote('fp$' + str(k)) + ': ' + str(v))
  return '{ ' + ', '.join(exports) + ' }'
def create_asm_runtime_funcs():
  """Names of the runtime-support functions implemented inside the asm module."""
  funcs = []
  wasm_side_module = shared.Settings.WASM and shared.Settings.SIDE_MODULE
  if not wasm_side_module and not shared.Settings.MINIMAL_RUNTIME:
    funcs.extend(['stackAlloc', 'stackSave', 'stackRestore'])
    if shared.Settings.USE_PTHREADS:
      funcs.append('establishStackSpace')
  return funcs
def function_tables(function_table_data):
  """dynCall export names, one per table signature (none when asm.js emulates pointers)."""
  if asm_js_emulated_function_pointers():
    return []
  return ['dynCall_' + sig for sig in function_table_data]
def create_the_global(metadata):
  """Build the asm.js `global` argument object literal (empty for wasm)."""
  # the global is only needed for asm.js
  if shared.Settings.WASM:
    return '{}'
  fundamentals = []
  if asm_backend_uses(metadata, 'Math.'):
    fundamentals.append('Math')
  candidates = ['Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array',
                'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array',
                'NaN', 'Infinity']
  fundamentals.extend(c for c in candidates if asm_backend_uses(metadata, c))
  if metadata['simd'] or shared.Settings.SIMD:
    # Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
    fundamentals.append('SIMD')
  entries = ['"' + math_fix(s) + '": ' + s for s in fundamentals]
  return '{ ' + ', '.join(entries) + ' }'
# JS snippet inserted into exported-function wrappers under ASSERTIONS: verifies
# calls into compiled code happen only while the runtime is initialized and alive.
RUNTIME_ASSERTIONS = '''
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');'''
def create_receiving(function_table_data, function_tables_defs, exported_implemented_functions, initializers):
  """Build the JS that receives the asm/wasm exports into JS-visible names.

  Covers the assertion wrappers, the per-export `var X = asm["X"]` bindings
  (or the swappable-module indirection), exported function tables and the
  emulated-function-pointer table definitions.
  """
  receiving = ''
  if not shared.Settings.ASSERTIONS or shared.Settings.MINIMAL_RUNTIME:
    runtime_assertions = ''
  else:
    runtime_assertions = RUNTIME_ASSERTIONS
    # assert on the runtime being in a valid state when calling into compiled code. The only exceptions are some support code.
    # WASM=1 already inserts runtime assertions, so no need to do it again here (see create_receiving_wasm)
    if not shared.Settings.WASM:
      receiving_functions = [f for f in exported_implemented_functions if f not in ('_memcpy', '_memset', '_emscripten_replace_memory', '__start_module')]
      wrappers = []
      for name in receiving_functions:
        wrappers.append('''\
var real_%(name)s = asm["%(name)s"];
asm["%(name)s"] = function() {%(runtime_assertions)s
return real_%(name)s.apply(null, arguments);
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
      receiving = '\n'.join(wrappers)

  module_exports = exported_implemented_functions + function_tables(function_table_data)
  shared.Settings.MODULE_EXPORTS = [(f, f) for f in module_exports]

  if not shared.Settings.SWAPPABLE_ASM_MODULE:
    if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
      imported_exports = [s for s in module_exports if s not in initializers]
      if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
        # In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
        # var _main;
        # WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
        # var asm = output.instance.exports;
        # _main = asm["_main"];
        receiving += '\n'.join([s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
      else:
        if shared.Settings.MINIMAL_RUNTIME:
          # In asm.js exports can be directly processed at top level, i.e.
          # var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
          # var _main = asm["_main"];
          receiving += '\n'.join(['var ' + s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
        else:
          receiving += '\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"];' for s in module_exports]) + '\n'
    else:
      # exports are not declared; copy everything onto the chosen global object
      if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
        global_object = '(typeof process !== "undefined" ? global : this)'
      elif shared.Settings.target_environment_may_be('node'):
        global_object = 'global'
      else:
        global_object = 'this'
      if shared.Settings.MINIMAL_RUNTIME:
        module_assign = ''
      else:
        module_assign = 'Module[__exportedFunc] = '
      receiving += 'for(var __exportedFunc in asm) ' + global_object + '[__exportedFunc] = ' + module_assign + 'asm[__exportedFunc];\n'
  else:
    # swappable module: keep the asm object itself and forward every call
    # through it, so the module can be replaced at runtime
    receiving += 'Module["asm"] = asm;\n'
    wrappers = []
    for name in module_exports:
      wrappers.append('''\
var %(name)s = Module["%(name)s"] = function() {%(runtime_assertions)s
return Module["asm"]["%(name)s"].apply(null, arguments)
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
    receiving += '\n'.join(wrappers)

  if shared.Settings.EXPORT_FUNCTION_TABLES and not shared.Settings.WASM:
    for table in function_table_data.values():
      tableName = table.split()[1]
      table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
      receiving += table + '\n'

  if shared.Settings.EMULATED_FUNCTION_POINTERS:
    # in asm.js emulated function tables, emit the table on the outside, where
    # JS can manage it (for wasm, a native wasm Table is used directly, and we
    # don't need this)
    if not shared.Settings.WASM:
      receiving += '\n' + function_tables_defs.replace('// EMSCRIPTEN_END_FUNCS\n', '')
    # wasm still needs definitions for dyncalls on the outside, for JS
    receiving += '\n' + ''.join(['Module["dynCall_%s"] = dynCall_%s\n' % (sig, sig) for sig in function_table_data])
    if not shared.Settings.WASM:
      for sig in function_table_data.keys():
        name = 'FUNCTION_TABLE_' + sig
        fullname = name if not shared.Settings.SIDE_MODULE else ('SIDE_' + name)
        receiving += 'Module["' + name + '"] = ' + fullname + ';\n'

  return receiving
def create_fp_accessors(metadata):
  """Create lazy `fp$XXX` accessors resolving function pointers at runtime."""
  if not shared.Settings.RELOCATABLE:
    return ''

  # Create `fp$XXX` handlers for determining function pointers (table addresses)
  # at runtime.
  # For SIDE_MODULEs these are generated by the proxyHandler at runtime.
  accessors = []
  for fullname in metadata['declares']:
    if not fullname.startswith('fp$'):
      continue
    _, name, sig = fullname.split('$')
    mangled = asmjs_mangle(name)
    side = 'parent' if shared.Settings.SIDE_MODULE else ''
    assertion = ('\n assert(%sModule["%s"] || typeof %s !== "undefined", "external function `%s` is missing.' % (side, mangled, mangled, name) +
                 'perhaps a side module was not linked in? if this symbol was expected to arrive '
                 'from a system library, try to build the MAIN_MODULE with '
                 'EMCC_FORCE_STDLIBS=XX in the environment");')
    # the name of the original function is generally the normal function
    # name, unless it is legalized, in which case the export is the legalized
    # version, and the original provided by orig$X
    if shared.Settings.LEGALIZE_JS_FFI and not shared.JS.is_legal_sig(sig):
      name = 'orig$' + name

    # the accessor replaces itself with a closure returning the cached index
    accessors.append('''
Module['%(full)s'] = function() {
%(assert)s
// Use the original wasm function itself, for the table, from the main module.
var func = Module['asm']['%(original)s'];
// Try an original version from a side module.
if (!func) func = Module['_%(original)s'];
// Otherwise, look for a regular function or JS library function.
if (!func) func = Module['%(mangled)s'];
if (!func) func = %(mangled)s;
var fp = addFunction(func, '%(sig)s');
Module['%(full)s'] = function() { return fp };
return fp;
}
''' % {'full': asmjs_mangle(fullname), 'mangled': mangled, 'original': name, 'assert': assertion, 'sig': sig})

  return '\n'.join(accessors)
def create_named_globals(metadata):
  """Emit NAMED_GLOBALS plus `_name` / `g$_name` accessors for relocatable builds."""
  if not shared.Settings.RELOCATABLE:
    return ''

  named_globals = '''
var NAMED_GLOBALS = {
%s
};
for (var named in NAMED_GLOBALS) {
Module['_' + named] = gb + NAMED_GLOBALS[named];
}
Module['NAMED_GLOBALS'] = NAMED_GLOBALS;
''' % ',\n '.join('"' + k + '": ' + str(v) for k, v in metadata['namedGlobals'].items())

  if shared.Settings.WASM:
    # wasm side modules are pure wasm, and cannot create their g$..() methods, so we help them out
    # TODO: this works if we are the main module, but if the supplying module is later, it won't, so
    # we'll need another solution for that. one option is to scan the module imports, if/when
    # wasm supports that, then the loader can do this.
    named_globals += '''
for (var named in NAMED_GLOBALS) {
(function(named) {
var addr = Module['_' + named];
Module['g$_' + named] = function() { return addr };
})(named);
}
'''
  named_globals += ''.join(["Module['%s'] = Module['%s']\n" % (k, v) for k, v in metadata['aliases'].items()])
  return named_globals
def create_runtime_funcs_asmjs(exports, metadata):
  """Create the JS source for runtime support functions living inside the asm
  module: stack management, emterpreter helpers and SAFE_HEAP accessors.

  Returns a list of JS source strings. `exports` is unused here but kept for
  interface parity with the wasm variant.
  """
  if shared.Settings.ASSERTIONS or shared.Settings.STACK_OVERFLOW_CHECK >= 2:
    stack_check = ' if ((STACKTOP|0) >= (STACK_MAX|0)) abortStackOverflow(size|0);\n'
  else:
    stack_check = ''

  funcs = ['''
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
STACKTOP = (STACKTOP + 15)&-16;
%s
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
''' % stack_check]

  if shared.Settings.USE_PTHREADS:
    funcs.append('''
function establishStackSpace(stackBase, stackMax) {
stackBase = stackBase|0;
stackMax = stackMax|0;
STACKTOP = stackBase;
STACK_MAX = stackMax;
tempDoublePtr = STACKTOP;
STACKTOP = (STACKTOP + 8)|0;
}
''')

  if shared.Settings.MINIMAL_RUNTIME:
    # MINIMAL_RUNTIME moves stack functions to library.
    funcs = []

  if shared.Settings.EMTERPRETIFY:
    funcs.append('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}''')
    if shared.Settings.EMTERPRETIFY_ASYNC:
      funcs.append('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
function emtStackRestore(x) {
x = x | 0;
EMTSTACKTOP = x;
}
function getEmtStackMax() {
return EMT_STACK_MAX | 0;
}
function setEmtStackMax(x) {
x = x | 0;
EMT_STACK_MAX = x;
}
''')

  if asm_safe_heap():
    if '_sbrk' in metadata['implementedFunctions']:
      # reads past the current sbrk break are also treated as segfaults
      brk_check = 'if ((dest + bytes|0) > (HEAP32[(_emscripten_get_sbrk_ptr()|0)>>2]|0)) segfault();'
    else:
      # sbrk and malloc were not linked in, but SAFE_HEAP is used - so safe heap
      # can ignore the sbrk location.
      brk_check = ''
    funcs.append('''
function SAFE_HEAP_STORE(dest, value, bytes) {
dest = dest | 0;
value = value | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
HEAP32[dest>>2] = value;
} else if ((bytes|0) == 1) {
HEAP8[dest>>0] = value;
} else {
if ((dest&1)) alignfault();
HEAP16[dest>>1] = value;
}
}
function SAFE_HEAP_STORE_D(dest, value, bytes) {
dest = dest | 0;
value = +value;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
HEAPF64[dest>>3] = value;
} else {
if ((dest&3)) alignfault();
HEAPF32[dest>>2] = value;
}
}
function SAFE_HEAP_LOAD(dest, bytes, unsigned) {
dest = dest | 0;
bytes = bytes | 0;
unsigned = unsigned | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
return HEAP32[dest>>2] | 0;
} else if ((bytes|0) == 1) {
if (unsigned) {
return HEAPU8[dest>>0] | 0;
} else {
return HEAP8[dest>>0] | 0;
}
}
if ((dest&1)) alignfault();
if (unsigned) return HEAPU16[dest>>1] | 0;
return HEAP16[dest>>1] | 0;
}
function SAFE_HEAP_LOAD_D(dest, bytes) {
dest = dest | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
return +HEAPF64[dest>>3];
}
if ((dest&3)) alignfault();
return +HEAPF32[dest>>2];
}
function SAFE_FT_MASK(value, mask) {
value = value | 0;
mask = mask | 0;
var ret = 0;
ret = value & mask;
if ((ret|0) != (value|0)) ftfault();
return ret | 0;
}
''' % {'brk_check': brk_check})

  return funcs
def create_asm_start_pre(asm_setup, the_global, sending, metadata):
  """Assemble the JS that precedes and opens the asm.js module function."""
  shared_array_buffer = ''
  if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
    shared_array_buffer = "asmGlobalArg['Atomics'] = Atomics;"

  module_global = 'var asmGlobalArg = ' + the_global + ';'
  module_library = 'var asmLibraryArg = ' + sending + ';'

  asm_function_top = ('// EMSCRIPTEN_START_ASM\n'
                      'var asm = (/** @suppress {uselessCode} */ function(global, env, buffer) {')

  # 'almost asm' is the non-validating variant; full validation only for ASM_JS=1
  use_asm = "'almost asm';"
  if shared.Settings.ASM_JS == 1:
    use_asm = "'use asm';"

  lines = [
    asm_setup,
    module_global,
    shared_array_buffer,
    module_library,
    asm_function_top,
    use_asm,
    create_first_in_asm(),
  ]
  return '\n'.join(lines)
def create_asm_temp_vars(metadata):
  """Declare the enabled asm.js temp int/double vars plus nan/inf globals."""
  int_names = ['__THREW__', 'threwValue', 'setjmpId', 'tempInt', 'tempBigInt', 'tempBigIntS', 'tempValue']
  double_names = ['tempDouble']
  pieces = []
  for names, init in ((int_names, '0'), (double_names, '0.0')):
    for name in names:
      if name in shared.Settings.ASM_PRIMITIVE_VARS:
        pieces.append('var %s = %s;\n' % (name, init))
  rtn = ''.join(pieces)
  if asm_backend_uses(metadata, 'NaN'):
    rtn += 'var nan = global%s;\n' % (access_quote('NaN'))
  if asm_backend_uses(metadata, 'Infinity'):
    rtn += 'var inf = global%s;\n' % (access_quote('Infinity'))
  return rtn
def create_asm_runtime_thread_local_vars():
  """Per-thread pthread bookkeeping vars, declared only for pthreads builds."""
  if not shared.Settings.USE_PTHREADS:
    return ''
  return '''
var __pthread_ptr = 0;
var __pthread_is_main_runtime_thread = 0;
var __pthread_is_main_browser_thread = 0;
'''
def create_replace_memory(metadata):
  """JS that rebinds all used heap views onto a new buffer (asm.js memory growth)."""
  if not shared.Settings.ALLOW_MEMORY_GROWTH:
    return ''
  view_pairs = [
    ('HEAP8', 'Int8Array'),
    ('HEAPU8', 'Uint8Array'),
    ('HEAP16', 'Int16Array'),
    ('HEAPU16', 'Uint16Array'),
    ('HEAP32', 'Int32Array'),
    ('HEAPU32', 'Uint32Array'),
    ('HEAPF32', 'Float32Array'),
    ('HEAPF64', 'Float64Array')]
  rebinds = ''.join(' %s = new %s(newBuffer);\n' % (heap, view)
                    for heap, view in view_pairs
                    if asm_backend_uses(metadata, view))
  return ('''
function _emscripten_replace_memory(newBuffer) {
''' + rebinds + '''
buffer = newBuffer;
return true;
}
''')
def create_asm_end(exports):
  """Return the closing text of the asm module function.

  `exports` is the pre-rendered JS object-literal of exports to return.
  In MINIMAL_RUNTIME wasm builds the closure is not immediately invoked
  (instantiation happens later); otherwise the function is called right away
  with the global/library args and the memory buffer.
  """
  if shared.Settings.MINIMAL_RUNTIME and shared.Settings.WASM:
    return '''
    return %s;
    })
    // EMSCRIPTEN_END_ASM
''' % (exports)
  return '''
  return %s;
})
// EMSCRIPTEN_END_ASM
(asmGlobalArg, asmLibraryArg, buffer);
''' % (exports)
def create_first_in_asm():
  # Deliberately a stub: nothing currently needs to be emitted at the start
  # of the asm function body; kept so call sites stay uniform.
  return ''
def create_memory_views(metadata):
  """Emit the 'var HEAPxx = new global.XxxArray(buffer);' view declarations,
  skipping any typed-array view the module does not use."""
  out = ['\n']
  for info in HEAP_TYPE_INFOS:
    view_name = '{}Array'.format(info.long_name)
    if asm_backend_uses(metadata, view_name):
      out.append('  var %s = new global%s(buffer);\n' % (info.heap_name, access_quote(view_name)))
  return ''.join(out)
class HeapTypeInfo(object):
  """Static description of one HEAP* typed-array view (name, element type,
  and the shift that turns a byte address into a view index)."""

  def __init__(self, heap_name, long_name, shift_amount):
    assert heap_name.startswith('HEAP')
    self.heap_name = heap_name
    self.long_name = long_name
    self.shift_amount = shift_amount

  def short_name(self):
    """Part of the heap name after 'HEAP', e.g. 'U16' or 'F64'."""
    return self.heap_name[4:]

  def is_int(self):
    """True for integer heaps; float heaps are HEAPF32/HEAPF64."""
    return not self.short_name().startswith('F')

  def coerce(self, expression):
    """Wrap `expression` in the asm.js type coercion for this heap."""
    return expression + '| 0' if self.is_int() else '+' + expression
# All heap views the generated module may expose. shift_amount converts a
# byte address to an index in that view (addr >> shift_amount).
HEAP_TYPE_INFOS = [
  HeapTypeInfo(heap_name='HEAP8', long_name='Int8', shift_amount=0),
  HeapTypeInfo(heap_name='HEAP16', long_name='Int16', shift_amount=1),
  HeapTypeInfo(heap_name='HEAP32', long_name='Int32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPU8', long_name='Uint8', shift_amount=0),
  HeapTypeInfo(heap_name='HEAPU16', long_name='Uint16', shift_amount=1),
  HeapTypeInfo(heap_name='HEAPU32', long_name='Uint32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPF32', long_name='Float32', shift_amount=2),
  HeapTypeInfo(heap_name='HEAPF64', long_name='Float64', shift_amount=3),
]
def emscript_wasm_backend(infile, outfile, memfile, compiler_engine,
                          temp_files, DEBUG):
  """Generate the JS glue for a wasm-backend build and write it to `outfile`.

  Steps are strictly ordered: finalize the binary, update settings from its
  metadata, run the JS compiler for the glue template, then fill the template
  with EM_ASM/EM_JS/import/export wiring.
  """
  # Overview:
  # * Run wasm-emscripten-finalize to extract metadata and modify the binary
  #   to use emscripten's wasm<->JS ABI
  # * Use the metadata to generate the JS glue that goes with the wasm
  metadata = finalize_wasm(temp_files, infile, outfile, memfile, DEBUG)
  update_settings_glue(metadata, DEBUG)
  # Side modules get no JS glue of their own; the main module loads them.
  if shared.Settings.SIDE_MODULE:
    return
  if DEBUG:
    logger.debug('emscript: js compiler glue')
  if DEBUG:
    t = time.time()
  glue, forwarded_data = compile_settings(compiler_engine, temp_files)
  if DEBUG:
    logger.debug('  emscript: glue took %s seconds' % (time.time() - t))
    t = time.time()
  forwarded_json = json.loads(forwarded_data)
  # For the wasm backend the implementedFunctions from compiler.js should
  # always be empty. This only gets populated for __asm function when using
  # the JS backend.
  assert not forwarded_json['Functions']['implementedFunctions']
  # The glue is split into the part before and after the compiled functions.
  pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
  # memory and global initializers
  global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in metadata['initializers'])
  staticbump = shared.Settings.STATIC_BUMP
  if shared.Settings.MINIMAL_RUNTIME:
    # In minimal runtime, global initializers are run after the Wasm Module instantiation has finished.
    global_initializers = ''
  else:
    # In regular runtime, global initializers are recorded in an __ATINIT__ array.
    global_initializers = '''/* global initializers */ %s __ATINIT__.push(%s);
''' % ('if (!ENVIRONMENT_IS_PTHREAD)' if shared.Settings.USE_PTHREADS else '',
       global_initializers)
  pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
%s
''' % (staticbump, global_initializers))
  pre = apply_memory(pre)
  pre = apply_static_code_hooks(pre) # In regular runtime, atinits etc. exist in the preamble part
  post = apply_static_code_hooks(post) # In MINIMAL_RUNTIME, atinit exists in the postamble part
  if shared.Settings.RELOCATABLE and not shared.Settings.SIDE_MODULE:
    pre += 'var gb = GLOBAL_BASE, fb = 0;\n'
  # merge forwarded data
  shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
  exports = metadata['exports']
  # Store exports for Closure compiler to be able to track these as globals in
  # -s DECLARE_ASM_MODULE_EXPORTS=0 builds.
  shared.Settings.MODULE_EXPORTS = [(asmjs_mangle(f), f) for f in exports]
  if shared.Settings.ASYNCIFY:
    exports += ['asyncify_start_unwind', 'asyncify_stop_unwind', 'asyncify_start_rewind', 'asyncify_stop_rewind']
  report_missing_symbols(set([asmjs_mangle(f) for f in exports]), pre)
  asm_consts, asm_const_funcs = create_asm_consts_wasm(forwarded_json, metadata)
  em_js_funcs = create_em_js(forwarded_json, metadata)
  asm_const_pairs = ['%s: %s' % (key, value) for key, value in asm_consts]
  asm_const_map = 'var ASM_CONSTS = {\n  ' + ',  \n  '.join(asm_const_pairs) + '\n};\n'
  # Splice the EM_ASM table and EM_JS functions into the body marker.
  pre = pre.replace(
    '// === Body ===',
    ('// === Body ===\n\n' + asm_const_map +
     asstr('\n'.join(asm_const_funcs)) +
     '\n'.join(em_js_funcs) + '\n'))
  pre = apply_table(pre)
  outfile.write(pre)
  pre = None  # drop the (potentially large) string early
  invoke_funcs = metadata['invokeFuncs']
  if shared.Settings.RELOCATABLE:
    invoke_funcs.append('invoke_X')
  try:
    del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
  except KeyError:
    pass
  sending = create_sending_wasm(invoke_funcs, forwarded_json, metadata)
  receiving = create_receiving_wasm(exports, metadata['initializers'])
  if shared.Settings.MINIMAL_RUNTIME:
    post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], exports, receiving)
  module = create_module_wasm(sending, receiving, invoke_funcs, metadata)
  write_output_file(outfile, post, module)
  module = None
  outfile.close()
def remove_trailing_zeros(memfile):
  """Strip trailing NUL bytes from the memory-initializer file, in place.

  The loader zero-initializes memory anyway, so the trailing zeros carry no
  information; dropping them shrinks the .mem file. Handles empty and
  all-zero files (result is an empty file).
  """
  with open(memfile, 'rb') as f:
    mem_data = f.read()
  # bytes.rstrip scans in C and replaces the old manual index-walk, which
  # also carried a redundant py2/py3 dual comparison (== b'\0' or == 0).
  with open(memfile, 'wb') as f:
    f.write(mem_data.rstrip(b'\0'))
def finalize_wasm(temp_files, infile, outfile, memfile, DEBUG):
  """Run Binaryen's wasm-emscripten-finalize over the linked wasm binary.

  Rewrites the binary to emscripten's wasm<->JS ABI, optionally emits a
  source map and/or a separate .mem data file, and returns the parsed
  metadata dict that the tool prints on stdout.
  """
  basename = shared.unsuffixed(outfile.name)
  wasm = basename + '.wasm'
  base_wasm = infile
  shared.Building.save_intermediate(infile, 'base.wasm')
  args = ['--detect-features']
  # Source maps are only produced at the highest debug level.
  write_source_map = shared.Settings.DEBUG_LEVEL >= 4
  if write_source_map:
    shared.Building.emit_wasm_source_map(base_wasm, base_wasm + '.map')
    shared.Building.save_intermediate(base_wasm + '.map', 'base_wasm.map')
    args += ['--output-source-map-url=' + shared.Settings.SOURCE_MAP_BASE + os.path.basename(shared.Settings.WASM_BINARY_FILE) + '.map']
  # tell binaryen to look at the features section, and if there isn't one, to use MVP
  # (which matches what llvm+lld has given us)
  if shared.Settings.DEBUG_LEVEL >= 2 or shared.Settings.PROFILING_FUNCS or shared.Settings.EMIT_SYMBOL_MAP or shared.Settings.ASYNCIFY_WHITELIST or shared.Settings.ASYNCIFY_BLACKLIST:
    args.append('-g')
  if shared.Settings.LEGALIZE_JS_FFI != 1:
    args.append('--no-legalize-javascript-ffi')
  if not shared.Settings.MEM_INIT_IN_WASM:
    args.append('--separate-data-segments=' + memfile)
  if shared.Settings.SIDE_MODULE:
    args.append('--side-module')
  else:
    # --global-base is used by wasm-emscripten-finalize to calculate the size
    # of the static data used. The argument we supply here needs to match the
    # global based used by lld (see Building.link_lld). For relocatable this is
    # zero for the global base although at runtime __memory_base is used.
    # For non-relocatable output we used shared.Settings.GLOBAL_BASE.
    # TODO(sbc): Can we remove this argument infer this from the segment
    # initializer?
    if shared.Settings.RELOCATABLE:
      args.append('--global-base=0')
    else:
      args.append('--global-base=%s' % shared.Settings.GLOBAL_BASE)
  if shared.Settings.WASM_BACKEND and shared.Settings.STACK_OVERFLOW_CHECK >= 2:
    args.append('--check-stack-overflow')
  if shared.Settings.STANDALONE_WASM:
    args.append('--standalone-wasm')
  # When we dynamically link our JS loader adds functions from wasm modules to
  # the table. It must add the original versions of them, not legalized ones,
  # so that indirect calls have the right type, so export those.
  if shared.Settings.RELOCATABLE:
    args.append('--pass-arg=legalize-js-interface-export-originals')
  if shared.Settings.FULL_DWARF:
    args.append('--dwarf')
  stdout = shared.Building.run_binaryen_command('wasm-emscripten-finalize',
                                                infile=base_wasm,
                                                outfile=wasm,
                                                args=args,
                                                stdout=subprocess.PIPE)
  if write_source_map:
    shared.Building.save_intermediate(wasm + '.map', 'post_finalize.map')
  shared.Building.save_intermediate(wasm, 'post_finalize.wasm')
  if not shared.Settings.MEM_INIT_IN_WASM:
    # we have a separate .mem file. binaryen did not strip any trailing zeros,
    # because it's an ABI question as to whether it is valid to do so or not.
    # we can do so here, since we make sure to zero out that memory (even in
    # the dynamic linking case, our loader zeros it out)
    remove_trailing_zeros(memfile)
  return load_metadata_wasm(stdout, DEBUG)
def create_asm_consts_wasm(forwarded_json, metadata):
  """Build the EM_ASM machinery for a wasm-backend build.

  Returns a pair: a sorted list of (id, JS function source) pairs for the
  ASM_CONSTS table, plus a list of helper JS function definitions (the
  signature reader and one _emscripten_asm_const_* dispatcher per
  (signature, call type) combination seen in the metadata).
  Also registers each dispatcher in forwarded_json's libraryFunctions.
  """
  asm_consts = {}
  all_sigs = []
  for k, v in metadata['asmConsts'].items():
    const, sigs, call_types = v
    const = asstr(const)
    const = trim_asm_const_body(const)
    args = []
    max_arity = 16
    arity = 0
    # Arity is the highest $N placeholder referenced in the body, plus one.
    for i in range(max_arity):
      if ('$' + str(i)) in const:
        arity = i + 1
    for i in range(arity):
      args.append('$' + str(i))
    const = 'function(' + ', '.join(args) + ') {' + const + '}'
    asm_consts[int(k)] = const
    for sig, call_type in zip(sigs, call_types):
      all_sigs.append((sig, call_type))
  asm_const_funcs = []
  if all_sigs:
    # emit the signature-reading helper function only if we have any EM_ASM
    # functions in the module
    check_int = ''
    check = ''
    if shared.Settings.ASSERTIONS:
      check_int = "if (ch === 105 /*'i'*/)"
      check = ' else abort("unexpected char in asm const signature " + ch);'
    asm_const_funcs.append(r'''
// Avoid creating a new array
var _readAsmConstArgsArray = [];
function readAsmConstArgs(sigPtr, buf) {
  var args = _readAsmConstArgsArray;
  args.length = 0;
  var ch;
  while (ch = HEAPU8[sigPtr++]) {
    if (ch === 100/*'d'*/ || ch === 102/*'f'*/) {
      buf = (buf + 7) & ~7;
      args.push(HEAPF64[(buf >> 3)]);
      buf += 8;
    } else %s {
      buf = (buf + 3) & ~3;
      args.push(HEAP32[(buf >> 2)]);
      buf += 4;
    }%s
  }
  return args;
}
''' % (check_int, check))
  # One dispatcher per distinct (signature, call type) pair.
  for sig, call_type in set(all_sigs):
    const_name = '_emscripten_asm_const_' + call_type + sig
    forwarded_json['Functions']['libraryFunctions'][const_name] = 1
    preamble = ''
    if shared.Settings.USE_PTHREADS:
      sync_proxy = call_type == 'sync_on_main_thread_'
      async_proxy = call_type == 'async_on_main_thread_'
      proxied = sync_proxy or async_proxy
      if proxied:
        # In proxied function calls, positive integers 1, 2, 3, ... denote pointers
        # to regular C compiled functions. Negative integers -1, -2, -3, ... denote
        # indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
        # ... over to the negative integers starting at -1.
        preamble += ('\n  if (ENVIRONMENT_IS_PTHREAD) { ' +
                     proxy_debug_print(sync_proxy) +
                     'return _emscripten_proxy_to_main_thread_js(-1 - code, ' +
                     str(int(sync_proxy)) +
                     ', code, sigPtr, argbuf); }')
    if shared.Settings.RELOCATABLE:
      # In relocatable output the EM_ASM id arrives offset by the global base.
      preamble += '\n  code -= %s;\n' % shared.Settings.GLOBAL_BASE
    asm_const_funcs.append(r'''
function %s(code, sigPtr, argbuf) {%s
  var args = readAsmConstArgs(sigPtr, argbuf);
  return ASM_CONSTS[code].apply(null, args);
}''' % (const_name, preamble))
  asm_consts = [(key, value) for key, value in asm_consts.items()]
  asm_consts.sort()
  return asm_consts, asm_const_funcs
def create_em_js(forwarded_json, metadata):
  """Turn each EM_JS entry in the metadata into a JS function definition.

  Each raw entry is '(c arg decls)<::>{ body }'. The C argument names are
  extracted (last token of each declaration, '*' stripped) and the entries
  are registered in forwarded_json's libraryFunctions.
  """
  SEPARATOR = '<::>'
  funcs = []
  library_funcs = forwarded_json['Functions']['libraryFunctions']
  for name, raw in metadata.get('emJsFuncs', {}).items():
    assert SEPARATOR in raw
    c_sig, body = raw.split(SEPARATOR, 1)
    c_sig = c_sig[1:-1]  # strip the surrounding parentheses
    params = [] if c_sig == 'void' else c_sig.split(',')
    # keep only the identifier from each 'type name' declaration
    names = [p.split()[-1].replace('*', '') for p in params if p]
    funcs.append('function {}({}){}'.format(name, ','.join(names), asstr(body)))
    library_funcs[name] = 1
  return funcs
def add_standard_wasm_imports(send_items_map):
  """Add the standard (non-user) imports every emscripten wasm module gets.

  Mutates `send_items_map` (import name -> JS expression) in place: the
  memory/table objects, relocation bases, tempRet0 accessors where i64
  legalization may need them, and the AUTODEBUG tracing hooks.
  """
  # Normally we import these into the wasm (so that JS could use them even
  # before the wasm loads), while in standalone mode we do not depend
  # on JS to create them, but create them in the wasm and export them.
  if not shared.Settings.STANDALONE_WASM:
    memory_import = 'wasmMemory'
    if shared.Settings.MODULARIZE and shared.Settings.USE_PTHREADS:
      # Pthreads assign wasmMemory in their worker startup. In MODULARIZE mode, they cannot assign inside the
      # Module scope, so lookup via Module as well.
      memory_import += " || Module['wasmMemory']"
    send_items_map['memory'] = memory_import
    send_items_map['table'] = 'wasmTable'
  # With the wasm backend __memory_base and __table_base are only needed for
  # relocatable output.
  if shared.Settings.RELOCATABLE or not shared.Settings.WASM_BACKEND: # FIXME
    send_items_map['__memory_base'] = str(shared.Settings.GLOBAL_BASE) # tell the memory segments where to place themselves
    # the wasm backend reserves slot 0 for the NULL function pointer
    table_base = '1' if shared.Settings.WASM_BACKEND else '0'
    send_items_map['__table_base'] = table_base
  if shared.Settings.RELOCATABLE and shared.Settings.WASM_BACKEND: # FIXME
    send_items_map['__stack_pointer'] = 'STACK_BASE'
  if shared.Settings.MAYBE_WASM2JS or shared.Settings.AUTODEBUG or shared.Settings.LINKABLE:
    # legalization of i64 support code may require these in some modes
    send_items_map['setTempRet0'] = 'setTempRet0'
    send_items_map['getTempRet0'] = 'getTempRet0'
  if shared.Settings.AUTODEBUG:
    # Tracing hooks injected by the AUTODEBUG instrumentation pass. Each one
    # logs and passes its value through; i64 hooks split the value into
    # low/high halves and return the high half via tempRet0.
    send_items_map['log_execution'] = '''function(loc) {
      console.log('log_execution ' + loc);
    }'''
    send_items_map['get_i32'] = '''function(loc, index, value) {
      console.log('get_i32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_i64'] = '''function(loc, index, low, high) {
      console.log('get_i64 ' + [loc, index, low, high]);
      setTempRet0(high);
      return low;
    }'''
    send_items_map['get_f32'] = '''function(loc, index, value) {
      console.log('get_f32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_f64'] = '''function(loc, index, value) {
      console.log('get_f64 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_anyref'] = '''function(loc, index, value) {
      console.log('get_anyref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['get_exnref'] = '''function(loc, index, value) {
      console.log('get_exnref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_i32'] = '''function(loc, index, value) {
      console.log('set_i32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_i64'] = '''function(loc, index, low, high) {
      console.log('set_i64 ' + [loc, index, low, high]);
      setTempRet0(high);
      return low;
    }'''
    send_items_map['set_f32'] = '''function(loc, index, value) {
      console.log('set_f32 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_f64'] = '''function(loc, index, value) {
      console.log('set_f64 ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_anyref'] = '''function(loc, index, value) {
      console.log('set_anyref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['set_exnref'] = '''function(loc, index, value) {
      console.log('set_exnref ' + [loc, index, value]);
      return value;
    }'''
    send_items_map['load_ptr'] = '''function(loc, bytes, offset, ptr) {
      console.log('load_ptr ' + [loc, bytes, offset, ptr]);
      return ptr;
    }'''
    send_items_map['load_val_i32'] = '''function(loc, value) {
      console.log('load_val_i32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['load_val_i64'] = '''function(loc, low, high) {
      console.log('load_val_i64 ' + [loc, low, high]);
      setTempRet0(high);
      return low;
    }'''
    # BUGFIX: the log tag previously read 'loaload_val_i32d_ptr' (copy-paste
    # garble), mislabeling f32 loads in the trace.
    send_items_map['load_val_f32'] = '''function(loc, value) {
      console.log('load_val_f32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['load_val_f64'] = '''function(loc, value) {
      console.log('load_val_f64 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_ptr'] = '''function(loc, bytes, offset, ptr) {
      console.log('store_ptr ' + [loc, bytes, offset, ptr]);
      return ptr;
    }'''
    send_items_map['store_val_i32'] = '''function(loc, value) {
      console.log('store_val_i32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_val_i64'] = '''function(loc, low, high) {
      console.log('store_val_i64 ' + [loc, low, high]);
      setTempRet0(high);
      return low;
    }'''
    # BUGFIX: the log tag previously read 'loastore_val_i32d_ptr' (copy-paste
    # garble), mislabeling f32 stores in the trace.
    send_items_map['store_val_f32'] = '''function(loc, value) {
      console.log('store_val_f32 ' + [loc, value]);
      return value;
    }'''
    send_items_map['store_val_f64'] = '''function(loc, value) {
      console.log('store_val_f64 ' + [loc, value]);
      return value;
    }'''
def create_sending_wasm(invoke_funcs, forwarded_json, metadata):
  """Render the JS object literal of imports handed to the wasm module.

  Collects SAFE_HEAP helpers, invoke_* thunks, EM_ASM dispatchers, EM_JS
  functions and declared externs, normalizes their import names, then adds
  the standard imports and emits '{ "name": jsExpr, ... }' sorted by name.
  """
  safe_heap_funcs = ['segfault', 'alignfault'] if shared.Settings.SAFE_HEAP else []
  # each asmConsts entry carries parallel lists of signatures and call types
  em_asm_funcs = ['_emscripten_asm_const_' + call_type + sig
                  for _, sigs, call_types in metadata['asmConsts'].values()
                  for sig, call_type in zip(sigs, call_types)]
  em_js_funcs = list(metadata['emJsFuncs'].keys())
  declared_items = ['_' + item for item in metadata['declares']]
  send_items = set(safe_heap_funcs + invoke_funcs + em_asm_funcs + em_js_funcs + declared_items)

  def fix_import_name(g):
    # Math builtins import under their bare name (Math_sqrt -> sqrt).
    if g.startswith('Math_'):
      return g.split('_')[1]
    # Unlike fastcomp the wasm backend doesn't use the '_' prefix for native
    # symbols. Emscripten currently expects symbols to start with '_' so we
    # artificially add them to the output of emscripten-wasm-finalize and them
    # strip them again here.
    # note that we don't do this for EM_JS functions (which, rarely, may have
    # a '_' prefix)
    if g.startswith('_') and g not in metadata['emJsFuncs']:
      return g[1:]
    return g

  send_items_map = OrderedDict()
  for name in send_items:
    stripped = fix_import_name(name)
    if stripped in send_items_map:
      exit_with_error('duplicate symbol in exports to wasm: %s', name)
    send_items_map[stripped] = name

  add_standard_wasm_imports(send_items_map)

  entries = ('"' + k + '": ' + send_items_map[k] for k in sorted(send_items_map.keys()))
  return '{ ' + ', '.join(entries) + ' }'
def create_receiving_wasm(exports, initializers):
  """Generate the JS that receives the wasm exports into JS-visible names.

  The exact shape depends on runtime mode: assertion wrappers, direct
  top-level vars, an asmjs_mangle loop when exports are not declared
  individually, or hot-swapping Module getters for SWAPPABLE_ASM_MODULE.
  Returns one newline-joined JS string.
  """
  exports_that_are_not_initializers = [x for x in exports if x not in initializers]
  receiving = []
  runtime_assertions = ''
  if shared.Settings.ASSERTIONS and not shared.Settings.MINIMAL_RUNTIME:
    runtime_assertions = RUNTIME_ASSERTIONS
    # assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
    # some support code
    for e in exports:
      receiving.append('''\
var real_%(mangled)s = asm["%(e)s"];
asm["%(e)s"] = function() {%(assertions)s
  return real_%(mangled)s.apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
  if not shared.Settings.SWAPPABLE_ASM_MODULE:
    if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
      if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
        # In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
        # var _main;
        # WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
        #   var asm = output.instance.exports;
        #   _main = asm["_main"];
        receiving += [asmjs_mangle(s) + ' = asm["' + s + '"];' for s in exports_that_are_not_initializers]
      else:
        if shared.Settings.MINIMAL_RUNTIME:
          # In wasm2js exports can be directly processed at top level, i.e.
          # var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
          # var _main = asm["_main"];
          receiving += ['var ' + asmjs_mangle(s) + ' = asm["' + asmjs_mangle(s) + '"];' for s in exports_that_are_not_initializers]
        else:
          receiving += ['var ' + asmjs_mangle(s) + ' = Module["' + asmjs_mangle(s) + '"] = asm["' + s + '"];' for s in exports]
    else:
      # Exports are not declared individually: copy everything off the asm
      # object onto the global object (and Module, outside MINIMAL_RUNTIME).
      if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
        global_object = '(typeof process !== "undefined" ? global : this)'
      elif shared.Settings.target_environment_may_be('node'):
        global_object = 'global'
      else:
        global_object = 'this'
      if shared.Settings.MINIMAL_RUNTIME:
        module_assign = ''
      else:
        module_assign = 'Module[asmjs_mangle(__exportedFunc)] = '
      receiving.append('''
function asmjs_mangle(x) {
  var unmangledSymbols = %s;
  return x.indexOf('dynCall_') == 0 || unmangledSymbols.indexOf(x) != -1 ? x : '_' + x;
}
''' % shared.Settings.WASM_FUNCTIONS_THAT_ARE_NOT_NAME_MANGLED)
      receiving.append('for(var __exportedFunc in asm) ' + global_object + '[asmjs_mangle(__exportedFunc)] = ' + module_assign + 'asm[__exportedFunc];')
  else:
    receiving.append('Module["asm"] = asm;')
    for e in exports:
      if shared.Settings.ASSERTIONS:
        # With assertions on, don't hot-swap implementation.
        receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
  return Module["asm"]["%(e)s"].apply(null, arguments)
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
      else:
        # With assertions off, hot-swap implementation to avoid garbage via
        # arguments keyword.
        receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
  return (%(mangled)s = Module["%(mangled)s"] = Module["asm"]["%(e)s"]).apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
  return '\n'.join(receiving) + '\n'
def create_module_wasm(sending, receiving, invoke_funcs, metadata):
  """Assemble the ordered list of JS fragments that instantiate and wire up
  the wasm module (import object, instantiation, export receiving, invoke
  wrappers)."""
  invoke_wrappers = create_invoke_wrappers(invoke_funcs)
  receiving += create_named_globals(metadata)
  receiving += create_fp_accessors(metadata)

  parts = ['var asmGlobalArg = {};\n']
  if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
    parts.append("if (typeof SharedArrayBuffer !== 'undefined') asmGlobalArg['Atomics'] = Atomics;\n")
  parts.append('var asmLibraryArg = %s;\n' % (sending))
  if shared.Settings.ASYNCIFY and shared.Settings.ASSERTIONS:
    parts.append('Asyncify.instrumentWasmImports(asmLibraryArg);\n')
  # MINIMAL_RUNTIME instantiates the module itself, later.
  if not shared.Settings.MINIMAL_RUNTIME:
    parts.append("var asm = createWasm();\n")
  parts.append(receiving)
  parts.append(invoke_wrappers)
  return parts
def load_metadata_wasm(metadata_raw, DEBUG):
  """Parse and validate the JSON metadata printed by wasm-emscripten-finalize.

  Unknown keys are a hard error; string lists are re-encoded via asstr for
  py2/py3 interop; initializer names are mangled for the regular runtime.
  Also records unexpected (llvm.used) exports as user-requested exports.
  Returns the filled-in metadata dict.
  """
  try:
    metadata_json = json.loads(metadata_raw)
  except Exception:
    logger.error('emscript: failure to parse metadata output from wasm-emscripten-finalize. raw output is: \n' + metadata_raw)
    raise
  # Defaults for every key we understand; anything else is rejected below.
  metadata = {
    'aliases': {},
    'declares': [],
    'implementedFunctions': [],
    'externs': [],
    'simd': False,
    'maxGlobalAlign': 0,
    'staticBump': 0,
    'tableSize': 0,
    'initializers': [],
    'exports': [],
    'namedGlobals': {},
    'emJsFuncs': {},
    'asmConsts': {},
    'invokeFuncs': [],
    'features': [],
    'mainReadsParams': 1,
  }
  assert 'tableSize' in metadata_json
  for key, value in metadata_json.items():
    # json.loads returns `unicode` for strings but other code in this file
    # generally works with utf8 encoded `str` objects, and they don't always
    # mix well. e.g. s.replace(x, y) will blow up if `s` is a utf8 str
    # containing non-ascii and either x or y are unicode objects.
    # TODO(sbc): Remove this encoding if we switch to unicode elsewhere
    # (specifically the glue returned from compile_settings)
    if isinstance(value, list):
      value = [asstr(v) for v in value]
    if key not in metadata:
      exit_with_error('unexpected metadata key received from wasm-emscripten-finalize: %s', key)
    metadata[key] = value
  if not shared.Settings.MINIMAL_RUNTIME:
    # In regular runtime initializers call the global var version of the export, so they get the mangled name.
    # In MINIMAL_RUNTIME, the initializers are called directly off the export object for minimal code size.
    metadata['initializers'] = [asmjs_mangle(i) for i in metadata['initializers']]
  if DEBUG:
    logger.debug("Metadata parsed: " + pprint.pformat(metadata))
  # Calculate the subset of exports that were explicitly marked with llvm.used.
  # These are any exports that were not requested on the command line and are
  # not known auto-generated system functions.
  unexpected_exports = [e for e in metadata['exports'] if treat_as_user_function(e)]
  unexpected_exports = [asmjs_mangle(e) for e in unexpected_exports]
  unexpected_exports = [e for e in unexpected_exports if e not in shared.Settings.EXPORTED_FUNCTIONS]
  shared.Building.user_requested_exports += unexpected_exports
  return metadata
def create_invoke_wrappers(invoke_funcs):
  """Emit the JS invoke_* wrappers (setjmp/longjmp and exception thunks),
  one per signature found in `invoke_funcs`."""
  sigs = (name[len('invoke_'):] for name in invoke_funcs)
  return ''.join('\n' + shared.JS.make_invoke(sig) + '\n' for sig in sigs)
def normalize_line_endings(text):
  """On Windows hosts, convert CRLF line endings to LF; elsewhere pass through."""
  return text.replace('\r\n', '\n') if WINDOWS else text
def run(infile, outfile, memfile):
  """Entry point: generate JS glue for `infile`, writing it to `outfile`.

  Ensures the struct-info JSON exists (generating and caching it on first
  use), then dispatches to the wasm-backend or fastcomp emscripter.
  """
  temp_files = get_configuration().get_temp_files()
  infile, outfile = substitute_response_files([infile, outfile])
  if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO:
    generated_struct_info_name = 'generated_struct_info.json'

    # Cache miss callback: build the struct info file and return its path.
    def generate_struct_info():
      with ToolchainProfiler.profile_block('gen_struct_info'):
        out = shared.Cache.get_path(generated_struct_info_name)
        gen_struct_info.main(['-q', '-c', '-o', out])
        return out

    shared.Settings.STRUCT_INFO = shared.Cache.get(generated_struct_info_name, generate_struct_info)
  # do we need an else, to define it for the bootstrap case?
  outfile_obj = open(outfile, 'w')
  emscripter = emscript_wasm_backend if shared.Settings.WASM_BACKEND else emscript_fastcomp
  return temp_files.run_and_clean(lambda: emscripter(
      infile, outfile_obj, memfile, shared.NODE_JS, temp_files, shared.DEBUG)
  )
| true | true |
f72e8982cdd4262f5a24940e7a182364aca39d7f | 13,812 | py | Python | troposphere/pinpoint.py | ds-mn/troposphere | ebaa1749dff9da552e4dbaff188b740c60a8387a | [
"BSD-2-Clause"
] | 1 | 2019-10-09T04:11:40.000Z | 2019-10-09T04:11:40.000Z | troposphere/pinpoint.py | ds-mn/troposphere | ebaa1749dff9da552e4dbaff188b740c60a8387a | [
"BSD-2-Clause"
] | null | null | null | troposphere/pinpoint.py | ds-mn/troposphere | ebaa1749dff9da552e4dbaff188b740c60a8387a | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 47.0.0
from . import AWSObject, AWSProperty
from .validators import boolean, double, integer
class ADMChannel(AWSObject):
    """AWS::Pinpoint::ADMChannel — Amazon Device Messaging push channel for a Pinpoint app."""
    resource_type = "AWS::Pinpoint::ADMChannel"
    # props maps CloudFormation property name -> (expected type, required?)
    props = {
        "ApplicationId": (str, True),
        "ClientId": (str, True),
        "ClientSecret": (str, True),
        "Enabled": (boolean, False),
    }
class APNSChannel(AWSObject):
    """AWS::Pinpoint::APNSChannel — Apple Push Notification service channel (cert or token auth)."""
    resource_type = "AWS::Pinpoint::APNSChannel"
    props = {
        "ApplicationId": (str, True),
        "BundleId": (str, False),
        "Certificate": (str, False),
        "DefaultAuthenticationMethod": (str, False),
        "Enabled": (boolean, False),
        "PrivateKey": (str, False),
        "TeamId": (str, False),
        "TokenKey": (str, False),
        "TokenKeyId": (str, False),
    }
class APNSSandboxChannel(AWSObject):
    """AWS::Pinpoint::APNSSandboxChannel — APNs sandbox (development) channel."""
    resource_type = "AWS::Pinpoint::APNSSandboxChannel"
    props = {
        "ApplicationId": (str, True),
        "BundleId": (str, False),
        "Certificate": (str, False),
        "DefaultAuthenticationMethod": (str, False),
        "Enabled": (boolean, False),
        "PrivateKey": (str, False),
        "TeamId": (str, False),
        "TokenKey": (str, False),
        "TokenKeyId": (str, False),
    }
class APNSVoipChannel(AWSObject):
    """AWS::Pinpoint::APNSVoipChannel — APNs VoIP push channel."""
    resource_type = "AWS::Pinpoint::APNSVoipChannel"
    props = {
        "ApplicationId": (str, True),
        "BundleId": (str, False),
        "Certificate": (str, False),
        "DefaultAuthenticationMethod": (str, False),
        "Enabled": (boolean, False),
        "PrivateKey": (str, False),
        "TeamId": (str, False),
        "TokenKey": (str, False),
        "TokenKeyId": (str, False),
    }
class APNSVoipSandboxChannel(AWSObject):
    """AWS::Pinpoint::APNSVoipSandboxChannel — APNs VoIP sandbox (development) channel."""
    resource_type = "AWS::Pinpoint::APNSVoipSandboxChannel"
    props = {
        "ApplicationId": (str, True),
        "BundleId": (str, False),
        "Certificate": (str, False),
        "DefaultAuthenticationMethod": (str, False),
        "Enabled": (boolean, False),
        "PrivateKey": (str, False),
        "TeamId": (str, False),
        "TokenKey": (str, False),
        "TokenKeyId": (str, False),
    }
class App(AWSObject):
    """AWS::Pinpoint::App — a Pinpoint application (project)."""
    resource_type = "AWS::Pinpoint::App"
    props = {
        "Name": (str, True),
        "Tags": (dict, False),
    }
class CampaignHook(AWSProperty):
    """Lambda hook settings for customizing campaign message delivery."""
    props = {
        "LambdaFunctionName": (str, False),
        "Mode": (str, False),
        "WebUrl": (str, False),
    }
class Limits(AWSProperty):
    """Messaging limits (per day, per session, totals) for a campaign or app."""
    props = {
        "Daily": (integer, False),
        "MaximumDuration": (integer, False),
        "MessagesPerSecond": (integer, False),
        "Session": (integer, False),
        "Total": (integer, False),
    }
class QuietTime(AWSProperty):
    """Daily window (HH:MM start/end) during which messages are not sent."""
    props = {
        "End": (str, True),
        "Start": (str, True),
    }
class ApplicationSettings(AWSObject):
    """AWS::Pinpoint::ApplicationSettings — default settings for a Pinpoint app."""
    resource_type = "AWS::Pinpoint::ApplicationSettings"
    props = {
        "ApplicationId": (str, True),
        "CampaignHook": (CampaignHook, False),
        "CloudWatchMetricsEnabled": (boolean, False),
        "Limits": (Limits, False),
        "QuietTime": (QuietTime, False),
    }
class BaiduChannel(AWSObject):
    """AWS::Pinpoint::BaiduChannel — Baidu Cloud Push channel."""
    resource_type = "AWS::Pinpoint::BaiduChannel"
    props = {
        "ApiKey": (str, True),
        "ApplicationId": (str, True),
        "Enabled": (boolean, False),
        "SecretKey": (str, True),
    }
class CampaignEmailMessage(AWSProperty):
    """Email message content for a campaign."""
    props = {
        "Body": (str, False),
        "FromAddress": (str, False),
        "HtmlBody": (str, False),
        "Title": (str, False),
    }
class BodyConfig(AWSProperty):
    """Body text configuration of an in-app message."""
    props = {
        "Alignment": (str, False),
        "Body": (str, False),
        "TextColor": (str, False),
    }
class DefaultButtonConfiguration(AWSProperty):
    """Default (cross-platform) button settings for an in-app message."""
    props = {
        "BackgroundColor": (str, False),
        "BorderRadius": (integer, False),
        "ButtonAction": (str, False),
        "Link": (str, False),
        "Text": (str, False),
        "TextColor": (str, False),
    }
class OverrideButtonConfiguration(AWSProperty):
    """Per-platform override of an in-app message button's action/link."""
    props = {
        "ButtonAction": (str, False),
        "Link": (str, False),
    }
class ButtonConfig(AWSProperty):
    """An in-app message button: default config plus per-platform overrides."""
    props = {
        "Android": (OverrideButtonConfiguration, False),
        "DefaultConfig": (DefaultButtonConfiguration, False),
        "IOS": (OverrideButtonConfiguration, False),
        "Web": (OverrideButtonConfiguration, False),
    }
class HeaderConfig(AWSProperty):
    """Header (title) text configuration of an in-app message."""
    props = {
        "Alignment": (str, False),
        "Header": (str, False),
        "TextColor": (str, False),
    }
class InAppMessageContent(AWSProperty):
    """Content (header, body, image, buttons) of one in-app message panel."""
    props = {
        "BackgroundColor": (str, False),
        "BodyConfig": (BodyConfig, False),
        "HeaderConfig": (HeaderConfig, False),
        "ImageUrl": (str, False),
        "PrimaryBtn": (ButtonConfig, False),
        "SecondaryBtn": (ButtonConfig, False),
    }
class CampaignInAppMessage(AWSProperty):
    """In-app message settings for a campaign."""
    props = {
        "Content": ([InAppMessageContent], False),
        "CustomConfig": (dict, False),
        "Layout": (str, False),
    }
class CampaignSmsMessage(AWSProperty):
    """SMS message content and sender settings for a campaign."""
    props = {
        "Body": (str, False),
        "EntityId": (str, False),
        "MessageType": (str, False),
        "OriginationNumber": (str, False),
        "SenderId": (str, False),
        "TemplateId": (str, False),
    }
class Message(AWSProperty):
    """Push notification content (shared shape for ADM/APNS/Baidu/GCM/default)."""
    props = {
        "Action": (str, False),
        "Body": (str, False),
        "ImageIconUrl": (str, False),
        "ImageSmallIconUrl": (str, False),
        "ImageUrl": (str, False),
        "JsonBody": (str, False),
        "MediaUrl": (str, False),
        "RawContent": (str, False),
        "SilentPush": (boolean, False),
        "TimeToLive": (integer, False),
        "Title": (str, False),
        "Url": (str, False),
    }
class MessageConfiguration(AWSProperty):
    """Per-channel message bodies for a campaign (push, email, SMS, in-app)."""
    props = {
        "ADMMessage": (Message, False),
        "APNSMessage": (Message, False),
        "BaiduMessage": (Message, False),
        "DefaultMessage": (Message, False),
        "EmailMessage": (CampaignEmailMessage, False),
        "GCMMessage": (Message, False),
        "InAppMessage": (CampaignInAppMessage, False),
        "SMSMessage": (CampaignSmsMessage, False),
    }
class SetDimension(AWSProperty):
    """A dimension filter: inclusion/exclusion type plus the value set."""
    props = {
        "DimensionType": (str, False),
        "Values": ([str], False),
    }
class EventDimensions(AWSProperty):
    """Dimensions (attributes, event type, metrics) used to match events."""
    props = {
        "Attributes": (dict, False),
        "EventType": (SetDimension, False),
        "Metrics": (dict, False),
    }
class CampaignEventFilter(AWSProperty):
    """Event filter that can trigger a campaign."""
    props = {
        "Dimensions": (EventDimensions, False),
        "FilterType": (str, False),
    }
class Schedule(AWSProperty):
    """When and how often a campaign runs (window, frequency, quiet time)."""
    props = {
        "EndTime": (str, False),
        "EventFilter": (CampaignEventFilter, False),
        "Frequency": (str, False),
        "IsLocalTime": (boolean, False),
        "QuietTime": (QuietTime, False),
        "StartTime": (str, False),
        "TimeZone": (str, False),
    }
class WriteTreatmentResource(AWSProperty):
    """An additional A/B-test treatment (message variant) for a campaign."""
    props = {
        "MessageConfiguration": (MessageConfiguration, False),
        "Schedule": (Schedule, False),
        "SizePercent": (integer, False),
        "TreatmentDescription": (str, False),
        "TreatmentName": (str, False),
    }
class Campaign(AWSObject):
    """AWS::Pinpoint::Campaign — a messaging campaign targeting a segment."""
    resource_type = "AWS::Pinpoint::Campaign"
    props = {
        "AdditionalTreatments": ([WriteTreatmentResource], False),
        "ApplicationId": (str, True),
        "CampaignHook": (CampaignHook, False),
        "Description": (str, False),
        "HoldoutPercent": (integer, False),
        "IsPaused": (boolean, False),
        "Limits": (Limits, False),
        "MessageConfiguration": (MessageConfiguration, True),
        "Name": (str, True),
        "Priority": (integer, False),
        "Schedule": (Schedule, True),
        "SegmentId": (str, True),
        "SegmentVersion": (integer, False),
        "Tags": (dict, False),
        "TreatmentDescription": (str, False),
        "TreatmentName": (str, False),
    }
class EmailChannel(AWSObject):
resource_type = "AWS::Pinpoint::EmailChannel"
props = {
"ApplicationId": (str, True),
"ConfigurationSet": (str, False),
"Enabled": (boolean, False),
"FromAddress": (str, True),
"Identity": (str, True),
"RoleArn": (str, False),
}
class EmailTemplate(AWSObject):
resource_type = "AWS::Pinpoint::EmailTemplate"
props = {
"DefaultSubstitutions": (str, False),
"HtmlPart": (str, False),
"Subject": (str, True),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
"TextPart": (str, False),
}
class EventStream(AWSObject):
resource_type = "AWS::Pinpoint::EventStream"
props = {
"ApplicationId": (str, True),
"DestinationStreamArn": (str, True),
"RoleArn": (str, True),
}
class GCMChannel(AWSObject):
resource_type = "AWS::Pinpoint::GCMChannel"
props = {
"ApiKey": (str, True),
"ApplicationId": (str, True),
"Enabled": (boolean, False),
}
class InAppTemplate(AWSObject):
resource_type = "AWS::Pinpoint::InAppTemplate"
props = {
"Content": ([InAppMessageContent], False),
"CustomConfig": (dict, False),
"Layout": (str, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class APNSPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"MediaUrl": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class AndroidPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"ImageIconUrl": (str, False),
"ImageUrl": (str, False),
"SmallImageIconUrl": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class DefaultPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class PushTemplate(AWSObject):
resource_type = "AWS::Pinpoint::PushTemplate"
props = {
"ADM": (AndroidPushNotificationTemplate, False),
"APNS": (APNSPushNotificationTemplate, False),
"Baidu": (AndroidPushNotificationTemplate, False),
"Default": (DefaultPushNotificationTemplate, False),
"DefaultSubstitutions": (str, False),
"GCM": (AndroidPushNotificationTemplate, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class SMSChannel(AWSObject):
resource_type = "AWS::Pinpoint::SMSChannel"
props = {
"ApplicationId": (str, True),
"Enabled": (boolean, False),
"SenderId": (str, False),
"ShortCode": (str, False),
}
class Recency(AWSProperty):
props = {
"Duration": (str, True),
"RecencyType": (str, True),
}
class Behavior(AWSProperty):
props = {
"Recency": (Recency, False),
}
class Demographic(AWSProperty):
props = {
"AppVersion": (SetDimension, False),
"Channel": (SetDimension, False),
"DeviceType": (SetDimension, False),
"Make": (SetDimension, False),
"Model": (SetDimension, False),
"Platform": (SetDimension, False),
}
class Coordinates(AWSProperty):
props = {
"Latitude": (double, True),
"Longitude": (double, True),
}
class GPSPoint(AWSProperty):
props = {
"Coordinates": (Coordinates, True),
"RangeInKilometers": (double, True),
}
class Location(AWSProperty):
props = {
"Country": (SetDimension, False),
"GPSPoint": (GPSPoint, False),
}
class SegmentDimensions(AWSProperty):
props = {
"Attributes": (dict, False),
"Behavior": (Behavior, False),
"Demographic": (Demographic, False),
"Location": (Location, False),
"Metrics": (dict, False),
"UserAttributes": (dict, False),
}
class SourceSegments(AWSProperty):
props = {
"Id": (str, True),
"Version": (integer, False),
}
class Groups(AWSProperty):
props = {
"Dimensions": ([SegmentDimensions], False),
"SourceSegments": ([SourceSegments], False),
"SourceType": (str, False),
"Type": (str, False),
}
class SegmentGroups(AWSProperty):
props = {
"Groups": ([Groups], False),
"Include": (str, False),
}
class Segment(AWSObject):
resource_type = "AWS::Pinpoint::Segment"
props = {
"ApplicationId": (str, True),
"Dimensions": (SegmentDimensions, False),
"Name": (str, True),
"SegmentGroups": (SegmentGroups, False),
"Tags": (dict, False),
}
class SmsTemplate(AWSObject):
resource_type = "AWS::Pinpoint::SmsTemplate"
props = {
"Body": (str, True),
"DefaultSubstitutions": (str, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class VoiceChannel(AWSObject):
resource_type = "AWS::Pinpoint::VoiceChannel"
props = {
"ApplicationId": (str, True),
"Enabled": (boolean, False),
}
| 24.841727 | 66 | 0.564147 |
from . import AWSObject, AWSProperty
from .validators import boolean, double, integer
class ADMChannel(AWSObject):
resource_type = "AWS::Pinpoint::ADMChannel"
props = {
"ApplicationId": (str, True),
"ClientId": (str, True),
"ClientSecret": (str, True),
"Enabled": (boolean, False),
}
class APNSChannel(AWSObject):
resource_type = "AWS::Pinpoint::APNSChannel"
props = {
"ApplicationId": (str, True),
"BundleId": (str, False),
"Certificate": (str, False),
"DefaultAuthenticationMethod": (str, False),
"Enabled": (boolean, False),
"PrivateKey": (str, False),
"TeamId": (str, False),
"TokenKey": (str, False),
"TokenKeyId": (str, False),
}
class APNSSandboxChannel(AWSObject):
resource_type = "AWS::Pinpoint::APNSSandboxChannel"
props = {
"ApplicationId": (str, True),
"BundleId": (str, False),
"Certificate": (str, False),
"DefaultAuthenticationMethod": (str, False),
"Enabled": (boolean, False),
"PrivateKey": (str, False),
"TeamId": (str, False),
"TokenKey": (str, False),
"TokenKeyId": (str, False),
}
class APNSVoipChannel(AWSObject):
resource_type = "AWS::Pinpoint::APNSVoipChannel"
props = {
"ApplicationId": (str, True),
"BundleId": (str, False),
"Certificate": (str, False),
"DefaultAuthenticationMethod": (str, False),
"Enabled": (boolean, False),
"PrivateKey": (str, False),
"TeamId": (str, False),
"TokenKey": (str, False),
"TokenKeyId": (str, False),
}
class APNSVoipSandboxChannel(AWSObject):
resource_type = "AWS::Pinpoint::APNSVoipSandboxChannel"
props = {
"ApplicationId": (str, True),
"BundleId": (str, False),
"Certificate": (str, False),
"DefaultAuthenticationMethod": (str, False),
"Enabled": (boolean, False),
"PrivateKey": (str, False),
"TeamId": (str, False),
"TokenKey": (str, False),
"TokenKeyId": (str, False),
}
class App(AWSObject):
resource_type = "AWS::Pinpoint::App"
props = {
"Name": (str, True),
"Tags": (dict, False),
}
class CampaignHook(AWSProperty):
props = {
"LambdaFunctionName": (str, False),
"Mode": (str, False),
"WebUrl": (str, False),
}
class Limits(AWSProperty):
props = {
"Daily": (integer, False),
"MaximumDuration": (integer, False),
"MessagesPerSecond": (integer, False),
"Session": (integer, False),
"Total": (integer, False),
}
class QuietTime(AWSProperty):
props = {
"End": (str, True),
"Start": (str, True),
}
class ApplicationSettings(AWSObject):
resource_type = "AWS::Pinpoint::ApplicationSettings"
props = {
"ApplicationId": (str, True),
"CampaignHook": (CampaignHook, False),
"CloudWatchMetricsEnabled": (boolean, False),
"Limits": (Limits, False),
"QuietTime": (QuietTime, False),
}
class BaiduChannel(AWSObject):
resource_type = "AWS::Pinpoint::BaiduChannel"
props = {
"ApiKey": (str, True),
"ApplicationId": (str, True),
"Enabled": (boolean, False),
"SecretKey": (str, True),
}
class CampaignEmailMessage(AWSProperty):
props = {
"Body": (str, False),
"FromAddress": (str, False),
"HtmlBody": (str, False),
"Title": (str, False),
}
class BodyConfig(AWSProperty):
props = {
"Alignment": (str, False),
"Body": (str, False),
"TextColor": (str, False),
}
class DefaultButtonConfiguration(AWSProperty):
props = {
"BackgroundColor": (str, False),
"BorderRadius": (integer, False),
"ButtonAction": (str, False),
"Link": (str, False),
"Text": (str, False),
"TextColor": (str, False),
}
class OverrideButtonConfiguration(AWSProperty):
props = {
"ButtonAction": (str, False),
"Link": (str, False),
}
class ButtonConfig(AWSProperty):
props = {
"Android": (OverrideButtonConfiguration, False),
"DefaultConfig": (DefaultButtonConfiguration, False),
"IOS": (OverrideButtonConfiguration, False),
"Web": (OverrideButtonConfiguration, False),
}
class HeaderConfig(AWSProperty):
props = {
"Alignment": (str, False),
"Header": (str, False),
"TextColor": (str, False),
}
class InAppMessageContent(AWSProperty):
props = {
"BackgroundColor": (str, False),
"BodyConfig": (BodyConfig, False),
"HeaderConfig": (HeaderConfig, False),
"ImageUrl": (str, False),
"PrimaryBtn": (ButtonConfig, False),
"SecondaryBtn": (ButtonConfig, False),
}
class CampaignInAppMessage(AWSProperty):
props = {
"Content": ([InAppMessageContent], False),
"CustomConfig": (dict, False),
"Layout": (str, False),
}
class CampaignSmsMessage(AWSProperty):
props = {
"Body": (str, False),
"EntityId": (str, False),
"MessageType": (str, False),
"OriginationNumber": (str, False),
"SenderId": (str, False),
"TemplateId": (str, False),
}
class Message(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"ImageIconUrl": (str, False),
"ImageSmallIconUrl": (str, False),
"ImageUrl": (str, False),
"JsonBody": (str, False),
"MediaUrl": (str, False),
"RawContent": (str, False),
"SilentPush": (boolean, False),
"TimeToLive": (integer, False),
"Title": (str, False),
"Url": (str, False),
}
class MessageConfiguration(AWSProperty):
props = {
"ADMMessage": (Message, False),
"APNSMessage": (Message, False),
"BaiduMessage": (Message, False),
"DefaultMessage": (Message, False),
"EmailMessage": (CampaignEmailMessage, False),
"GCMMessage": (Message, False),
"InAppMessage": (CampaignInAppMessage, False),
"SMSMessage": (CampaignSmsMessage, False),
}
class SetDimension(AWSProperty):
props = {
"DimensionType": (str, False),
"Values": ([str], False),
}
class EventDimensions(AWSProperty):
props = {
"Attributes": (dict, False),
"EventType": (SetDimension, False),
"Metrics": (dict, False),
}
class CampaignEventFilter(AWSProperty):
props = {
"Dimensions": (EventDimensions, False),
"FilterType": (str, False),
}
class Schedule(AWSProperty):
props = {
"EndTime": (str, False),
"EventFilter": (CampaignEventFilter, False),
"Frequency": (str, False),
"IsLocalTime": (boolean, False),
"QuietTime": (QuietTime, False),
"StartTime": (str, False),
"TimeZone": (str, False),
}
class WriteTreatmentResource(AWSProperty):
props = {
"MessageConfiguration": (MessageConfiguration, False),
"Schedule": (Schedule, False),
"SizePercent": (integer, False),
"TreatmentDescription": (str, False),
"TreatmentName": (str, False),
}
class Campaign(AWSObject):
resource_type = "AWS::Pinpoint::Campaign"
props = {
"AdditionalTreatments": ([WriteTreatmentResource], False),
"ApplicationId": (str, True),
"CampaignHook": (CampaignHook, False),
"Description": (str, False),
"HoldoutPercent": (integer, False),
"IsPaused": (boolean, False),
"Limits": (Limits, False),
"MessageConfiguration": (MessageConfiguration, True),
"Name": (str, True),
"Priority": (integer, False),
"Schedule": (Schedule, True),
"SegmentId": (str, True),
"SegmentVersion": (integer, False),
"Tags": (dict, False),
"TreatmentDescription": (str, False),
"TreatmentName": (str, False),
}
class EmailChannel(AWSObject):
resource_type = "AWS::Pinpoint::EmailChannel"
props = {
"ApplicationId": (str, True),
"ConfigurationSet": (str, False),
"Enabled": (boolean, False),
"FromAddress": (str, True),
"Identity": (str, True),
"RoleArn": (str, False),
}
class EmailTemplate(AWSObject):
resource_type = "AWS::Pinpoint::EmailTemplate"
props = {
"DefaultSubstitutions": (str, False),
"HtmlPart": (str, False),
"Subject": (str, True),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
"TextPart": (str, False),
}
class EventStream(AWSObject):
resource_type = "AWS::Pinpoint::EventStream"
props = {
"ApplicationId": (str, True),
"DestinationStreamArn": (str, True),
"RoleArn": (str, True),
}
class GCMChannel(AWSObject):
resource_type = "AWS::Pinpoint::GCMChannel"
props = {
"ApiKey": (str, True),
"ApplicationId": (str, True),
"Enabled": (boolean, False),
}
class InAppTemplate(AWSObject):
resource_type = "AWS::Pinpoint::InAppTemplate"
props = {
"Content": ([InAppMessageContent], False),
"CustomConfig": (dict, False),
"Layout": (str, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class APNSPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"MediaUrl": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class AndroidPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"ImageIconUrl": (str, False),
"ImageUrl": (str, False),
"SmallImageIconUrl": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class DefaultPushNotificationTemplate(AWSProperty):
props = {
"Action": (str, False),
"Body": (str, False),
"Sound": (str, False),
"Title": (str, False),
"Url": (str, False),
}
class PushTemplate(AWSObject):
resource_type = "AWS::Pinpoint::PushTemplate"
props = {
"ADM": (AndroidPushNotificationTemplate, False),
"APNS": (APNSPushNotificationTemplate, False),
"Baidu": (AndroidPushNotificationTemplate, False),
"Default": (DefaultPushNotificationTemplate, False),
"DefaultSubstitutions": (str, False),
"GCM": (AndroidPushNotificationTemplate, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class SMSChannel(AWSObject):
resource_type = "AWS::Pinpoint::SMSChannel"
props = {
"ApplicationId": (str, True),
"Enabled": (boolean, False),
"SenderId": (str, False),
"ShortCode": (str, False),
}
class Recency(AWSProperty):
props = {
"Duration": (str, True),
"RecencyType": (str, True),
}
class Behavior(AWSProperty):
props = {
"Recency": (Recency, False),
}
class Demographic(AWSProperty):
props = {
"AppVersion": (SetDimension, False),
"Channel": (SetDimension, False),
"DeviceType": (SetDimension, False),
"Make": (SetDimension, False),
"Model": (SetDimension, False),
"Platform": (SetDimension, False),
}
class Coordinates(AWSProperty):
props = {
"Latitude": (double, True),
"Longitude": (double, True),
}
class GPSPoint(AWSProperty):
props = {
"Coordinates": (Coordinates, True),
"RangeInKilometers": (double, True),
}
class Location(AWSProperty):
props = {
"Country": (SetDimension, False),
"GPSPoint": (GPSPoint, False),
}
class SegmentDimensions(AWSProperty):
props = {
"Attributes": (dict, False),
"Behavior": (Behavior, False),
"Demographic": (Demographic, False),
"Location": (Location, False),
"Metrics": (dict, False),
"UserAttributes": (dict, False),
}
class SourceSegments(AWSProperty):
props = {
"Id": (str, True),
"Version": (integer, False),
}
class Groups(AWSProperty):
props = {
"Dimensions": ([SegmentDimensions], False),
"SourceSegments": ([SourceSegments], False),
"SourceType": (str, False),
"Type": (str, False),
}
class SegmentGroups(AWSProperty):
props = {
"Groups": ([Groups], False),
"Include": (str, False),
}
class Segment(AWSObject):
resource_type = "AWS::Pinpoint::Segment"
props = {
"ApplicationId": (str, True),
"Dimensions": (SegmentDimensions, False),
"Name": (str, True),
"SegmentGroups": (SegmentGroups, False),
"Tags": (dict, False),
}
class SmsTemplate(AWSObject):
resource_type = "AWS::Pinpoint::SmsTemplate"
props = {
"Body": (str, True),
"DefaultSubstitutions": (str, False),
"Tags": (dict, False),
"TemplateDescription": (str, False),
"TemplateName": (str, True),
}
class VoiceChannel(AWSObject):
resource_type = "AWS::Pinpoint::VoiceChannel"
props = {
"ApplicationId": (str, True),
"Enabled": (boolean, False),
}
| true | true |
f72e8a516b80749a38c870c143ff7e475d8b0389 | 6,372 | py | Python | builder/main.py | djix123/platform-gd32v | 3c949e3ab9d615b441da64a7f3162db48ac1eae3 | [
"Apache-2.0"
] | null | null | null | builder/main.py | djix123/platform-gd32v | 3c949e3ab9d615b441da64a7f3162db48ac1eae3 | [
"Apache-2.0"
] | null | null | null | builder/main.py | djix123/platform-gd32v | 3c949e3ab9d615b441da64a7f3162db48ac1eae3 | [
"Apache-2.0"
] | null | null | null | import sys
from os.path import join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Default, DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
env.Replace(
AR="riscv-nuclei-elf-gcc-ar",
AS="riscv-nuclei-elf-as",
CC="riscv-nuclei-elf-gcc",
GDB="riscv-nuclei-elf-gdb",
CXX="riscv-nuclei-elf-g++",
OBJCOPY="riscv-nuclei-elf-objcopy",
RANLIB="riscv-nuclei-elf-gcc-ranlib",
SIZETOOL="riscv-nuclei-elf-size",
ARFLAGS=["rc"],
SIZEPRINTCMD='$SIZETOOL -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py", exports="env")
#
# Target: Build executable and linkable firmware
#
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
target_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
else:
target_elf = env.BuildProgram()
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
target_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
target_buildhex = env.Alias("buildhex", target_hex, target_hex)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .elf file
#
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = board.get("debug.tools", {})
upload_source = target_firm
upload_actions = []
if upload_protocol == "serial":
# def __configure_upload_port(env):
# return basename(env.subst("$UPLOAD_PORT"))
env.Replace(
# __configure_upload_port=__configure_upload_port,
UPLOADER="stm32flash",
UPLOADERFLAGS=[
"-g", board.get("upload.offset_address", "0x08000000"),
"-b", "115200", "-w"
],
#UPLOADCMD='$UPLOADER $UPLOADERFLAGS "$SOURCE" "${__configure_upload_port(__env__)}"'
UPLOADCMD='$UPLOADER $UPLOADERFLAGS "$SOURCE" "$UPLOAD_PORT"'
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol.startswith("rv-link"):
env.Replace(
UPLOADER="$GDB",
UPLOADERFLAGS=[
"-nx",
"--batch",
"-ex", "target extended-remote $UPLOAD_PORT",
"-ex", "monitor reset halt",
"-ex", "load",
"-ex", "monitor reset",
"-ex", "kill"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCE"
)
upload_source = target_elf
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for RV-Link port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "dfu":
hwids = board.get("build.hwids", [["0x0483", "0xDF11"]])
vid = hwids[0][0]
pid = hwids[0][1]
_upload_tool = join(platform.get_package_dir(
"tool-dfuutil") or "", "bin", "dfu-util")
_upload_flags = [
"-d", "%s:%s" % (vid.split('x')[1], pid.split('x')[1]),
"-a", "0", "--dfuse-address",
"%s:leave" % board.get("upload.offset_address", "0x08000000"), "-D"
]
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
# Add special DFU header to the binary image
env.AddPostAction(
join("$BUILD_DIR", "${PROGNAME}.bin"),
env.VerboseAction(
" ".join([
join(platform.get_package_dir(
"tool-dfuutil") or "", "bin", "dfu-suffix"),
"-v %s" % vid,
"-p %s" % pid,
"-d 0xffff", "-a", "$TARGET"
]), "Adding dfu suffix to ${PROGNAME}.bin"))
env.Replace(
UPLOADER = _upload_tool,
UPLOADERFLAGS = _upload_flags,
UPLOADCMD = '$UPLOADER $UPLOADERFLAGS "${SOURCE.get_abspath()}"'
)
upload_source = target_firm
elif upload_protocol in debug_tools:
openocd_args = [
#"-c",
#"debug_level %d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1),
#"-s", platform.get_package_dir("tool-openocd-gd32v") or ""
]
# openocd_args.extend([
# "-f",
# "scripts/temp/openocd_%s.cfg" %("gdlink" if upload_protocol == "gd-link" else "jlink") # .cfg in a temp path
# ])
openocd_args.extend(
debug_tools.get(upload_protocol).get("server").get("arguments", [])
)
openocd_args.extend([
"-c", "init; halt;",
#"-c", "flash protect 0 0 last off; program {$SOURCE} verify; mww 0xe004200c 0x4b5a6978; mww 0xe0042008 0x01; resume; exit 0;"
#"-c", "program {$SOURCE} verify; exit 0;"
"-c", "program {$SOURCE} verify; reset; exit;"
])
env.Replace(
UPLOADER="openocd",
UPLOADERFLAGS=openocd_args,
UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
upload_source = target_elf
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
# custom upload tool
elif upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", upload_source, upload_actions))
#
# Setup default targets
#
Default([target_buildprog, target_buildhex, target_size])
| 30.056604 | 134 | 0.59275 | import sys
from os.path import join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Default, DefaultEnvironment)
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
env.Replace(
AR="riscv-nuclei-elf-gcc-ar",
AS="riscv-nuclei-elf-as",
CC="riscv-nuclei-elf-gcc",
GDB="riscv-nuclei-elf-gdb",
CXX="riscv-nuclei-elf-g++",
OBJCOPY="riscv-nuclei-elf-objcopy",
RANLIB="riscv-nuclei-elf-gcc-ranlib",
SIZETOOL="riscv-nuclei-elf-size",
ARFLAGS=["rc"],
SIZEPRINTCMD='$SIZETOOL -d $SOURCES',
PROGSUFFIX=".elf"
)
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py", exports="env")
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
target_hex = join("$BUILD_DIR", "${PROGNAME}.hex")
else:
target_elf = env.BuildProgram()
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
target_hex = env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
target_buildhex = env.Alias("buildhex", target_hex, target_hex)
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
debug_tools = board.get("debug.tools", {})
upload_source = target_firm
upload_actions = []
if upload_protocol == "serial":
env.Replace(
UPLOADER="stm32flash",
UPLOADERFLAGS=[
"-g", board.get("upload.offset_address", "0x08000000"),
"-b", "115200", "-w"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS "$SOURCE" "$UPLOAD_PORT"'
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol.startswith("rv-link"):
env.Replace(
UPLOADER="$GDB",
UPLOADERFLAGS=[
"-nx",
"--batch",
"-ex", "target extended-remote $UPLOAD_PORT",
"-ex", "monitor reset halt",
"-ex", "load",
"-ex", "monitor reset",
"-ex", "kill"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCE"
)
upload_source = target_elf
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for RV-Link port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "dfu":
hwids = board.get("build.hwids", [["0x0483", "0xDF11"]])
vid = hwids[0][0]
pid = hwids[0][1]
_upload_tool = join(platform.get_package_dir(
"tool-dfuutil") or "", "bin", "dfu-util")
_upload_flags = [
"-d", "%s:%s" % (vid.split('x')[1], pid.split('x')[1]),
"-a", "0", "--dfuse-address",
"%s:leave" % board.get("upload.offset_address", "0x08000000"), "-D"
]
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
env.AddPostAction(
join("$BUILD_DIR", "${PROGNAME}.bin"),
env.VerboseAction(
" ".join([
join(platform.get_package_dir(
"tool-dfuutil") or "", "bin", "dfu-suffix"),
"-v %s" % vid,
"-p %s" % pid,
"-d 0xffff", "-a", "$TARGET"
]), "Adding dfu suffix to ${PROGNAME}.bin"))
env.Replace(
UPLOADER = _upload_tool,
UPLOADERFLAGS = _upload_flags,
UPLOADCMD = '$UPLOADER $UPLOADERFLAGS "${SOURCE.get_abspath()}"'
)
upload_source = target_firm
elif upload_protocol in debug_tools:
openocd_args = [
]
s.extend(
debug_tools.get(upload_protocol).get("server").get("arguments", [])
)
openocd_args.extend([
"-c", "init; halt;",
"-c", "program {$SOURCE} verify; reset; exit;"
])
env.Replace(
UPLOADER="openocd",
UPLOADERFLAGS=openocd_args,
UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
upload_source = target_elf
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
elif upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", upload_source, upload_actions))
Default([target_buildprog, target_buildhex, target_size])
| true | true |
f72e8ad1c3608b91eaaa2a795f92f4b5c3a9b434 | 2,445 | py | Python | mmseg/integration/nncf/utils.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 3 | 2021-12-21T07:25:13.000Z | 2022-02-07T01:59:19.000Z | mmseg/integration/nncf/utils.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 13 | 2021-12-10T15:08:56.000Z | 2022-03-23T08:58:03.000Z | mmseg/integration/nncf/utils.py | yunchu/mmsegmentation | 404f3e0e8859991931b6a39a583de412348e98f0 | [
"Apache-2.0"
] | 3 | 2021-11-11T23:16:51.000Z | 2021-12-08T23:49:29.000Z | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import importlib
from collections import OrderedDict
from contextlib import contextmanager
import torch
_is_nncf_enabled = importlib.util.find_spec('nncf') is not None
def is_nncf_enabled():
return _is_nncf_enabled
def check_nncf_is_enabled():
if not is_nncf_enabled():
raise RuntimeError('Tried to use NNCF, but NNCF is not installed')
def get_nncf_version():
if not is_nncf_enabled():
return None
import nncf
return nncf.__version__
def load_checkpoint(model, filename, map_location=None, strict=False):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
from nncf.torch import load_state
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
_ = load_state(model, state_dict, strict)
return checkpoint
@contextmanager
def nullcontext():
"""
Context which does nothing
"""
yield
def no_nncf_trace():
"""
Wrapper for original NNCF no_nncf_trace() context
"""
if is_nncf_enabled():
from nncf.torch.dynamic_graph.context import no_nncf_trace as original_no_nncf_trace
return original_no_nncf_trace()
return nullcontext()
def is_in_nncf_tracing():
if not is_nncf_enabled():
return False
from nncf.torch.dynamic_graph.context import get_current_context
ctx = get_current_context()
if ctx is None:
return False
return ctx.is_tracing
def is_accuracy_aware_training_set(nncf_config):
if not is_nncf_enabled():
return False
from nncf.config.utils import is_accuracy_aware_training
is_acc_aware_training_set = is_accuracy_aware_training(nncf_config)
return is_acc_aware_training_set
| 25.46875 | 92 | 0.708384 |
import importlib
from collections import OrderedDict
from contextlib import contextmanager
import torch
_is_nncf_enabled = importlib.util.find_spec('nncf') is not None
def is_nncf_enabled():
return _is_nncf_enabled
def check_nncf_is_enabled():
if not is_nncf_enabled():
raise RuntimeError('Tried to use NNCF, but NNCF is not installed')
def get_nncf_version():
if not is_nncf_enabled():
return None
import nncf
return nncf.__version__
def load_checkpoint(model, filename, map_location=None, strict=False):
from nncf.torch import load_state
checkpoint = torch.load(filename, map_location=map_location)
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
_ = load_state(model, state_dict, strict)
return checkpoint
@contextmanager
def nullcontext():
yield
def no_nncf_trace():
if is_nncf_enabled():
from nncf.torch.dynamic_graph.context import no_nncf_trace as original_no_nncf_trace
return original_no_nncf_trace()
return nullcontext()
def is_in_nncf_tracing():
if not is_nncf_enabled():
return False
from nncf.torch.dynamic_graph.context import get_current_context
ctx = get_current_context()
if ctx is None:
return False
return ctx.is_tracing
def is_accuracy_aware_training_set(nncf_config):
if not is_nncf_enabled():
return False
from nncf.config.utils import is_accuracy_aware_training
is_acc_aware_training_set = is_accuracy_aware_training(nncf_config)
return is_acc_aware_training_set
| true | true |
f72e8ad32ab4f0397c6c09682578a2c085bad8b4 | 6,857 | py | Python | mir_eval/key.py | f90/mir_eval | 645f46258515677620add3a60fc21a6bf6b27363 | [
"MIT"
] | 431 | 2015-03-05T18:08:23.000Z | 2022-03-29T14:51:02.000Z | mir_eval/key.py | f90/mir_eval | 645f46258515677620add3a60fc21a6bf6b27363 | [
"MIT"
] | 217 | 2015-02-10T15:23:19.000Z | 2022-02-16T21:14:13.000Z | mir_eval/key.py | f90/mir_eval | 645f46258515677620add3a60fc21a6bf6b27363 | [
"MIT"
] | 118 | 2015-01-15T09:22:55.000Z | 2022-02-08T18:22:14.000Z | '''
Key Detection involves determining the underlying key (distribution of notes
and note transitions) in a piece of music. Key detection algorithms are
evaluated by comparing their estimated key to a ground-truth reference key and
reporting a score according to the relationship of the keys.
Conventions
-----------
Keys are represented as strings of the form ``'(key) (mode)'``, e.g. ``'C#
major'`` or ``'Fb minor'``. The case of the key is ignored. Note that certain
key strings are equivalent, e.g. ``'C# major'`` and ``'Db major'``. The mode
may only be specified as either ``'major'`` or ``'minor'``, no other mode
strings will be accepted.
Metrics
-------
* :func:`mir_eval.key.weighted_score`: Heuristic scoring of the relation of two
keys.
'''
import collections
from . import util
KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4,
'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9,
'a#': 10, 'bb': 10, 'b': 11, 'x': None}
def validate_key(key):
"""Checks that a key is well-formatted, e.g. in the form ``'C# major'``.
The Key can be 'X' if it is not possible to categorize the Key and mode
can be 'other' if it can't be categorized as major or minor.
Parameters
----------
key : str
Key to verify
"""
if len(key.split()) != 2 \
and not (len(key.split()) and key.lower() == 'x'):
raise ValueError("'{}' is not in the form '(key) (mode)' "
"or 'X'".format(key))
if key.lower() != 'x':
key, mode = key.split()
if key.lower() == 'x':
raise ValueError(
"Mode {} is invalid; 'X' (Uncategorized) "
"doesn't have mode".format(mode))
if key.lower() not in KEY_TO_SEMITONE:
raise ValueError(
"Key {} is invalid; should be e.g. D or C# or Eb or "
"X (Uncategorized)".format(key))
if mode not in ['major', 'minor', 'other']:
raise ValueError(
"Mode '{}' is invalid; must be 'major', 'minor' or 'other'"
.format(mode))
def validate(reference_key, estimated_key):
    """Check that both annotations are valid key strings, raising a
    helpful ``ValueError`` otherwise.

    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    """
    validate_key(reference_key)
    validate_key(estimated_key)
def split_key_string(key):
    """Convert a key string such as ``'C# major'`` into a
    ``(key, mode)`` tuple, where ``key`` is the integer number of
    semitones above C. The uncategorized key ``'X'`` yields
    ``(None, None)``.

    Parameters
    ----------
    key : str
        String representing a key.

    Returns
    -------
    key : int
        Number of semitones above C.
    mode : str
        String representing the mode.
    """
    mode = None
    if key.lower() != 'x':
        key, mode = key.split()
    return KEY_TO_SEMITONE[key.lower()], mode
def weighted_score(reference_key, estimated_key):
    """Compute a heuristic score weighted by the relationship of the
    reference and estimated keys:

    * 1.0 -- same key and mode
    * 0.5 -- estimated key is a perfect fifth above the reference key
    * 0.3 -- relative major/minor (same key signature)
    * 0.2 -- parallel major/minor (same key)
    * 0.0 -- any other relationship

    Examples
    --------
    >>> ref_key = mir_eval.io.load_key('ref.txt')
    >>> est_key = mir_eval.io.load_key('est.txt')
    >>> score = mir_eval.key.weighted_score(ref_key, est_key)

    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.

    Returns
    -------
    score : float
        Score representing how closely related the keys are.
    """
    validate(reference_key, estimated_key)
    ref_key, ref_mode = split_key_string(reference_key)
    est_key, est_mode = split_key_string(estimated_key)
    # Exact match of tonic and mode.
    if ref_key == est_key and ref_mode == est_mode:
        return 1.
    # An uncategorized ('X') key which was not an exact match scores zero.
    if ref_key is None or est_key is None:
        return 0.
    same_mode = est_mode == ref_mode
    semitone_gap = (est_key - ref_key) % 12
    # Same mode, estimate a perfect fifth (7 semitones) above the reference.
    if same_mode and semitone_gap == 7:
        return 0.5
    # Estimate is the relative minor of a major reference (9 semitones up).
    if not same_mode and ref_mode == 'major' and semitone_gap == 9:
        return 0.3
    # Estimate is the relative major of a minor reference (3 semitones up).
    if not same_mode and ref_mode == 'minor' and semitone_gap == 3:
        return 0.3
    # Parallel major/minor: same tonic, different mode.
    if not same_mode and ref_key == est_key:
        return 0.2
    # Any other relationship.
    return 0.
def evaluate(reference_key, estimated_key, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.
    Examples
    --------
    >>> ref_key = mir_eval.io.load_key('reference.txt')
    >>> est_key = mir_eval.io.load_key('estimated.txt')
    >>> scores = mir_eval.key.evaluate(ref_key, est_key)
    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.
    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Compute all metrics
    scores = collections.OrderedDict()
    scores['Weighted Score'] = util.filter_kwargs(
        weighted_score, reference_key, estimated_key)
    return scores
| 34.631313 | 79 | 0.553157 |
import collections
from . import util
# Map from pitch-class name (lowercase) to semitones above C. Enharmonic
# equivalents (e.g. 'c#'/'db') share a value; 'x' (uncategorizable key)
# maps to None.
KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4,
                   'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9,
                   'a#': 10, 'bb': 10, 'b': 11, 'x': None}
def validate_key(key):
    """Checks that a key is well-formatted, e.g. in the form ``'C# major'``.
    The key can be 'X' (uncategorized); the mode must be 'major', 'minor'
    or 'other'.
    Parameters
    ----------
    key : str
        Key to verify
    """
    if len(key.split()) != 2 \
            and not (len(key.split()) and key.lower() == 'x'):
        raise ValueError("'{}' is not in the form '(key) (mode)' "
                         "or 'X'".format(key))
    if key.lower() != 'x':
        key, mode = key.split()
        if key.lower() == 'x':
            raise ValueError(
                "Mode {} is invalid; 'X' (Uncategorized) "
                "doesn't have mode".format(mode))
        if key.lower() not in KEY_TO_SEMITONE:
            raise ValueError(
                "Key {} is invalid; should be e.g. D or C# or Eb or "
                "X (Uncategorized)".format(key))
        if mode not in ['major', 'minor', 'other']:
            raise ValueError(
                "Mode '{}' is invalid; must be 'major', 'minor' or 'other'"
                .format(mode))
def validate(reference_key, estimated_key):
    """Checks that both input annotations are valid key strings and raises
    a helpful ValueError if not.
    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    """
    for key in [reference_key, estimated_key]:
        validate_key(key)
def split_key_string(key):
    """Splits a key string like ``'C# major'`` into ``(key, mode)`` where
    ``key`` is the integer number of semitones above C. The uncategorized
    key ``'X'`` yields ``(None, None)``.
    Parameters
    ----------
    key : str
        String representing a key.
    Returns
    -------
    key : int
        Number of semitones above C.
    mode : str
        String representing the mode.
    """
    if key.lower() != 'x':
        key, mode = key.split()
    else:
        mode = None
    return KEY_TO_SEMITONE[key.lower()], mode
def weighted_score(reference_key, estimated_key):
    """Computes a heuristic score weighted by the relationship between the
    reference and estimated keys: 1.0 for an exact match, 0.5 when the
    estimate is a perfect fifth above the reference (same mode), 0.3 for
    relative major/minor, 0.2 for parallel major/minor, and 0.0 otherwise
    (including any uncategorized 'X' key that is not an exact match).
    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    Returns
    -------
    score : float
        Score representing how closely related the keys are.
    """
    validate(reference_key, estimated_key)
    reference_key, reference_mode = split_key_string(reference_key)
    estimated_key, estimated_mode = split_key_string(estimated_key)
    # If keys are the same, return 1.
    if reference_key == estimated_key and reference_mode == estimated_mode:
        return 1.
    # If reference or estimated key are x and they are not the same key
    # then the result is 'Other'.
    if reference_key is None or estimated_key is None:
        return 0.
    # If keys are the same mode and a perfect fifth (differ by 7 semitones)
    if (estimated_mode == reference_mode and
            (estimated_key - reference_key) % 12 == 7):
        return 0.5
    # Estimated key is relative minor of reference key (9 semitones)
    if (estimated_mode != reference_mode == 'major' and
            (estimated_key - reference_key) % 12 == 9):
        return 0.3
    # Estimated key is relative major of reference key (3 semitones)
    if (estimated_mode != reference_mode == 'minor' and
            (estimated_key - reference_key) % 12 == 3):
        return 0.3
    # If keys are in different modes and parallel (same key name)
    if estimated_mode != reference_mode and reference_key == estimated_key:
        return 0.2
    # Otherwise return 0
    return 0.
def evaluate(reference_key, estimated_key, **kwargs):
    """Compute all key-detection metrics for the given reference and
    estimated annotations.
    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    kwargs
        Additional keyword arguments forwarded (via util.filter_kwargs)
        to the metric functions.
    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Compute all metrics
    scores = collections.OrderedDict()
    scores['Weighted Score'] = util.filter_kwargs(
        weighted_score, reference_key, estimated_key)
    return scores
| true | true |
f72e8c5866fb2e87ce436a3e9688b5cbeb43d9f5 | 52,622 | py | Python | main.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 27 | 2020-06-17T11:40:17.000Z | 2021-11-16T07:39:33.000Z | main.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 3 | 2020-06-19T07:01:48.000Z | 2020-06-19T07:14:57.000Z | main.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 5 | 2020-11-19T01:11:24.000Z | 2021-12-24T09:03:56.000Z | import sys
sys.path = ['./rllab/'] + sys.path
print (sys.path)
import pickle
import os,time
from collections import deque
import tensorflow as tf
import numpy as np
import lunzi.nn as nn
from lunzi.Logger import logger
from slbo.utils.average_meter import AverageMeter
from slbo.utils.flags import FLAGS
from slbo.utils.dataset import Dataset, gen_dtype
from slbo.utils.OU_noise import OUNoise
from slbo.utils.normalizer import Normalizers
from slbo.utils.tf_utils import get_tf_config
from slbo.utils.runner import Runner
from slbo.policies.gaussian_mlp_policy import GaussianMLPPolicy
from slbo.envs.virtual_env import VirtualEnv
from slbo.dynamics_model import DynamicsModel
from slbo.v_function.mlp_v_function import MLPVFunction
from slbo.partial_envs import make_env, make_task
from slbo.loss.multi_step_loss import MultiStepLoss
from slbo.algos.TRPO import TRPO
from slbo.algos.ADVTASK import ADVTASK
from slbo.utils.tf_utils import initialize_uninitialized
import click
from gym.wrappers.monitor import Monitor
import gym
import scipy.misc
import scipy.ndimage
def render(env_, policy=None):
    """Roll out up to 200 steps in `env_` and collect observations.

    If `policy` is given, its actions are used (the first action of the
    batch returned by `policy.get_actions`); otherwise actions are sampled
    uniformly from the action space. The rollout stops early when the
    environment signals `done`; the reward of the terminal step is not
    added to the return (matching the original control flow).

    Returns
    -------
    dict with keys:
        'obs'    -- list of observations, each reshaped to (1, -1)
        'return' -- sum of rewards collected before termination
    """
    logger.info('start render video...')
    observation = env_.reset()
    return_ = 0.
    cnt_ = 0
    obs = []
    for _ in range(200):
        cnt_ += 1
        observation = observation.reshape(1, -1)
        obs.append(observation)
        if policy is not None:
            # get_actions returns a batch; step with the single action.
            action = policy.get_actions(observation)[0]
        else:
            action = env_.action_space.sample()
        observation, reward, done, info = env_.step(action)
        if done:
            break
        return_ += reward
    logger.info(f"render {cnt_} steps, return = {return_:.6f}")
    return {'obs': obs, 'return': return_}
def eval_rollout(runner, p, des):
    """Debug helper: collect one rollout of policy `p` and print summary stats.

    Parameters
    ----------
    runner : Runner
        Runner used to collect `FLAGS.plan.n_trpo_samples` samples.
    p : policy
        Policy to roll out; must support `p(state).log_prob(action)`.
    des : str
        Description string logged before the rollout.
    """
    logger.info(des)
    runner.reset()
    data, ep_infos = runner.run(p, FLAGS.plan.n_trpo_samples)
    # Mean (over samples) of the per-step log-probability of the taken
    # actions under the policy, summed over action dimensions.
    logp = p(data.state).log_prob(data.action).reduce_sum(axis=1).reduce_mean()
    logp = tf.get_default_session().run(logp)
    print ("state_mean:", np.mean(data.state))
    print ("action_mean:", np.mean(data.action))
    print ("warmup_logpac_mean:", logp)
def testeval(policy, runner):
    """Return the mean episode return of `policy` over one batch of
    `FLAGS.rollout.n_test_samples` test samples collected by `runner`."""
    runner.reset()
    _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)
    return np.mean([info['return'] for info in ep_infos])
def evaluate(settings, tag):
    """Evaluate every (runner, policy, name) triple in `settings`, logging
    mean/std episode returns, and return the mean returns for the entries
    named 'Real Env' and 'Virt Env' (which must be present)."""
    res = {}
    for runner, policy, name in settings:
        runner.reset()
        _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)
        ep_returns = np.array([info['return'] for info in ep_infos])
        mean_return = np.mean(ep_returns)
        res[name] = mean_return
        logger.info('Tag = %s, Reward on %s (%d episodes): mean = %.6f, std = %.6f', tag, name,
                    len(ep_returns), mean_return, np.std(ep_returns))
    return res['Real Env'], res['Virt Env']
def add_multi_step(src: Dataset, dst: Dataset):
    # Append `src` to `dst`, forcing the trailing transition (one per env)
    # to be flagged as a timeout so multi-step sampling treats it as an
    # episode boundary.
    # NOTE(review): n_envs is hard-coded to 1 here even though the file's
    # runners use FLAGS.plan.n_envs — presumably src always comes from a
    # single-env layout at this point; confirm against callers.
    n_envs = 1
    dst.extend(src[:-n_envs])
    # Copy the tail so setting the timeout flag does not mutate `src`.
    ending = src[-n_envs:].copy()
    ending.timeout = True
    dst.extend(ending)
def make_real_runner(n_envs, task_config=None):
    """Build a Runner over `n_envs` batched copies of the real (non-virtual)
    environment `FLAGS.env.id`, configured with `task_config`."""
    # Local import — presumably to defer/avoid circular imports at module
    # load time; confirm before moving to the top of the file.
    from slbo.envs.batched_env import BatchedEnv
    batched_env = BatchedEnv([make_env(FLAGS.env.id, task_config=task_config) for _ in range(n_envs)])
    return Runner(batched_env, rescale_action=True, **FLAGS.runner.as_dict())
@click.command()
@click.option('--setting', default='default')
@click.option('--adv', default=1)
@click.option('--gpu', default=0)
@click.option('--debug', is_flag=True, default=False)
@click.option('--taskname', default='Ant2D')
@click.option('--verbose', is_flag=True, default=False)
@click.option('--test', is_flag=True, default=False)
@click.option('--warmupent', default=0.005)
@click.option('--alpha', default=1.0)
@click.option('--beta', default=1.0)
@click.option('--snapshot', default=1)
@click.option('--testadv', default=0)
@click.option('--seed', default=1)
@click.option('--nsample', default=10000)
@click.option('--fixedvel', default=None)
@click.option('--initnslbo', default=20)
@click.option('--nslbo', default=3)
@click.option('--warmniter', default=40)
@click.option('--slboniter', default=20)
@click.option('--piter', default=20)
@click.option('--miter', default=100)
@click.option('--atype', default='gae') # gae, 1step, ret, adv
@click.option('--video', is_flag=True, default=False)
@click.option('--maxstep', default=1)
@click.option('--genadvstrategy', default=None)
@click.option('--inittask', default='none')
@click.option('--decay', default='joint')
@click.option('--testgiven', default=None)
@click.option('--testnum', default=1)
@click.option('--testparam', default='')
def main(setting, adv, gpu, debug, taskname, verbose, test, warmupent, alpha, beta, snapshot, testadv, seed, nsample, fixedvel, initnslbo, nslbo, warmniter, slboniter, piter, miter, atype, video, maxstep, genadvstrategy, inittask, decay, testgiven, testnum, testparam):
print ('warmupent:', warmupent)
print ("seed:", seed)
setting = os.path.join('./data/', setting)
#FLAGS.run_id = setting
FLAGS.rollout.n_train_samples = 10000
FLAGS.rollout.n_dev_samples = 10000
FLAGS.rollout.n_test_samples = 10000
FLAGS.plan.n_trpo_samples = 10000
if taskname == 'HC':
FLAGS.env.id = 'HalfCheetahTask-v2'
elif taskname == 'HC2D':
FLAGS.env.id = 'HalfCheetah2D-v2'
elif taskname == 'HClinearstate':
FLAGS.env.id = 'HalfCheetahLinearState-v2'
elif taskname == 'HCgoalstate':
FLAGS.env.id = 'HalfCheetahGoalState-v2'
elif taskname == 'Hopper2D':
FLAGS.env.id = 'Hopper2D-v2'
elif taskname == 'Walker2D':
FLAGS.env.id = 'Walker2D-v2'
elif taskname == 'Ant3D':
FLAGS.env.id = 'Ant3DTask-v2'
elif taskname == 'Ant2D':
FLAGS.env.id = 'Ant2DTask-v2'
else:
raise Exception(f'Unsupported taskname: {taskname}')
if not os.path.isdir(setting):
os.makedirs(setting)
if not test:
filename = f'res_{taskname}_adv{adv}.txt'
infofilename = f'res_{taskname}_adv{adv}.npy'
filename = setting+'/'+filename
infofilename = setting+'/'+infofilename
fout = open(filename, 'w')
else:
maxstep = 100
logger.info(f'fixedvel={fixedvel}')
if testadv:
logger.info('Test with adversarial generated tasks!')
logger.info(f'testadv=1, maxstep={maxstep}, using model revert!')
else:
logger.info('We still do not consider this senario: test with random tasks')
print ('adv=', adv)
FLAGS.seed = seed
FLAGS.set_seed()
FLAGS.freeze()
print ("FLAGS.log_dir:", FLAGS.log_dir)
if test:
model_load = f'{FLAGS.log_dir}/{taskname}-stage-{snapshot}.npy'
else:
model_load = None
print ("model_load:", model_load)
task = make_task(FLAGS.env.id)
env = make_env(FLAGS.env.id, task_config=task)
dim_state = int(np.prod(env.observation_space.shape))
dim_action = int(np.prod(env.action_space.shape))
env.verify()
normalizers = Normalizers(dim_action=dim_action, dim_state=dim_state)
normalizers_copy = Normalizers(dim_action=dim_action, dim_state=dim_state)
normalizers_parameters = normalizers.parameters(trainable=False, non_trainable=True)
normalizers_copy_parameters = normalizers_copy.parameters(trainable=False, non_trainable=True)
copy_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_copy_parameters, normalizers_parameters)])
revert_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_parameters, normalizers_copy_parameters)])
dtype = gen_dtype(env, 'state action next_state reward done timeout')
train_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
dev_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
task_train_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]
task_dev_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]
print ("state and action dim:", dim_state, dim_action)
policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())
warmup_policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())
print (policy.parameters())
print (warmup_policy.parameters())
sync_warmup_policy = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_policy.parameters(), policy.parameters())])
# batched noises
noise = OUNoise(env.action_space, theta=FLAGS.OUNoise.theta, sigma=FLAGS.OUNoise.sigma, shape=(1, dim_action))
vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
warmup_vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_vfn.parameters(), vfn.parameters())])
model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_model.parameters(), model.parameters())])
shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes) for n in range(FLAGS.warmup.n_shadow_models)]
sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(model.parameters(), lazy_model.parameters())])
sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(lazy_model.parameters(), model.parameters())])
virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)
virt_runner = Runner(virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
virt_env_copy = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), nsample//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model)
virt_runner_copy = Runner(virt_env_copy, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
extra_runners = {}
for sam in [1000, 2000, 4000, 8000, 10000, 16000]:
extra_runners[f'train{sam}']= Runner(VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), sam//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model), **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
extra_runners[f'collect{sam}'] = make_real_runner(sam//FLAGS.plan.max_steps, task_config=task)
warmup_virt_env = VirtualEnv(warmup_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)
warmup_virt_runner = Runner(warmup_virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
logger.info('FLAGS.plan.n_envs=%d' % FLAGS.plan.n_envs)
shadow_envs = [VirtualEnv(shadow_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model) for shadow_model in shadow_models]
shadow_runners = [Runner(shadow_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps}) for shadow_env in shadow_envs]
criterion_map = {
'L1': nn.L1Loss(),
'L2': nn.L2Loss(),
'MSE': nn.MSELoss(),
}
criterion = criterion_map[FLAGS.model.loss]
loss_mod = MultiStepLoss(model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step)
loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)
shadow_loss_mods = [MultiStepLoss(shadow_model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) for shadow_model in shadow_models]
for shadow_loss_mod in shadow_loss_mods:
shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)
algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action, **FLAGS.TRPO.as_dict())
advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task, alpha=alpha, beta=beta, nsample=nsample, atype=atype)
tf.get_default_session().run(tf.global_variables_initializer())
print ("norm params:", normalizers_parameters)
print ("norm_copy params:", normalizers_copy_parameters)
norm_before = tf.get_default_session().run(normalizers_parameters)
print ("norm_before:", norm_before)
assert FLAGS.algorithm != 'MF', "don't support model free for now"
print (f"n_envs for task: {nsample}//{FLAGS.plan.max_steps}={nsample//FLAGS.plan.max_steps}")
runners = {
'test': make_real_runner(FLAGS.plan.n_envs, task_config=task),
'collect': make_real_runner(FLAGS.plan.n_envs, task_config=task), #1
'collect_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task), #1
'dev': make_real_runner(FLAGS.plan.n_envs, task_config=task),
'train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner,
'train_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner_copy,
'warmup_train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else warmup_virt_runner,
}
for name, runner in extra_runners.items():
runners[name] = runner
print ("runner name is ", name)
settings = [(runners['test'], policy, 'Real Env'), (runners['train'], policy, 'Virt Env')]
for (i, runner) in enumerate(shadow_runners):
settings.append((runner, policy, f'Shadow Env-{i}'))
saver = nn.ModuleDict({'policy': policy, 'model': model, 'vfn': vfn, 'normalizers': normalizers}) #, 'loss_mod': loss_mod})
print(saver)
max_ent_coef = FLAGS.TRPO.ent_coef
skip_metrics = []
TASK_NUM = 0
if test:
verbose = True
else:
task.init()
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
if test:
ITERS = testnum + 1
warmup_n_iters = warmniter
warmup_n_policy_iters = piter
warmup_n_model_iters = miter
slbo_n_iters = slboniter
slbo_n_policy_iters = piter
slbo_n_model_iters = miter
else:
ITERS = FLAGS.task.n_iters
warmup_n_iters = warmniter
warmup_n_policy_iters = piter
warmup_n_model_iters = miter
slbo_n_iters = slboniter
slbo_n_policy_iters = piter
slbo_n_model_iters = miter
print (f"Total Iters = {ITERS}")
alltaskres = []
generated_adversarial_task = []
init_generator = False
logger.info(f'inittask:{inittask}')
if not test:
if inittask == 'none':
pass
elif not (os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo0.pkl') and os.path.exists(f'./{inittask}/{taskname}.task0.saver.npy')):
init_generator = True
else:
logger.info('Load the first task dataset!')
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue
traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(traindata, train_set)
add_multi_step(traindata, task_train_sets[0])
logger.info(f'load trainset-{i} {len(traindata)}')
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue
devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(devdata, task_dev_sets[0])
logger.info(f'load devset-{i} {len(devdata)}')
logger.info('Load the first task saver!')
saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()])
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
logger.info('Loaded normalizers:')
load_norm = tf.get_default_session().run(normalizers_parameters)
logger.info(load_norm)
TASK_NUM = 1
########################## debug #########################
#for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model train loss:', total_loss)
#for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model val loss:', total_loss)
##exit(0)
########################## debug #########################
else:
test_summary = {
'task':[],
'random':[],
'warmup':[],
'warmupprocess':[],
'slbo':[],
}
logger.info('Testing mode!')
train_tasknum = snapshot + 1
test_tasknum = testnum
logger.info(f'train_tasknum = {train_tasknum}, test_tasknum = {test_tasknum}')
assert(testgiven is not None)
if 'noent' in testparam: warmupent = 0.
have_data = False
task_generator = 'fixed' # random or fixed
if testgiven[-4:] == '.pkl':
f = testgiven
logger.info(f'Load all tasks from {f}!')
task.fixed_velocities = pickle.load(open(f, 'rb'))
logger.info(f"Test on task")
logger.info(task.fixed_velocities)
logger.info(f"Task number: {np.array(task.fixed_velocities).shape}")
else:
f = f'{testgiven}/all_task_parameter.pkl'
gen_adv_task = pickle.load(open(f, 'rb'))
logger.info(f'Load all adversarial task from {f}!')
task.fixed_velocities = gen_adv_task[train_tasknum: train_tasknum + test_tasknum]
logger.info(f"Test random method on task {train_tasknum}~{train_tasknum+test_tasknum}:")
logger.info(task.fixed_velocities)
logger.info(f"Task number: {np.array(task.fixed_velocities).shape}")
def load_data_during_test():
if inittask != 'none':
logger.info('Load the first task dataset!')
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue
traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(traindata, train_set)
add_multi_step(traindata, task_train_sets[0])
logger.info(f'load task0 trainset{i} size={len(traindata)}')
have_data = True
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue
devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(devdata, task_dev_sets[0])
logger.info(f'load task0 devset{i} size={len(devdata)}')
have_data = True
logger.info(f'Load all task dataset from {setting}!')
for t in range(0,train_tasknum):
for i in range(20):
if not os.path.exists(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl'): continue
traindata = pickle.load(open(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl', 'rb'))
add_multi_step(traindata, train_set)
add_multi_step(traindata, task_train_sets[t])
logger.info(f'load task{t} trainset{i} size={len(traindata)}')
if not os.path.exists(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl'): continue
devdata = pickle.load(open(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl', 'rb'))
add_multi_step(devdata, task_dev_sets[t])
logger.info(f'load task{t} devset{i} size={len(devdata)}')
have_data = True
load_data_during_test()
logger.info(f'Load the task{snapshot} saver!')
saver.load_state_dict(np.load(f'./{setting}/{taskname}.task{snapshot}.saver.npy', allow_pickle=True)[()])
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
logger.info('Loaded normalizers:')
load_norm = tf.get_default_session().run(normalizers_parameters)
logger.info(load_norm)
TASK_NUM = train_tasknum
TEST_TASK_NUM = 0
########################## debug #########################
#if have_data:
# for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model train loss:', total_loss)
# for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model val loss:', total_loss)
##exit(0)
######################### debug #########################
slbo_n_stages = nslbo
print (f"each task will do nslbo = {nslbo}")
for param in model.parameters():
param.invalidate()
all_task_parameter = []
while (not test and TASK_NUM < ITERS) or (test and TEST_TASK_NUM < ITERS):
# first task or maxstep, update the model. Otherwise, revert the model
logger.info('Sync model from lazymodel')
tf.get_default_session().run(sync_model_from_lazymodel)
taskres = {}
if 'goal_velocity' not in taskres.keys():
taskres['goal_velocity'] = []
if not test and inittask == 'none':
slbo_n_stages = nslbo
elif not test and TASK_NUM == 0:
slbo_n_stages = initnslbo
elif not test and TASK_NUM > 0:
slbo_n_stages = nslbo
time_start = time.time()
trpo_warmup = []
trpo_slbo = []
surprisal = []
train_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)
train_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)
val_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)
val_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)
# NOTE: For each test task, we should reset model to the loaded one, and randomly initialize policy and vfn
#if test:
# saver.load_state_dict(np.load(model_load, allow_pickle=True)[()])
# logger.warning('Load model from %s', model_load)
if test:
logger.info("################################################## TESTING TASK %d ################################################", TEST_TASK_NUM)
logger.info(f'TEST_TASK_NUM={TEST_TASK_NUM}, TASK_NUM={TASK_NUM}')
logger.warning('Revert model and normalizers')
tf.get_default_session().run(sync_model_from_lazymodel)
tf.get_default_session().run(revert_normalizers)
else:
logger.info("################################################## TRAINING TASK %d ################################################", TASK_NUM)
if test:
test_returns = []
test_summary['warmupprocess'].append([])
test_summary['slbo'].append([])
if not test: #and FLAGS.task.method == 'random':
if inittask != 'none' and TASK_NUM == 1:
if 'HClinearstate' in taskname:
task.init([0.2] * task.n_params)
else:
task.init([0.] * task.n_params)
else:
if TASK_NUM > 0: #fix the 1st tasks during training
if adv == 0:
task.random_sample('uniform')
elif adv == 2:
task.random_sample('normal')
elif adv == 1:
if TASK_NUM == 1 and inittask != 'none':
task.random_sample()
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
task.sample(adv=True)
logger.info('Task Sampled: %s', task.goal_velocity)
taskres['goal_velocity'].append(task.goal_velocity)
all_task_parameter.append(task.goal_velocity)
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
if test:
if task_generator == 'fixed':
task.goal_velocity = task.fixed_velocities[TEST_TASK_NUM] #TODO
logger.info('Task Fixed: %s', task.goal_velocity)
if task_generator == 'random':
task.sample(adv=False) #sample randomly
logger.info('Task Sampled: %s', task.goal_velocity)
if task_generator == 'adv':
task.sample(adv=True) #sample adversarially
logger.info('Task Sampled: %s', task.goal_velocity)
generated_adversarial_task.append(task.goal_velocity)
logger.info('Tasks dump!')
assert (task_generator == 'fixed')
test_summary['task'].append(task.goal_velocity)
if FLAGS.task.reset_policy:
# NOTE: reset policy and valuefunc
logger.info("Resetting Policy")
pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after))
logger.info("Resetting Valuefunc")
tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))
tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))
for p in warmup_policy.parameters(): p.invalidate()
for p in warmup_vfn.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
for p in vfn.parameters(): p.invalidate()
last_end = None
drops = []
evaluate(settings, 'pre-warm-up')
returns_pre_warmup = testeval(policy, runners['collect'])
if test:
test_returns.append(returns_pre_warmup)
test_summary['random'].append(returns_pre_warmup)
t1 = time.time()
trpo_time = 0
logger.info('----------------------------- Warmup for %d iterations ------------------------' % warmup_n_iters)
if decay == 'joint':
logger.info('Joint train from a joint dataset')
elif decay == 'taskid':
Z = np.sum([float(i+1) for i in range(0, TASK_NUM)])
prop = [float(taskid+1) / Z for taskid in range(TASK_NUM)]
logger.info(f'Sampling prop={prop}, Z={Z}')
elif decay == 'none':
Z = TASK_NUM
prop = [1. / TASK_NUM for _ in range(TASK_NUM)]
logger.info(f'Sampling prop={prop}, Z={Z}')
for i in range(warmup_n_iters):
#exit(0)
if TASK_NUM == 0 and not test and not model_load:
logger.info('Break because TASK_NUM=0')
break
losses = deque(maxlen=warmup_n_model_iters)
grad_norm_meter = AverageMeter()
n_model_iters = warmup_n_model_iters
drop_plot = 0
if test and verbose:
logger.info(f'warmup iter #{i}/{warmup_n_iters}, Do Not train Model during warmup of test time')
if 'warmup_task_val_loss' not in taskres.keys():
taskres['warmup_task_val_loss'] = [[] for _ in range(TASK_NUM)]
if verbose: logger.info('Train Model for %d iterations' % n_model_iters)
model_time = time.time()
if not test or (test and have_data):
for _ in range(n_model_iters):
if decay == 'joint':
samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
else:
all_samples = []
for taskid in range(TASK_NUM):
samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)
all_samples.append(samples_i)
samples = np.concatenate(all_samples, axis=1).view(np.recarray)
_, train_loss, grad_norm = loss_mod.get_loss(
samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,
fetch='train loss grad_norm')
losses.append(train_loss.mean())
grad_norm_meter.update(grad_norm)
# ideally, we should define an Optimizer class, which takes parameters as inputs.
# The `update` method of `Optimizer` will invalidate all parameters during updates.
for param in model.parameters():
param.invalidate()
model_time = time.time() - model_time
if i % FLAGS.model.validation_freq == 0:
task_val_loss = []
val_time = time.time()
for task_idx in range(TASK_NUM):
total_loss = []
for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):
samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
total_loss.append(loss_i.mean())
total_loss = np.mean(total_loss)
task_val_loss.append(total_loss)
taskres['warmup_task_val_loss'][task_idx].append(total_loss)
val_time = time.time() - val_time
val_loss = np.mean(task_val_loss)
val_losses_warmup.append(val_loss)
train_losses_warmup.append(np.mean(losses))
if np.isnan(val_loss) or np.isnan(np.mean(losses)):
logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
logger.info('# Warmup Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, drop = %.2f, model_time=%d, trpo_time=%d, val_time=%d',
i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), drop_plot, model_time, trpo_time, val_time)
logger.info(f'# task_val_loss: {task_val_loss}')
if verbose: logger.info('Train policy for %d iterations' % warmup_n_policy_iters)
trpo_time = time.time()
for n_updates in range(warmup_n_policy_iters):
if FLAGS.algorithm != 'MF' and FLAGS.warmup.start == 'buffer':
runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
else:
runners['train'].reset()
data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data,task)
dist_mean, dist_std, vf_loss, plotinfo = algo.train(warmupent, data, advantages, values)
trpo_warmup.append(plotinfo)
returns = [info['return'] for info in ep_infos]
if n_updates == 0:
if last_end is not None:
drop_plot = last_end - np.mean(returns)
drops.append(last_end - np.mean(returns))
last_end = np.mean(returns)
if n_updates == warmup_n_policy_iters-1:
logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '
'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
dist_std, dist_mean, vf_loss)
trpo_time = time.time() - trpo_time
if i % FLAGS.warmup.n_evaluate_iters == 0 or i == warmup_n_iters-1:# and i != 0:
real_eval, virt_eval = evaluate(settings, 'iteration')
if 'warmup_real_eval' not in taskres.keys(): taskres['warmup_real_eval'] = []
if 'warmup_virt_eval' not in taskres.keys(): taskres['warmup_virt_eval'] = []
taskres['warmup_real_eval'].append(real_eval)
taskres['warmup_virt_eval'].append(virt_eval)
if test:
test_summary['warmupprocess'][TEST_TASK_NUM].append(real_eval)
if not test:
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
else:
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-warm{warmup_n_iters}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
taskres['warmup_monitor'] = [res]
t2 = time.time()
warmup_time = t2 - t1
evaluate(settings, 'post-warm-up')
returns_post_warmup = testeval(policy, runners['collect'])
if test:
test_returns.append(returns_post_warmup)
test_summary['warmup'].append(returns_post_warmup)
print ("warmupprocess:", test_summary['warmupprocess'][TEST_TASK_NUM])
logger.info('Sync warmup policy and vfn and model')
tf.get_default_session().run([sync_warmup_policy, sync_warmup_vfn, sync_warmup_model])
for p in warmup_policy.parameters(): p.invalidate()
for p in warmup_vfn.parameters(): p.invalidate()
for p in warmup_model.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
task.parameters().invalidate()
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After WARMUP, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])
print ("mod_norm:", np.linalg.norm(mod), "warm_mod_norm:", np.linalg.norm(warm_mod))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
warmup_collect_virt = []
eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')
warmup_collect_real = []
logger.info('--------------------------------------------- SLBO for %d outer stages -----------------------------------------' % slbo_n_stages)
for T in range(slbo_n_stages):
logger.info('-------- Starting Stage %d ---------', T)
evaluate(settings, 'episode')
# collect data
if not test:
logger.info('-------- Collect data from REAL env for %d samples --------' % FLAGS.rollout.n_train_samples)
recent_train_set, ep_infos = runners['collect'].run(noise.make(policy), FLAGS.rollout.n_train_samples)
recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)
else:
logger.info('-------- Collect data from REAL env for %d samples --------' % 2000)
recent_train_set, ep_infos = runners['collect2000'].run(noise.make(policy), 2000)
recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)
logger.info('save setting dataset! trainset and devset!')
if not test:
pickle.dump(recent_train_set, open(f'./{setting}/{taskname}.trainset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))
pickle.dump(recent_dev_set, open(f'./{setting}/{taskname}.devset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))
# Add real data to task_train_sets and task_dev_sets
#if not test:
# add_multi_step(recent_train_set, train_set)
add_multi_step(recent_train_set, task_train_sets[TASK_NUM])
add_multi_step(recent_dev_set, task_dev_sets[TASK_NUM])
#if not test:
# states = recent_train_set.state
# mean = np.mean(states, axis=0)
# std = np.std(states, axis=0)
# min_ = np.min(states, axis=0)
# max_ = np.max(states, axis=0)
# states_stat = {"mean": mean, "std": std, "min": min_, "max": max_}
# evaluate the surprisal of collected real data for model
new_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
add_multi_step(recent_train_set, new_set)
losses_new = []
for i in range(FLAGS.rollout.n_train_samples // FLAGS.model.dev_batch_size + 1):
samples = new_set.sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
loss = loss.mean()
losses_new.append(loss)
losses_new_mean = np.mean(losses_new)
surprisal.append(losses_new_mean)
logger.info(f'(surprisal) model loss on new collected data is {losses_new_mean}')
add_multi_step(recent_train_set, train_set)
add_multi_step(
runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)[0],
dev_set,
)
returns = np.array([ep_info['return'] for ep_info in ep_infos])
if len(returns) > 0:
logger.info("episode: %s", np.mean(returns))
if T == 0: # check
samples = train_set.sample_multi_step(100, 1, FLAGS.model.multi_step)
for i in range(FLAGS.model.multi_step - 1):
masks = 1 - (samples.done[i] | samples.timeout[i])[..., np.newaxis]
assert np.allclose(samples.state[i + 1] * masks, samples.next_state[i] * masks)
normalizers.state.update(recent_train_set.state)
normalizers.action.update(recent_train_set.action)
normalizers.diff.update(recent_train_set.next_state - recent_train_set.state)
if TASK_NUM == 0: #In the 1st task, no warmup, but we validate loss of the random model
samples = dev_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
loss = loss.mean()
val_losses_warmup.append(loss)
logger.info('SLBO for %d inner stages' % slbo_n_iters)
model_time, trpo_time = 0, 0
if 'slbo_task_val_loss' not in taskres.keys():
taskres['slbo_task_val_loss'] = [[] for _ in range(TASK_NUM+1)]
if decay == 'joint':
logger.info('Joint train from a joint dataset')
elif decay == 'taskid':
Z = np.sum([float(i+1) for i in range(0, TASK_NUM+1)])
prop = [float(taskid+1) / Z for taskid in range(TASK_NUM+1)]
logger.info(f'Sampling prop={prop}, Z={Z}')
elif decay == 'none':
Z = TASK_NUM+1
prop = [1. / float(Z) for _ in range(Z)]
logger.info(f'Sampling prop={prop}, Z={Z}')
for i in range(slbo_n_iters):
if i % FLAGS.slbo.n_evaluate_iters == 0 or i == slbo_n_iters-1:# and i != 0:
# cur_actions = policy.eval('actions_mean actions_std', states=recent_states)
# kl_old_new = gaussian_kl(*ref_actions, *cur_actions).sum(axis=1).mean()
# logger.info('KL(old || cur) = %.6f', kl_old_new)
real_eval, virt_eval = evaluate(settings, 'iteration')
if 'slbo_real_eval' not in taskres.keys(): taskres['slbo_real_eval'] = []
if 'slbo_virt_eval' not in taskres.keys(): taskres['slbo_virt_eval'] = []
taskres['slbo_real_eval'].append(real_eval)
taskres['slbo_virt_eval'].append(virt_eval)
losses = deque(maxlen=slbo_n_model_iters)
grad_norm_meter = AverageMeter()
n_model_iters = slbo_n_model_iters
if verbose: logger.info('Train model %d iterations'% n_model_iters)
model_time = time.time()
for _ in range(n_model_iters):
if decay == 'joint':
samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
else:
all_samples = []
sample_size = 0
for taskid in range(TASK_NUM+1):
samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)
all_samples.append(samples_i)
sample_size += int(FLAGS.model.train_batch_size*prop[taskid])+1
samples = np.concatenate(all_samples, axis=1).view(np.recarray)
_, train_loss, grad_norm = loss_mod.get_loss(
samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,
fetch='train loss grad_norm')
losses.append(train_loss.mean())
grad_norm_meter.update(grad_norm)
# ideally, we should define an Optimizer class, which takes parameters as inputs.
# The `update` method of `Optimizer` will invalidate all parameters during updates.
for param in model.parameters():
param.invalidate()
model_time = time.time() - model_time
if i % FLAGS.model.validation_freq == 0:
task_val_loss = []
val_time = time.time()
for task_idx in range(TASK_NUM+1):
total_loss = []
for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):
samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
total_loss.append(loss_i.mean())
total_loss = np.mean(total_loss)
task_val_loss.append(total_loss)
taskres['slbo_task_val_loss'][task_idx].append(total_loss)
val_loss = np.mean(task_val_loss)
val_time = time.time() - val_time
if np.isnan(val_loss) or np.isnan(np.mean(losses)):
logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
logger.info('# SLBO Inner Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, model_time=%d, trpo_time=%d, val_time=%d',
i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), model_time, trpo_time, val_time)
logger.info(f'# task_val_loss: {task_val_loss}')
model_time, trpo_time = 0, 0
val_losses_slbo.append(val_loss)
train_losses_slbo.append(np.mean(losses))
if verbose: logger.info('Train policy %d iterations'% slbo_n_policy_iters)
trpo_time = time.time()
for n_updates in range(slbo_n_policy_iters):
if FLAGS.algorithm != 'MF' and FLAGS.slbo.start == 'buffer':
runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
else:
runners['train'].reset()
data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data, task)
dist_mean, dist_std, vf_loss, plotinfo = algo.train(max_ent_coef, data, advantages, values)
trpo_slbo.append(plotinfo)
returns = [info['return'] for info in ep_infos]
if n_updates == slbo_n_policy_iters-1:
logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '
'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
dist_std, dist_mean, vf_loss)
trpo_time = time.time() - trpo_time
if not test and (TASK_NUM) % FLAGS.ckpt.n_save_stages == 0:
np.save(f'{FLAGS.log_dir}/{taskname}-stage-{TASK_NUM}', saver.state_dict())
np.save(f'{FLAGS.log_dir}/{taskname}-final', saver.state_dict())
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
if 'slbo_monitor' not in taskres.keys():
taskres['slbo_monitor'] = []
taskres['slbo_monitor'].append(res)
if not test and FLAGS.ckpt.n_save_stages == 1:
pickle.dump(recent_train_set, open(f'{FLAGS.log_dir}/stage-{TASK_NUM}.inc-buf.pkl', 'wb'))
if test:
returns_post_slbo_update = testeval(policy, runners['collect'])
test_returns.append(returns_post_slbo_update)
real_eval, virt_eval = evaluate(settings, 'iteration')
test_summary['slbo'][TEST_TASK_NUM].append(real_eval)
test_summary[f'slbo{T+1}'].append(returns_post_slbo_update)
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
print ('test_summary_slbo:', test_summary['slbo'][TEST_TASK_NUM])
if not test:
np.save(f'{setting}/{taskname}.task{TASK_NUM}.saver', saver.state_dict())
np.save(f'{setting}/{taskname}.final.saver', saver.state_dict())
if init_generator and TASK_NUM==0:
print ('finished init generator!')
exit(0)
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After SLBO, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
eval_rollout(runners['train'], policy, 'Use optimal policy to collect data from real env')
optimal_collect_real = []
t3 = time.time()
slbo_time = t3 - t2
evaluate(settings, 'post-slbo')
logger.info(f'Warmup time = {warmup_time}, SLBO time = {slbo_time}')
alltaskres.append(taskres)
if not test:
pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl', 'wb'))
pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl', 'wb'))
else:
pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl.{testparam}', 'wb'))
pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl.{testparam}', 'wb'))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
if not test:
#if TASK_NUM > 0:
if TASK_NUM > -1:
task_params_before, final_grad, advtask_info = advtask.train(runners['train_copy'], runners['collect_copy'], warmup_collect_virt, warmup_collect_real, optimal_collect_real, returns_pre_warmup, val_losses_warmup, val_losses_slbo, train_losses_warmup, train_losses_slbo, surprisal, trpo_warmup, trpo_slbo, fout, infofilename, extra_runners)
# first task or maxstep, update the model
if not test and (TASK_NUM == 0 or TASK_NUM % maxstep == 0):
logger.info(f"task_num={TASK_NUM}, sync_model_to_lazymodel")
tf.get_default_session().run(sync_model_to_lazymodel)
if test:
pickle.dump(test_summary, open(f'{setting}/test_summary.pkl.{testparam}', 'wb'))
TEST_TASK_NUM += 1
TASK_NUM = train_tasknum
#task_train_sets[TASK_NUM].clear()
#task_dev_sets[TASK_NUM].clear()
for tt in range(TASK_NUM+1):
task_train_sets[tt].clear()
task_dev_sets[tt].clear()
train_set.clear()
load_data_during_test()
continue
task_params_after = task_params_before + final_grad * alpha
task.set_parameters(task_params_after)
if not test:
advtask_info['alpha'].append(alpha)
with open(infofilename, 'wb') as handle:
pickle.dump(advtask_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ('>>>>>>dump')
TASK_NUM += 1
time_end = time.time()
print (f"Task Done! Total Time Consumed for 1 task = {time_end - time_start}s")
if __name__ == '__main__':
    # Run the whole experiment inside a single default TF session.
    session_config = get_tf_config()
    with tf.Session(config=session_config):
        main()
| 54.30547 | 354 | 0.609137 | import sys
sys.path = ['./rllab/'] + sys.path
print (sys.path)
import pickle
import os,time
from collections import deque
import tensorflow as tf
import numpy as np
import lunzi.nn as nn
from lunzi.Logger import logger
from slbo.utils.average_meter import AverageMeter
from slbo.utils.flags import FLAGS
from slbo.utils.dataset import Dataset, gen_dtype
from slbo.utils.OU_noise import OUNoise
from slbo.utils.normalizer import Normalizers
from slbo.utils.tf_utils import get_tf_config
from slbo.utils.runner import Runner
from slbo.policies.gaussian_mlp_policy import GaussianMLPPolicy
from slbo.envs.virtual_env import VirtualEnv
from slbo.dynamics_model import DynamicsModel
from slbo.v_function.mlp_v_function import MLPVFunction
from slbo.partial_envs import make_env, make_task
from slbo.loss.multi_step_loss import MultiStepLoss
from slbo.algos.TRPO import TRPO
from slbo.algos.ADVTASK import ADVTASK
from slbo.utils.tf_utils import initialize_uninitialized
import click
from gym.wrappers.monitor import Monitor
import gym
import scipy.misc
import scipy.ndimage
def render(env_, policy=None):
    """Roll out up to 200 steps in ``env_`` and collect observations.

    If ``policy`` is given, actions come from ``policy.get_actions`` on the
    observation reshaped to a batch of one; otherwise actions are sampled
    uniformly from the action space.

    Returns a dict ``{'obs': list_of_observations, 'return': episode_return}``.
    """
    logger.info('start render video...')
    observation = env_.reset()
    obs = []
    return_ = 0.
    cnt_ = 0
    for t in range(200):
        cnt_ += 1
        # NOTE(review): assumes the observation is a flat array reshapeable
        # to (1, dim) — TODO confirm against the wrapped env.
        observation = observation.reshape(1, -1)
        obs.append(observation)
        if policy is not None:
            # Policy works on a batch; take the single action back out.
            action = policy.get_actions(observation)[0]
        else:
            action = env_.action_space.sample()
        observation, reward, done, info = env_.step(action)
        # Accumulate BEFORE breaking: the original broke first and silently
        # dropped the reward of the terminal transition.
        return_ += reward
        if done:
            break
    logger.info (f"render {cnt_} steps, return = {return_:.6f}")
    res = {'obs': obs, 'return': return_}
    return res
def eval_rollout(runner, p, des):
    """Collect one batch with policy ``p`` and print diagnostic statistics."""
    logger.info(des)
    runner.reset()
    data, ep_infos = runner.run(p, FLAGS.plan.n_trpo_samples)
    # Mean (over samples) of the per-step summed log-probability of the
    # taken actions under `p`.
    logp_op = p(data.state).log_prob(data.action).reduce_sum(axis=1).reduce_mean()
    logp_val = tf.get_default_session().run(logp_op)
    print ("state_mean:", np.mean(data.state))
    print ("action_mean:", np.mean(data.action))
    print ("warmup_logpac_mean:", logp_val)
def testeval(policy, runner):
    """Run ``policy`` for ``n_test_samples`` steps; return the mean episode return."""
    runner.reset()
    _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)
    episode_returns = [ep['return'] for ep in ep_infos]
    return np.mean(episode_returns)
def evaluate(settings, tag):
    """Evaluate each (runner, policy, name) triple in ``settings``.

    Logs mean/std of the episode returns per environment and returns the
    pair of mean returns for 'Real Env' and 'Virt Env'.
    """
    res = {}
    for runner, policy, name in settings:
        runner.reset()
        _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)
        episode_returns = np.array([info['return'] for info in ep_infos])
        res[name] = np.mean(episode_returns)
        logger.info('Tag = %s, Reward on %s (%d episodes): mean = %.6f, std = %.6f', tag, name,
                    len(episode_returns), np.mean(episode_returns), np.std(episode_returns))
    return res['Real Env'], res['Virt Env']
def add_multi_step(src: Dataset, dst: Dataset):
    """Append ``src`` to ``dst``, flagging the final sample as a timeout."""
    # Everything but the last sample is copied verbatim.
    dst.extend(src[:-1])
    # Mark the trailing sample so multi-step sampling treats the episode
    # boundary correctly.
    tail = src[-1:].copy()
    tail.timeout = True
    dst.extend(tail)
def make_real_runner(n_envs, task_config=None):
    """Build a Runner over ``n_envs`` copies of the real environment."""
    from slbo.envs.batched_env import BatchedEnv
    envs = [make_env(FLAGS.env.id, task_config=task_config) for _ in range(n_envs)]
    return Runner(BatchedEnv(envs), rescale_action=True, **FLAGS.runner.as_dict())
@click.command()
@click.option('--setting', default='default')
@click.option('--adv', default=1)
@click.option('--gpu', default=0)
@click.option('--debug', is_flag=True, default=False)
@click.option('--taskname', default='Ant2D')
@click.option('--verbose', is_flag=True, default=False)
@click.option('--test', is_flag=True, default=False)
@click.option('--warmupent', default=0.005)
@click.option('--alpha', default=1.0)
@click.option('--beta', default=1.0)
@click.option('--snapshot', default=1)
@click.option('--testadv', default=0)
@click.option('--seed', default=1)
@click.option('--nsample', default=10000)
@click.option('--fixedvel', default=None)
@click.option('--initnslbo', default=20)
@click.option('--nslbo', default=3)
@click.option('--warmniter', default=40)
@click.option('--slboniter', default=20)
@click.option('--piter', default=20)
@click.option('--miter', default=100)
@click.option('--atype', default='gae')
@click.option('--video', is_flag=True, default=False)
@click.option('--maxstep', default=1)
@click.option('--genadvstrategy', default=None)
@click.option('--inittask', default='none')
@click.option('--decay', default='joint')
@click.option('--testgiven', default=None)
@click.option('--testnum', default=1)
@click.option('--testparam', default='')
def main(setting, adv, gpu, debug, taskname, verbose, test, warmupent, alpha, beta, snapshot, testadv, seed, nsample, fixedvel, initnslbo, nslbo, warmniter, slboniter, piter, miter, atype, video, maxstep, genadvstrategy, inittask, decay, testgiven, testnum, testparam):
print ('warmupent:', warmupent)
print ("seed:", seed)
setting = os.path.join('./data/', setting)
FLAGS.rollout.n_train_samples = 10000
FLAGS.rollout.n_dev_samples = 10000
FLAGS.rollout.n_test_samples = 10000
FLAGS.plan.n_trpo_samples = 10000
if taskname == 'HC':
FLAGS.env.id = 'HalfCheetahTask-v2'
elif taskname == 'HC2D':
FLAGS.env.id = 'HalfCheetah2D-v2'
elif taskname == 'HClinearstate':
FLAGS.env.id = 'HalfCheetahLinearState-v2'
elif taskname == 'HCgoalstate':
FLAGS.env.id = 'HalfCheetahGoalState-v2'
elif taskname == 'Hopper2D':
FLAGS.env.id = 'Hopper2D-v2'
elif taskname == 'Walker2D':
FLAGS.env.id = 'Walker2D-v2'
elif taskname == 'Ant3D':
FLAGS.env.id = 'Ant3DTask-v2'
elif taskname == 'Ant2D':
FLAGS.env.id = 'Ant2DTask-v2'
else:
raise Exception(f'Unsupported taskname: {taskname}')
if not os.path.isdir(setting):
os.makedirs(setting)
if not test:
filename = f'res_{taskname}_adv{adv}.txt'
infofilename = f'res_{taskname}_adv{adv}.npy'
filename = setting+'/'+filename
infofilename = setting+'/'+infofilename
fout = open(filename, 'w')
else:
maxstep = 100
logger.info(f'fixedvel={fixedvel}')
if testadv:
logger.info('Test with adversarial generated tasks!')
logger.info(f'testadv=1, maxstep={maxstep}, using model revert!')
else:
logger.info('We still do not consider this senario: test with random tasks')
print ('adv=', adv)
FLAGS.seed = seed
FLAGS.set_seed()
FLAGS.freeze()
print ("FLAGS.log_dir:", FLAGS.log_dir)
if test:
model_load = f'{FLAGS.log_dir}/{taskname}-stage-{snapshot}.npy'
else:
model_load = None
print ("model_load:", model_load)
task = make_task(FLAGS.env.id)
env = make_env(FLAGS.env.id, task_config=task)
dim_state = int(np.prod(env.observation_space.shape))
dim_action = int(np.prod(env.action_space.shape))
env.verify()
normalizers = Normalizers(dim_action=dim_action, dim_state=dim_state)
normalizers_copy = Normalizers(dim_action=dim_action, dim_state=dim_state)
normalizers_parameters = normalizers.parameters(trainable=False, non_trainable=True)
normalizers_copy_parameters = normalizers_copy.parameters(trainable=False, non_trainable=True)
copy_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_copy_parameters, normalizers_parameters)])
revert_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_parameters, normalizers_copy_parameters)])
dtype = gen_dtype(env, 'state action next_state reward done timeout')
train_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
dev_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
task_train_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]
task_dev_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]
print ("state and action dim:", dim_state, dim_action)
policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())
warmup_policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())
print (policy.parameters())
print (warmup_policy.parameters())
sync_warmup_policy = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_policy.parameters(), policy.parameters())])
noise = OUNoise(env.action_space, theta=FLAGS.OUNoise.theta, sigma=FLAGS.OUNoise.sigma, shape=(1, dim_action))
vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
warmup_vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)
sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_vfn.parameters(), vfn.parameters())])
model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)
sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_model.parameters(), model.parameters())])
shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes) for n in range(FLAGS.warmup.n_shadow_models)]
sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(model.parameters(), lazy_model.parameters())])
sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(lazy_model.parameters(), model.parameters())])
virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)
virt_runner = Runner(virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
virt_env_copy = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), nsample//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model)
virt_runner_copy = Runner(virt_env_copy, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
extra_runners = {}
for sam in [1000, 2000, 4000, 8000, 10000, 16000]:
extra_runners[f'train{sam}']= Runner(VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), sam//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model), **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
extra_runners[f'collect{sam}'] = make_real_runner(sam//FLAGS.plan.max_steps, task_config=task)
warmup_virt_env = VirtualEnv(warmup_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)
warmup_virt_runner = Runner(warmup_virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})
logger.info('FLAGS.plan.n_envs=%d' % FLAGS.plan.n_envs)
shadow_envs = [VirtualEnv(shadow_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model) for shadow_model in shadow_models]
shadow_runners = [Runner(shadow_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps}) for shadow_env in shadow_envs]
criterion_map = {
'L1': nn.L1Loss(),
'L2': nn.L2Loss(),
'MSE': nn.MSELoss(),
}
criterion = criterion_map[FLAGS.model.loss]
loss_mod = MultiStepLoss(model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step)
loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)
shadow_loss_mods = [MultiStepLoss(shadow_model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) for shadow_model in shadow_models]
for shadow_loss_mod in shadow_loss_mods:
shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)
algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action, **FLAGS.TRPO.as_dict())
advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task, alpha=alpha, beta=beta, nsample=nsample, atype=atype)
tf.get_default_session().run(tf.global_variables_initializer())
print ("norm params:", normalizers_parameters)
print ("norm_copy params:", normalizers_copy_parameters)
norm_before = tf.get_default_session().run(normalizers_parameters)
print ("norm_before:", norm_before)
assert FLAGS.algorithm != 'MF', "don't support model free for now"
print (f"n_envs for task: {nsample}//{FLAGS.plan.max_steps}={nsample//FLAGS.plan.max_steps}")
runners = {
'test': make_real_runner(FLAGS.plan.n_envs, task_config=task),
'collect': make_real_runner(FLAGS.plan.n_envs, task_config=task), #1
'collect_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task), #1
'dev': make_real_runner(FLAGS.plan.n_envs, task_config=task),
'train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner,
'train_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner_copy,
'warmup_train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else warmup_virt_runner,
}
for name, runner in extra_runners.items():
runners[name] = runner
print ("runner name is ", name)
settings = [(runners['test'], policy, 'Real Env'), (runners['train'], policy, 'Virt Env')]
for (i, runner) in enumerate(shadow_runners):
settings.append((runner, policy, f'Shadow Env-{i}'))
saver = nn.ModuleDict({'policy': policy, 'model': model, 'vfn': vfn, 'normalizers': normalizers}) #, 'loss_mod': loss_mod})
print(saver)
max_ent_coef = FLAGS.TRPO.ent_coef
skip_metrics = []
TASK_NUM = 0
if test:
verbose = True
else:
task.init()
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
if test:
ITERS = testnum + 1
warmup_n_iters = warmniter
warmup_n_policy_iters = piter
warmup_n_model_iters = miter
slbo_n_iters = slboniter
slbo_n_policy_iters = piter
slbo_n_model_iters = miter
else:
ITERS = FLAGS.task.n_iters
warmup_n_iters = warmniter
warmup_n_policy_iters = piter
warmup_n_model_iters = miter
slbo_n_iters = slboniter
slbo_n_policy_iters = piter
slbo_n_model_iters = miter
print (f"Total Iters = {ITERS}")
alltaskres = []
generated_adversarial_task = []
init_generator = False
logger.info(f'inittask:{inittask}')
if not test:
if inittask == 'none':
pass
elif not (os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo0.pkl') and os.path.exists(f'./{inittask}/{taskname}.task0.saver.npy')):
init_generator = True
else:
logger.info('Load the first task dataset!')
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue
traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(traindata, train_set)
add_multi_step(traindata, task_train_sets[0])
logger.info(f'load trainset-{i} {len(traindata)}')
for i in range(20):
if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue
devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))
add_multi_step(devdata, task_dev_sets[0])
logger.info(f'load devset-{i} {len(devdata)}')
logger.info('Load the first task saver!')
saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()])
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
logger.info('Loaded normalizers:')
load_norm = tf.get_default_session().run(normalizers_parameters)
logger.info(load_norm)
TASK_NUM = 1
########################## debug #########################
#for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model train loss:', total_loss)
#for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model val loss:', total_loss)
##exit(0)
########################## debug #########################
else:
test_summary = {
'task':[],
'random':[],
'warmup':[],
'warmupprocess':[],
'slbo':[],
}
logger.info('Testing mode!')
train_tasknum = snapshot + 1
test_tasknum = testnum
logger.info(f'train_tasknum = {train_tasknum}, test_tasknum = {test_tasknum}')
assert(testgiven is not None)
if 'noent' in testparam: warmupent = 0.
have_data = False
task_generator = 'fixed' # random or fixed
if testgiven[-4:] == '.pkl':
f = testgiven
logger.info(f'Load all tasks from {f}!')
task.fixed_velocities = pickle.load(open(f, 'rb'))
logger.info(f"Test on task")
logger.info(task.fixed_velocities)
logger.info(f"Task number: {np.array(task.fixed_velocities).shape}")
else:
f = f'{testgiven}/all_task_parameter.pkl'
gen_adv_task = pickle.load(open(f, 'rb'))
logger.info(f'Load all adversarial task from {f}!')
task.fixed_velocities = gen_adv_task[train_tasknum: train_tasknum + test_tasknum]
logger.info(f"Test random method on task {train_tasknum}~{train_tasknum+test_tasknum}:")
logger.info(task.fixed_velocities)
logger.info(f"Task number: {np.array(task.fixed_velocities).shape}")
        def load_data_during_test():
            # Rebuild the replay datasets for test mode by loading every
            # previously-dumped pickle shard (up to 20 SLBO stages per task).
            # Shards that do not exist are skipped silently via `continue`.
            # NOTE(review): the `have_data = True` assignments below bind a
            # *local* variable (no `nonlocal`), so the enclosing scope's
            # `have_data` flag is never updated by this function — likely a bug.
            # NOTE(review): the `open(...)` handles passed to pickle.load are
            # never closed; consider `with open(...)` — TODO confirm intent.
            if inittask != 'none':
                # Task 0 was trained in a separate run (`inittask` directory);
                # load its train/dev shards into the shared and per-task sets.
                logger.info('Load the first task dataset!')
                for i in range(20):
                    if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue
                    traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))
                    add_multi_step(traindata, train_set)
                    add_multi_step(traindata, task_train_sets[0])
                    logger.info(f'load task0 trainset{i} size={len(traindata)}')
                    have_data = True
                for i in range(20):
                    if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue
                    devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))
                    add_multi_step(devdata, task_dev_sets[0])
                    logger.info(f'load task0 devset{i} size={len(devdata)}')
                    have_data = True
            # Load the shards produced by this run's training phase (`setting`
            # directory) for every task index seen during training.
            logger.info(f'Load all task dataset from {setting}!')
            for t in range(0,train_tasknum):
                for i in range(20):
                    if not os.path.exists(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl'): continue
                    traindata = pickle.load(open(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl', 'rb'))
                    add_multi_step(traindata, train_set)
                    add_multi_step(traindata, task_train_sets[t])
                    logger.info(f'load task{t} trainset{i} size={len(traindata)}')
                    # NOTE(review): this `continue` skips the rest of the loop
                    # body when only the devset shard is missing, even though
                    # the trainset shard above was already loaded — the devset
                    # check is nested inside the trainset loop; verify this
                    # asymmetry is intended.
                    if not os.path.exists(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl'): continue
                    devdata = pickle.load(open(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl', 'rb'))
                    add_multi_step(devdata, task_dev_sets[t])
                    logger.info(f'load task{t} devset{i} size={len(devdata)}')
                    have_data = True
load_data_during_test()
logger.info(f'Load the task{snapshot} saver!')
saver.load_state_dict(np.load(f'./{setting}/{taskname}.task{snapshot}.saver.npy', allow_pickle=True)[()])
logger.info('Update all copies! (lazymodel, normalizers_copy)')
tf.get_default_session().run(sync_model_to_lazymodel)
tf.get_default_session().run(copy_normalizers)
logger.info('Loaded normalizers:')
load_norm = tf.get_default_session().run(normalizers_parameters)
logger.info(load_norm)
TASK_NUM = train_tasknum
TEST_TASK_NUM = 0
########################## debug #########################
#if have_data:
# for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model train loss:', total_loss)
# for task_idx in range(TASK_NUM):
# total_loss = []
# for scan in range(100):
# samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
# loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
# total_loss.append(loss_i.mean())
# total_loss = np.mean(total_loss)
# print ('loaded model val loss:', total_loss)
##exit(0)
######################### debug #########################
slbo_n_stages = nslbo
print (f"each task will do nslbo = {nslbo}")
for param in model.parameters():
param.invalidate()
all_task_parameter = []
while (not test and TASK_NUM < ITERS) or (test and TEST_TASK_NUM < ITERS):
# first task or maxstep, update the model. Otherwise, revert the model
logger.info('Sync model from lazymodel')
tf.get_default_session().run(sync_model_from_lazymodel)
taskres = {}
if 'goal_velocity' not in taskres.keys():
taskres['goal_velocity'] = []
if not test and inittask == 'none':
slbo_n_stages = nslbo
elif not test and TASK_NUM == 0:
slbo_n_stages = initnslbo
elif not test and TASK_NUM > 0:
slbo_n_stages = nslbo
time_start = time.time()
trpo_warmup = []
trpo_slbo = []
surprisal = []
train_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)
train_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)
val_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)
val_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)
# NOTE: For each test task, we should reset model to the loaded one, and randomly initialize policy and vfn
#if test:
# saver.load_state_dict(np.load(model_load, allow_pickle=True)[()])
# logger.warning('Load model from %s', model_load)
if test:
logger.info("################################################## TESTING TASK %d ################################################", TEST_TASK_NUM)
logger.info(f'TEST_TASK_NUM={TEST_TASK_NUM}, TASK_NUM={TASK_NUM}')
logger.warning('Revert model and normalizers')
tf.get_default_session().run(sync_model_from_lazymodel)
tf.get_default_session().run(revert_normalizers)
else:
logger.info("################################################## TRAINING TASK %d ################################################", TASK_NUM)
if test:
test_returns = []
test_summary['warmupprocess'].append([])
test_summary['slbo'].append([])
if not test: #and FLAGS.task.method == 'random':
if inittask != 'none' and TASK_NUM == 1:
if 'HClinearstate' in taskname:
task.init([0.2] * task.n_params)
else:
task.init([0.] * task.n_params)
else:
if TASK_NUM > 0: #fix the 1st tasks during training
if adv == 0:
task.random_sample('uniform')
elif adv == 2:
task.random_sample('normal')
elif adv == 1:
if TASK_NUM == 1 and inittask != 'none':
task.random_sample()
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
task.sample(adv=True)
logger.info('Task Sampled: %s', task.goal_velocity)
taskres['goal_velocity'].append(task.goal_velocity)
all_task_parameter.append(task.goal_velocity)
print (f"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}")
if test:
if task_generator == 'fixed':
task.goal_velocity = task.fixed_velocities[TEST_TASK_NUM] #TODO
logger.info('Task Fixed: %s', task.goal_velocity)
if task_generator == 'random':
task.sample(adv=False) #sample randomly
logger.info('Task Sampled: %s', task.goal_velocity)
if task_generator == 'adv':
task.sample(adv=True) #sample adversarially
logger.info('Task Sampled: %s', task.goal_velocity)
generated_adversarial_task.append(task.goal_velocity)
logger.info('Tasks dump!')
assert (task_generator == 'fixed')
test_summary['task'].append(task.goal_velocity)
if FLAGS.task.reset_policy:
# NOTE: reset policy and valuefunc
logger.info("Resetting Policy")
pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after))
logger.info("Resetting Valuefunc")
tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))
tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))
for p in warmup_policy.parameters(): p.invalidate()
for p in warmup_vfn.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
for p in vfn.parameters(): p.invalidate()
last_end = None
drops = []
evaluate(settings, 'pre-warm-up')
returns_pre_warmup = testeval(policy, runners['collect'])
if test:
test_returns.append(returns_pre_warmup)
test_summary['random'].append(returns_pre_warmup)
t1 = time.time()
trpo_time = 0
logger.info('----------------------------- Warmup for %d iterations ------------------------' % warmup_n_iters)
if decay == 'joint':
logger.info('Joint train from a joint dataset')
elif decay == 'taskid':
Z = np.sum([float(i+1) for i in range(0, TASK_NUM)])
prop = [float(taskid+1) / Z for taskid in range(TASK_NUM)]
logger.info(f'Sampling prop={prop}, Z={Z}')
elif decay == 'none':
Z = TASK_NUM
prop = [1. / TASK_NUM for _ in range(TASK_NUM)]
logger.info(f'Sampling prop={prop}, Z={Z}')
for i in range(warmup_n_iters):
#exit(0)
if TASK_NUM == 0 and not test and not model_load:
logger.info('Break because TASK_NUM=0')
break
losses = deque(maxlen=warmup_n_model_iters)
grad_norm_meter = AverageMeter()
n_model_iters = warmup_n_model_iters
drop_plot = 0
if test and verbose:
logger.info(f'warmup iter
if 'warmup_task_val_loss' not in taskres.keys():
taskres['warmup_task_val_loss'] = [[] for _ in range(TASK_NUM)]
if verbose: logger.info('Train Model for %d iterations' % n_model_iters)
model_time = time.time()
if not test or (test and have_data):
for _ in range(n_model_iters):
if decay == 'joint':
samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
else:
all_samples = []
for taskid in range(TASK_NUM):
samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)
all_samples.append(samples_i)
samples = np.concatenate(all_samples, axis=1).view(np.recarray)
_, train_loss, grad_norm = loss_mod.get_loss(
samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,
fetch='train loss grad_norm')
losses.append(train_loss.mean())
grad_norm_meter.update(grad_norm)
# ideally, we should define an Optimizer class, which takes parameters as inputs.
# The `update` method of `Optimizer` will invalidate all parameters during updates.
for param in model.parameters():
param.invalidate()
model_time = time.time() - model_time
if i % FLAGS.model.validation_freq == 0:
task_val_loss = []
val_time = time.time()
for task_idx in range(TASK_NUM):
total_loss = []
for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):
samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
total_loss.append(loss_i.mean())
total_loss = np.mean(total_loss)
task_val_loss.append(total_loss)
taskres['warmup_task_val_loss'][task_idx].append(total_loss)
val_time = time.time() - val_time
val_loss = np.mean(task_val_loss)
val_losses_warmup.append(val_loss)
train_losses_warmup.append(np.mean(losses))
if np.isnan(val_loss) or np.isnan(np.mean(losses)):
logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
logger.info('
i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), drop_plot, model_time, trpo_time, val_time)
logger.info(f'
if verbose: logger.info('Train policy for %d iterations' % warmup_n_policy_iters)
trpo_time = time.time()
for n_updates in range(warmup_n_policy_iters):
if FLAGS.algorithm != 'MF' and FLAGS.warmup.start == 'buffer':
runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
else:
runners['train'].reset()
data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data,task)
dist_mean, dist_std, vf_loss, plotinfo = algo.train(warmupent, data, advantages, values)
trpo_warmup.append(plotinfo)
returns = [info['return'] for info in ep_infos]
if n_updates == 0:
if last_end is not None:
drop_plot = last_end - np.mean(returns)
drops.append(last_end - np.mean(returns))
last_end = np.mean(returns)
if n_updates == warmup_n_policy_iters-1:
logger.info('[TRPO]
'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
dist_std, dist_mean, vf_loss)
trpo_time = time.time() - trpo_time
if i % FLAGS.warmup.n_evaluate_iters == 0 or i == warmup_n_iters-1:# and i != 0:
real_eval, virt_eval = evaluate(settings, 'iteration')
if 'warmup_real_eval' not in taskres.keys(): taskres['warmup_real_eval'] = []
if 'warmup_virt_eval' not in taskres.keys(): taskres['warmup_virt_eval'] = []
taskres['warmup_real_eval'].append(real_eval)
taskres['warmup_virt_eval'].append(virt_eval)
if test:
test_summary['warmupprocess'][TEST_TASK_NUM].append(real_eval)
if not test:
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
else:
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-warm{warmup_n_iters}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
taskres['warmup_monitor'] = [res]
t2 = time.time()
warmup_time = t2 - t1
evaluate(settings, 'post-warm-up')
returns_post_warmup = testeval(policy, runners['collect'])
if test:
test_returns.append(returns_post_warmup)
test_summary['warmup'].append(returns_post_warmup)
print ("warmupprocess:", test_summary['warmupprocess'][TEST_TASK_NUM])
logger.info('Sync warmup policy and vfn and model')
tf.get_default_session().run([sync_warmup_policy, sync_warmup_vfn, sync_warmup_model])
for p in warmup_policy.parameters(): p.invalidate()
for p in warmup_vfn.parameters(): p.invalidate()
for p in warmup_model.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
task.parameters().invalidate()
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After WARMUP, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])
print ("mod_norm:", np.linalg.norm(mod), "warm_mod_norm:", np.linalg.norm(warm_mod))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
warmup_collect_virt = []
eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')
warmup_collect_real = []
logger.info('--------------------------------------------- SLBO for %d outer stages -----------------------------------------' % slbo_n_stages)
for T in range(slbo_n_stages):
logger.info('-------- Starting Stage %d ---------', T)
evaluate(settings, 'episode')
# collect data
if not test:
logger.info('-------- Collect data from REAL env for %d samples --------' % FLAGS.rollout.n_train_samples)
recent_train_set, ep_infos = runners['collect'].run(noise.make(policy), FLAGS.rollout.n_train_samples)
recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)
else:
logger.info('-------- Collect data from REAL env for %d samples --------' % 2000)
recent_train_set, ep_infos = runners['collect2000'].run(noise.make(policy), 2000)
recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)
logger.info('save setting dataset! trainset and devset!')
if not test:
pickle.dump(recent_train_set, open(f'./{setting}/{taskname}.trainset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))
pickle.dump(recent_dev_set, open(f'./{setting}/{taskname}.devset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))
# Add real data to task_train_sets and task_dev_sets
#if not test:
# add_multi_step(recent_train_set, train_set)
add_multi_step(recent_train_set, task_train_sets[TASK_NUM])
add_multi_step(recent_dev_set, task_dev_sets[TASK_NUM])
#if not test:
# states = recent_train_set.state
# mean = np.mean(states, axis=0)
# std = np.std(states, axis=0)
# min_ = np.min(states, axis=0)
# max_ = np.max(states, axis=0)
# states_stat = {"mean": mean, "std": std, "min": min_, "max": max_}
# evaluate the surprisal of collected real data for model
new_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
add_multi_step(recent_train_set, new_set)
losses_new = []
for i in range(FLAGS.rollout.n_train_samples // FLAGS.model.dev_batch_size + 1):
samples = new_set.sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
loss = loss.mean()
losses_new.append(loss)
losses_new_mean = np.mean(losses_new)
surprisal.append(losses_new_mean)
logger.info(f'(surprisal) model loss on new collected data is {losses_new_mean}')
add_multi_step(recent_train_set, train_set)
add_multi_step(
runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)[0],
dev_set,
)
returns = np.array([ep_info['return'] for ep_info in ep_infos])
if len(returns) > 0:
logger.info("episode: %s", np.mean(returns))
if T == 0: # check
samples = train_set.sample_multi_step(100, 1, FLAGS.model.multi_step)
for i in range(FLAGS.model.multi_step - 1):
masks = 1 - (samples.done[i] | samples.timeout[i])[..., np.newaxis]
assert np.allclose(samples.state[i + 1] * masks, samples.next_state[i] * masks)
normalizers.state.update(recent_train_set.state)
normalizers.action.update(recent_train_set.action)
normalizers.diff.update(recent_train_set.next_state - recent_train_set.state)
if TASK_NUM == 0: #In the 1st task, no warmup, but we validate loss of the random model
samples = dev_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
loss = loss.mean()
val_losses_warmup.append(loss)
logger.info('SLBO for %d inner stages' % slbo_n_iters)
model_time, trpo_time = 0, 0
if 'slbo_task_val_loss' not in taskres.keys():
taskres['slbo_task_val_loss'] = [[] for _ in range(TASK_NUM+1)]
if decay == 'joint':
logger.info('Joint train from a joint dataset')
elif decay == 'taskid':
Z = np.sum([float(i+1) for i in range(0, TASK_NUM+1)])
prop = [float(taskid+1) / Z for taskid in range(TASK_NUM+1)]
logger.info(f'Sampling prop={prop}, Z={Z}')
elif decay == 'none':
Z = TASK_NUM+1
prop = [1. / float(Z) for _ in range(Z)]
logger.info(f'Sampling prop={prop}, Z={Z}')
for i in range(slbo_n_iters):
if i % FLAGS.slbo.n_evaluate_iters == 0 or i == slbo_n_iters-1:# and i != 0:
# cur_actions = policy.eval('actions_mean actions_std', states=recent_states)
# kl_old_new = gaussian_kl(*ref_actions, *cur_actions).sum(axis=1).mean()
# logger.info('KL(old || cur) = %.6f', kl_old_new)
real_eval, virt_eval = evaluate(settings, 'iteration')
if 'slbo_real_eval' not in taskres.keys(): taskres['slbo_real_eval'] = []
if 'slbo_virt_eval' not in taskres.keys(): taskres['slbo_virt_eval'] = []
taskres['slbo_real_eval'].append(real_eval)
taskres['slbo_virt_eval'].append(virt_eval)
losses = deque(maxlen=slbo_n_model_iters)
grad_norm_meter = AverageMeter()
n_model_iters = slbo_n_model_iters
if verbose: logger.info('Train model %d iterations'% n_model_iters)
model_time = time.time()
for _ in range(n_model_iters):
if decay == 'joint':
samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
else:
all_samples = []
sample_size = 0
for taskid in range(TASK_NUM+1):
samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)
all_samples.append(samples_i)
sample_size += int(FLAGS.model.train_batch_size*prop[taskid])+1
samples = np.concatenate(all_samples, axis=1).view(np.recarray)
_, train_loss, grad_norm = loss_mod.get_loss(
samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,
fetch='train loss grad_norm')
losses.append(train_loss.mean())
grad_norm_meter.update(grad_norm)
# ideally, we should define an Optimizer class, which takes parameters as inputs.
# The `update` method of `Optimizer` will invalidate all parameters during updates.
for param in model.parameters():
param.invalidate()
model_time = time.time() - model_time
if i % FLAGS.model.validation_freq == 0:
task_val_loss = []
val_time = time.time()
for task_idx in range(TASK_NUM+1):
total_loss = []
for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):
samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
total_loss.append(loss_i.mean())
total_loss = np.mean(total_loss)
task_val_loss.append(total_loss)
taskres['slbo_task_val_loss'][task_idx].append(total_loss)
val_loss = np.mean(task_val_loss)
val_time = time.time() - val_time
if np.isnan(val_loss) or np.isnan(np.mean(losses)):
logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
logger.info('
i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), model_time, trpo_time, val_time)
logger.info(f'
model_time, trpo_time = 0, 0
val_losses_slbo.append(val_loss)
train_losses_slbo.append(np.mean(losses))
if verbose: logger.info('Train policy %d iterations'% slbo_n_policy_iters)
trpo_time = time.time()
for n_updates in range(slbo_n_policy_iters):
if FLAGS.algorithm != 'MF' and FLAGS.slbo.start == 'buffer':
runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
else:
runners['train'].reset()
data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data, task)
dist_mean, dist_std, vf_loss, plotinfo = algo.train(max_ent_coef, data, advantages, values)
trpo_slbo.append(plotinfo)
returns = [info['return'] for info in ep_infos]
if n_updates == slbo_n_policy_iters-1:
logger.info('[TRPO]
'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
dist_std, dist_mean, vf_loss)
trpo_time = time.time() - trpo_time
if not test and (TASK_NUM) % FLAGS.ckpt.n_save_stages == 0:
np.save(f'{FLAGS.log_dir}/{taskname}-stage-{TASK_NUM}', saver.state_dict())
np.save(f'{FLAGS.log_dir}/{taskname}-final', saver.state_dict())
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
if 'slbo_monitor' not in taskres.keys():
taskres['slbo_monitor'] = []
taskres['slbo_monitor'].append(res)
if not test and FLAGS.ckpt.n_save_stages == 1:
pickle.dump(recent_train_set, open(f'{FLAGS.log_dir}/stage-{TASK_NUM}.inc-buf.pkl', 'wb'))
if test:
returns_post_slbo_update = testeval(policy, runners['collect'])
test_returns.append(returns_post_slbo_update)
real_eval, virt_eval = evaluate(settings, 'iteration')
test_summary['slbo'][TEST_TASK_NUM].append(real_eval)
test_summary[f'slbo{T+1}'].append(returns_post_slbo_update)
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
print ('test_summary_slbo:', test_summary['slbo'][TEST_TASK_NUM])
if not test:
np.save(f'{setting}/{taskname}.task{TASK_NUM}.saver', saver.state_dict())
np.save(f'{setting}/{taskname}.final.saver', saver.state_dict())
if init_generator and TASK_NUM==0:
print ('finished init generator!')
exit(0)
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After SLBO, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
eval_rollout(runners['train'], policy, 'Use optimal policy to collect data from real env')
optimal_collect_real = []
t3 = time.time()
slbo_time = t3 - t2
evaluate(settings, 'post-slbo')
logger.info(f'Warmup time = {warmup_time}, SLBO time = {slbo_time}')
alltaskres.append(taskres)
if not test:
pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl', 'wb'))
pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl', 'wb'))
else:
pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl.{testparam}', 'wb'))
pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl.{testparam}', 'wb'))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
if not test:
#if TASK_NUM > 0:
if TASK_NUM > -1:
task_params_before, final_grad, advtask_info = advtask.train(runners['train_copy'], runners['collect_copy'], warmup_collect_virt, warmup_collect_real, optimal_collect_real, returns_pre_warmup, val_losses_warmup, val_losses_slbo, train_losses_warmup, train_losses_slbo, surprisal, trpo_warmup, trpo_slbo, fout, infofilename, extra_runners)
# first task or maxstep, update the model
if not test and (TASK_NUM == 0 or TASK_NUM % maxstep == 0):
logger.info(f"task_num={TASK_NUM}, sync_model_to_lazymodel")
tf.get_default_session().run(sync_model_to_lazymodel)
if test:
pickle.dump(test_summary, open(f'{setting}/test_summary.pkl.{testparam}', 'wb'))
TEST_TASK_NUM += 1
TASK_NUM = train_tasknum
#task_train_sets[TASK_NUM].clear()
#task_dev_sets[TASK_NUM].clear()
for tt in range(TASK_NUM+1):
task_train_sets[tt].clear()
task_dev_sets[tt].clear()
train_set.clear()
load_data_during_test()
continue
task_params_after = task_params_before + final_grad * alpha
task.set_parameters(task_params_after)
if not test:
advtask_info['alpha'].append(alpha)
with open(infofilename, 'wb') as handle:
pickle.dump(advtask_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
print ('>>>>>>dump')
TASK_NUM += 1
time_end = time.time()
print (f"Task Done! Total Time Consumed for 1 task = {time_end - time_start}s")
# Script entry point: open a TensorFlow session for the duration of main()
# so that tf.get_default_session() calls inside the training loop resolve.
if __name__ == '__main__':
    with tf.Session(config=get_tf_config()):
        main()
| true | true |
f72e8cc5d63278100727c1c262ba1c22725d8d19 | 34,804 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/cp855.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/cp855.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/cp855.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP855 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* to CP855 bytes via the module-level encoding map."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Decode CP855 bytes in *input* via the module-level decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*; charmap codecs keep no state, so *final* is unused."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*; charmap codecs keep no state, so *final* is unused."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for CP855; all behaviour is inherited from the bases."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for CP855; all behaviour is inherited from the bases."""
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for the 'cp855' codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp855',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
### Decoding Table
#
# decoding_table[byte] is the Unicode character for that CP855 byte.
# It is the single source of truth: the legacy decoding_map and the
# encoding_map are both derived from it below, so the three structures
# can never fall out of sync (the original file spelled out all three
# by hand, ~390 redundant lines).  CP855 maps every byte 0x00-0xFF to a
# distinct code point, so the table is a bijection and inverting it is
# unambiguous.

decoding_table = (
    u'\x00'     # 0x0000 -> NULL
    u'\x01'     # 0x0001 -> START OF HEADING
    u'\x02'     # 0x0002 -> START OF TEXT
    u'\x03'     # 0x0003 -> END OF TEXT
    u'\x04'     # 0x0004 -> END OF TRANSMISSION
    u'\x05'     # 0x0005 -> ENQUIRY
    u'\x06'     # 0x0006 -> ACKNOWLEDGE
    u'\x07'     # 0x0007 -> BELL
    u'\x08'     # 0x0008 -> BACKSPACE
    u'\t'       # 0x0009 -> HORIZONTAL TABULATION
    u'\n'       # 0x000a -> LINE FEED
    u'\x0b'     # 0x000b -> VERTICAL TABULATION
    u'\x0c'     # 0x000c -> FORM FEED
    u'\r'       # 0x000d -> CARRIAGE RETURN
    u'\x0e'     # 0x000e -> SHIFT OUT
    u'\x0f'     # 0x000f -> SHIFT IN
    u'\x10'     # 0x0010 -> DATA LINK ESCAPE
    u'\x11'     # 0x0011 -> DEVICE CONTROL ONE
    u'\x12'     # 0x0012 -> DEVICE CONTROL TWO
    u'\x13'     # 0x0013 -> DEVICE CONTROL THREE
    u'\x14'     # 0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     # 0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     # 0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     # 0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     # 0x0018 -> CANCEL
    u'\x19'     # 0x0019 -> END OF MEDIUM
    u'\x1a'     # 0x001a -> SUBSTITUTE
    u'\x1b'     # 0x001b -> ESCAPE
    u'\x1c'     # 0x001c -> FILE SEPARATOR
    u'\x1d'     # 0x001d -> GROUP SEPARATOR
    u'\x1e'     # 0x001e -> RECORD SEPARATOR
    u'\x1f'     # 0x001f -> UNIT SEPARATOR
    u' '        # 0x0020 -> SPACE
    u'!'        # 0x0021 -> EXCLAMATION MARK
    u'"'        # 0x0022 -> QUOTATION MARK
    u'#'        # 0x0023 -> NUMBER SIGN
    u'$'        # 0x0024 -> DOLLAR SIGN
    u'%'        # 0x0025 -> PERCENT SIGN
    u'&'        # 0x0026 -> AMPERSAND
    u"'"        # 0x0027 -> APOSTROPHE
    u'('        # 0x0028 -> LEFT PARENTHESIS
    u')'        # 0x0029 -> RIGHT PARENTHESIS
    u'*'        # 0x002a -> ASTERISK
    u'+'        # 0x002b -> PLUS SIGN
    u','        # 0x002c -> COMMA
    u'-'        # 0x002d -> HYPHEN-MINUS
    u'.'        # 0x002e -> FULL STOP
    u'/'        # 0x002f -> SOLIDUS
    u'0'        # 0x0030 -> DIGIT ZERO
    u'1'        # 0x0031 -> DIGIT ONE
    u'2'        # 0x0032 -> DIGIT TWO
    u'3'        # 0x0033 -> DIGIT THREE
    u'4'        # 0x0034 -> DIGIT FOUR
    u'5'        # 0x0035 -> DIGIT FIVE
    u'6'        # 0x0036 -> DIGIT SIX
    u'7'        # 0x0037 -> DIGIT SEVEN
    u'8'        # 0x0038 -> DIGIT EIGHT
    u'9'        # 0x0039 -> DIGIT NINE
    u':'        # 0x003a -> COLON
    u';'        # 0x003b -> SEMICOLON
    u'<'        # 0x003c -> LESS-THAN SIGN
    u'='        # 0x003d -> EQUALS SIGN
    u'>'        # 0x003e -> GREATER-THAN SIGN
    u'?'        # 0x003f -> QUESTION MARK
    u'@'        # 0x0040 -> COMMERCIAL AT
    u'A'        # 0x0041 -> LATIN CAPITAL LETTER A
    u'B'        # 0x0042 -> LATIN CAPITAL LETTER B
    u'C'        # 0x0043 -> LATIN CAPITAL LETTER C
    u'D'        # 0x0044 -> LATIN CAPITAL LETTER D
    u'E'        # 0x0045 -> LATIN CAPITAL LETTER E
    u'F'        # 0x0046 -> LATIN CAPITAL LETTER F
    u'G'        # 0x0047 -> LATIN CAPITAL LETTER G
    u'H'        # 0x0048 -> LATIN CAPITAL LETTER H
    u'I'        # 0x0049 -> LATIN CAPITAL LETTER I
    u'J'        # 0x004a -> LATIN CAPITAL LETTER J
    u'K'        # 0x004b -> LATIN CAPITAL LETTER K
    u'L'        # 0x004c -> LATIN CAPITAL LETTER L
    u'M'        # 0x004d -> LATIN CAPITAL LETTER M
    u'N'        # 0x004e -> LATIN CAPITAL LETTER N
    u'O'        # 0x004f -> LATIN CAPITAL LETTER O
    u'P'        # 0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x0052 -> LATIN CAPITAL LETTER R
    u'S'        # 0x0053 -> LATIN CAPITAL LETTER S
    u'T'        # 0x0054 -> LATIN CAPITAL LETTER T
    u'U'        # 0x0055 -> LATIN CAPITAL LETTER U
    u'V'        # 0x0056 -> LATIN CAPITAL LETTER V
    u'W'        # 0x0057 -> LATIN CAPITAL LETTER W
    u'X'        # 0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x005a -> LATIN CAPITAL LETTER Z
    u'['        # 0x005b -> LEFT SQUARE BRACKET
    u'\\'       # 0x005c -> REVERSE SOLIDUS
    u']'        # 0x005d -> RIGHT SQUARE BRACKET
    u'^'        # 0x005e -> CIRCUMFLEX ACCENT
    u'_'        # 0x005f -> LOW LINE
    u'`'        # 0x0060 -> GRAVE ACCENT
    u'a'        # 0x0061 -> LATIN SMALL LETTER A
    u'b'        # 0x0062 -> LATIN SMALL LETTER B
    u'c'        # 0x0063 -> LATIN SMALL LETTER C
    u'd'        # 0x0064 -> LATIN SMALL LETTER D
    u'e'        # 0x0065 -> LATIN SMALL LETTER E
    u'f'        # 0x0066 -> LATIN SMALL LETTER F
    u'g'        # 0x0067 -> LATIN SMALL LETTER G
    u'h'        # 0x0068 -> LATIN SMALL LETTER H
    u'i'        # 0x0069 -> LATIN SMALL LETTER I
    u'j'        # 0x006a -> LATIN SMALL LETTER J
    u'k'        # 0x006b -> LATIN SMALL LETTER K
    u'l'        # 0x006c -> LATIN SMALL LETTER L
    u'm'        # 0x006d -> LATIN SMALL LETTER M
    u'n'        # 0x006e -> LATIN SMALL LETTER N
    u'o'        # 0x006f -> LATIN SMALL LETTER O
    u'p'        # 0x0070 -> LATIN SMALL LETTER P
    u'q'        # 0x0071 -> LATIN SMALL LETTER Q
    u'r'        # 0x0072 -> LATIN SMALL LETTER R
    u's'        # 0x0073 -> LATIN SMALL LETTER S
    u't'        # 0x0074 -> LATIN SMALL LETTER T
    u'u'        # 0x0075 -> LATIN SMALL LETTER U
    u'v'        # 0x0076 -> LATIN SMALL LETTER V
    u'w'        # 0x0077 -> LATIN SMALL LETTER W
    u'x'        # 0x0078 -> LATIN SMALL LETTER X
    u'y'        # 0x0079 -> LATIN SMALL LETTER Y
    u'z'        # 0x007a -> LATIN SMALL LETTER Z
    u'{'        # 0x007b -> LEFT CURLY BRACKET
    u'|'        # 0x007c -> VERTICAL LINE
    u'}'        # 0x007d -> RIGHT CURLY BRACKET
    u'~'        # 0x007e -> TILDE
    u'\x7f'     # 0x007f -> DELETE
    u'\u0452'   # 0x0080 -> CYRILLIC SMALL LETTER DJE
    u'\u0402'   # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
    u'\u0453'   # 0x0082 -> CYRILLIC SMALL LETTER GJE
    u'\u0403'   # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
    u'\u0451'   # 0x0084 -> CYRILLIC SMALL LETTER IO
    u'\u0401'   # 0x0085 -> CYRILLIC CAPITAL LETTER IO
    u'\u0454'   # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
    u'\u0404'   # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
    u'\u0455'   # 0x0088 -> CYRILLIC SMALL LETTER DZE
    u'\u0405'   # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
    u'\u0456'   # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
    u'\u0406'   # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
    u'\u0457'   # 0x008c -> CYRILLIC SMALL LETTER YI
    u'\u0407'   # 0x008d -> CYRILLIC CAPITAL LETTER YI
    u'\u0458'   # 0x008e -> CYRILLIC SMALL LETTER JE
    u'\u0408'   # 0x008f -> CYRILLIC CAPITAL LETTER JE
    u'\u0459'   # 0x0090 -> CYRILLIC SMALL LETTER LJE
    u'\u0409'   # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
    u'\u045a'   # 0x0092 -> CYRILLIC SMALL LETTER NJE
    u'\u040a'   # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
    u'\u045b'   # 0x0094 -> CYRILLIC SMALL LETTER TSHE
    u'\u040b'   # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
    u'\u045c'   # 0x0096 -> CYRILLIC SMALL LETTER KJE
    u'\u040c'   # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
    u'\u045e'   # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
    u'\u040e'   # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
    u'\u045f'   # 0x009a -> CYRILLIC SMALL LETTER DZHE
    u'\u040f'   # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
    u'\u044e'   # 0x009c -> CYRILLIC SMALL LETTER YU
    u'\u042e'   # 0x009d -> CYRILLIC CAPITAL LETTER YU
    u'\u044a'   # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
    u'\u042a'   # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
    u'\u0430'   # 0x00a0 -> CYRILLIC SMALL LETTER A
    u'\u0410'   # 0x00a1 -> CYRILLIC CAPITAL LETTER A
    u'\u0431'   # 0x00a2 -> CYRILLIC SMALL LETTER BE
    u'\u0411'   # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
    u'\u0446'   # 0x00a4 -> CYRILLIC SMALL LETTER TSE
    u'\u0426'   # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
    u'\u0434'   # 0x00a6 -> CYRILLIC SMALL LETTER DE
    u'\u0414'   # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
    u'\u0435'   # 0x00a8 -> CYRILLIC SMALL LETTER IE
    u'\u0415'   # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
    u'\u0444'   # 0x00aa -> CYRILLIC SMALL LETTER EF
    u'\u0424'   # 0x00ab -> CYRILLIC CAPITAL LETTER EF
    u'\u0433'   # 0x00ac -> CYRILLIC SMALL LETTER GHE
    u'\u0413'   # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
    u'\xab'     # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2591'   # 0x00b0 -> LIGHT SHADE
    u'\u2592'   # 0x00b1 -> MEDIUM SHADE
    u'\u2593'   # 0x00b2 -> DARK SHADE
    u'\u2502'   # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\u0445'   # 0x00b5 -> CYRILLIC SMALL LETTER HA
    u'\u0425'   # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
    u'\u0438'   # 0x00b7 -> CYRILLIC SMALL LETTER I
    u'\u0418'   # 0x00b8 -> CYRILLIC CAPITAL LETTER I
    u'\u2563'   # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\u0439'   # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
    u'\u0419'   # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
    u'\u2510'   # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\u043a'   # 0x00c6 -> CYRILLIC SMALL LETTER KA
    u'\u041a'   # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
    u'\u255a'   # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\xa4'     # 0x00cf -> CURRENCY SIGN
    u'\u043b'   # 0x00d0 -> CYRILLIC SMALL LETTER EL
    u'\u041b'   # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
    u'\u043c'   # 0x00d2 -> CYRILLIC SMALL LETTER EM
    u'\u041c'   # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
    u'\u043d'   # 0x00d4 -> CYRILLIC SMALL LETTER EN
    u'\u041d'   # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
    u'\u043e'   # 0x00d6 -> CYRILLIC SMALL LETTER O
    u'\u041e'   # 0x00d7 -> CYRILLIC CAPITAL LETTER O
    u'\u043f'   # 0x00d8 -> CYRILLIC SMALL LETTER PE
    u'\u2518'   # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   # 0x00db -> FULL BLOCK
    u'\u2584'   # 0x00dc -> LOWER HALF BLOCK
    u'\u041f'   # 0x00dd -> CYRILLIC CAPITAL LETTER PE
    u'\u044f'   # 0x00de -> CYRILLIC SMALL LETTER YA
    u'\u2580'   # 0x00df -> UPPER HALF BLOCK
    u'\u042f'   # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
    u'\u0440'   # 0x00e1 -> CYRILLIC SMALL LETTER ER
    u'\u0420'   # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
    u'\u0441'   # 0x00e3 -> CYRILLIC SMALL LETTER ES
    u'\u0421'   # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
    u'\u0442'   # 0x00e5 -> CYRILLIC SMALL LETTER TE
    u'\u0422'   # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
    u'\u0443'   # 0x00e7 -> CYRILLIC SMALL LETTER U
    u'\u0423'   # 0x00e8 -> CYRILLIC CAPITAL LETTER U
    u'\u0436'   # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
    u'\u0416'   # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
    u'\u0432'   # 0x00eb -> CYRILLIC SMALL LETTER VE
    u'\u0412'   # 0x00ec -> CYRILLIC CAPITAL LETTER VE
    u'\u044c'   # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
    u'\u042c'   # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
    u'\u2116'   # 0x00ef -> NUMERO SIGN
    u'\xad'     # 0x00f0 -> SOFT HYPHEN
    u'\u044b'   # 0x00f1 -> CYRILLIC SMALL LETTER YERU
    u'\u042b'   # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
    u'\u0437'   # 0x00f3 -> CYRILLIC SMALL LETTER ZE
    u'\u0417'   # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
    u'\u0448'   # 0x00f5 -> CYRILLIC SMALL LETTER SHA
    u'\u0428'   # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
    u'\u044d'   # 0x00f7 -> CYRILLIC SMALL LETTER E
    u'\u042d'   # 0x00f8 -> CYRILLIC CAPITAL LETTER E
    u'\u0449'   # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
    u'\u0429'   # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
    u'\u0447'   # 0x00fb -> CYRILLIC SMALL LETTER CHE
    u'\u0427'   # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
    u'\xa7'     # 0x00fd -> SECTION SIGN
    u'\u25a0'   # 0x00fe -> BLACK SQUARE
    u'\xa0'     # 0x00ff -> NO-BREAK SPACE
)

### Decoding Map
#
# Legacy dict form (byte -> code point), kept for backward compatibility
# with callers that index decoding_map directly.  Derived from the table
# above; identical in content to the dict the generated file spelled out.

decoding_map = dict((_byte, ord(_char)) for _byte, _char in enumerate(decoding_table))

### Encoding Map
#
# Inverse mapping (code point -> byte).  CP855 is bijective over the full
# byte range, so the inversion is unambiguous and yields exactly the 256
# entries the generated file listed explicitly.

encoding_map = dict((ord(_char), _byte) for _byte, _char in enumerate(decoding_table))
| 49.79113 | 98 | 0.596684 |
import codecs
):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
nfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
dentity_dict(range(256))
decoding_map.update({
0x0080: 0x0452,
0x0081: 0x0402,
0x0082: 0x0453,
0x0083: 0x0403,
0x0084: 0x0451,
0x0085: 0x0401,
0x0086: 0x0454,
0x0087: 0x0404,
0x0088: 0x0455,
0x0089: 0x0405,
0x008a: 0x0456,
0x008b: 0x0406,
0x008c: 0x0457,
0x008d: 0x0407,
0x008e: 0x0458,
0x008f: 0x0408,
0x0090: 0x0459,
0x0091: 0x0409,
0x0092: 0x045a,
0x0093: 0x040a,
0x0094: 0x045b,
0x0095: 0x040b,
0x0096: 0x045c,
0x0097: 0x040c,
0x0098: 0x045e,
0x0099: 0x040e,
0x009a: 0x045f,
0x009b: 0x040f,
0x009c: 0x044e,
0x009d: 0x042e,
0x009e: 0x044a,
0x009f: 0x042a,
0x00a0: 0x0430,
0x00a1: 0x0410,
0x00a2: 0x0431,
0x00a3: 0x0411,
0x00a4: 0x0446,
0x00a5: 0x0426,
0x00a6: 0x0434,
0x00a7: 0x0414,
0x00a8: 0x0435,
0x00a9: 0x0415,
0x00aa: 0x0444,
0x00ab: 0x0424,
0x00ac: 0x0433,
0x00ad: 0x0413,
0x00ae: 0x00ab,
0x00af: 0x00bb,
0x00b0: 0x2591,
0x00b1: 0x2592,
0x00b2: 0x2593,
0x00b3: 0x2502,
0x00b4: 0x2524,
0x00b5: 0x0445,
0x00b6: 0x0425,
0x00b7: 0x0438,
0x00b8: 0x0418,
0x00b9: 0x2563,
0x00ba: 0x2551,
0x00bb: 0x2557,
0x00bc: 0x255d,
0x00bd: 0x0439,
0x00be: 0x0419,
0x00bf: 0x2510,
0x00c0: 0x2514,
0x00c1: 0x2534,
0x00c2: 0x252c,
0x00c3: 0x251c,
0x00c4: 0x2500,
0x00c5: 0x253c,
0x00c6: 0x043a,
0x00c7: 0x041a,
0x00c8: 0x255a,
0x00c9: 0x2554,
0x00ca: 0x2569,
0x00cb: 0x2566,
0x00cc: 0x2560,
0x00cd: 0x2550,
0x00ce: 0x256c,
0x00cf: 0x00a4,
0x00d0: 0x043b,
0x00d1: 0x041b,
0x00d2: 0x043c,
0x00d3: 0x041c,
0x00d4: 0x043d,
0x00d5: 0x041d,
0x00d6: 0x043e,
0x00d7: 0x041e,
0x00d8: 0x043f,
0x00d9: 0x2518,
0x00da: 0x250c,
0x00db: 0x2588,
0x00dc: 0x2584,
0x00dd: 0x041f,
0x00de: 0x044f,
0x00df: 0x2580,
0x00e0: 0x042f,
0x00e1: 0x0440,
0x00e2: 0x0420,
0x00e3: 0x0441,
0x00e4: 0x0421,
0x00e5: 0x0442,
0x00e6: 0x0422,
0x00e7: 0x0443,
0x00e8: 0x0423,
0x00e9: 0x0436,
0x00ea: 0x0416,
0x00eb: 0x0432,
0x00ec: 0x0412,
0x00ed: 0x044c,
0x00ee: 0x042c,
0x00ef: 0x2116,
0x00f0: 0x00ad,
0x00f1: 0x044b,
0x00f2: 0x042b,
0x00f3: 0x0437,
0x00f4: 0x0417,
0x00f5: 0x0448,
0x00f6: 0x0428,
0x00f7: 0x044d,
0x00f8: 0x042d,
0x00f9: 0x0449,
0x00fa: 0x0429,
0x00fb: 0x0447,
0x00fc: 0x0427,
0x00fd: 0x00a7,
0x00fe: 0x25a0,
0x00ff: 0x00a0,
})
u'\x01'
u'\x02'
u'\x03'
u'\x04'
u'\x05'
u'\x06'
u'\x07'
u'\x08'
u'\t'
u'\n'
u'\x0b'
u'\x0c'
u'\r'
u'\x0e'
u'\x0f'
u'\x10'
u'\x11'
u'\x12'
u'\x13'
u'\x14'
u'\x15'
u'\x16'
u'\x17'
u'\x18'
u'\x19'
u'\x1a'
u'\x1b'
u'\x1c'
u'\x1d'
u'\x1e'
u'\x1f'
u' '
u'!'
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| true | true |
f72e8d3f29aa1af65b058c4504d5648e6c8fb53e | 18,293 | py | Python | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/14-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/14-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/14-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i0))
hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| 38.350105 | 89 | 0.574865 | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare a state constant and its primed (next-state) copy.

    Returns the pair ``(current, next)`` as mathsat constant terms; the
    next-state name is derived via :func:`name_next`.
    """
    # Leading underscores are reserved (would collide with internal names).
    assert not name.startswith("_"), name
    s = msat_declare_function(menv, name, c_type)
    s = msat_make_constant(menv, s)
    x_s = msat_declare_function(menv, name_next(name), c_type)
    x_s = msat_make_constant(menv, x_s)
    return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enumeration of `enum_size` values over boolean constants.

    Uses ceil(log2(enum_size)) boolean (curr, next) constant pairs.
    Returns ``(b_vars, vals, x_vals)`` where `b_vars` are the declared
    pairs, and `vals` / `x_vals` are, for each enum value, the conjunction
    of (possibly negated) bits identifying it in the current / next state.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Binary representation of the value, zero-padded to num_bits.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # Bit i selects either the positive bool pair or its negation;
        # reversed() makes index 0 the least-significant bit.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 - arg1``, encoded as ``arg0 + (-1 * arg1)``."""
    m_one = msat_make_number(menv, "-1")
    arg1 = msat_make_times(menv, arg1, m_one)
    return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 < arg1``, encoded as ``not (arg0 >= arg1)``."""
    geq = msat_make_geq(menv, arg0, arg1)
    return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 >= arg1``, encoded as ``arg1 <= arg0``."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 > arg1``, encoded as ``not (arg0 <= arg1)``."""
    leq = msat_make_leq(menv, arg0, arg1)
    return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the implication ``arg0 -> arg1`` as ``(not arg0) or arg1``."""
    n_arg0 = msat_make_not(menv, arg0)
    return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols allowed to diverge: the rational `delta` constant."""
    real_type = msat_get_rational_type(menv)
    delta = msat_declare_function(menv, delta_name, real_type)
    delta = msat_make_constant(menv, delta)
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for model components sharing a mathsat environment.

    Subclasses populate `symb2next`, `init` and `trans` with their own
    state symbols, initial condition and transition relation.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        # Maps each current-state symbol to its primed (next-state) symbol.
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        """Declare a component-prefixed constant and its next-state copy."""
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a component-prefixed boolean-encoded enumeration."""
        c_name = "{}_{}".format(self.name, v_name)
        return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i0))
hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| true | true |
f72e8d751c0523f0e8d682b392acc2d5952982f1 | 394 | py | Python | checkout/migrations/0005_orderitem_completed.py | yusif763/Unistore-pro | 41ad0fa209c79a201d3f6a7aa68ec0ace707dcad | [
"MIT"
] | 3 | 2021-04-29T10:49:06.000Z | 2022-03-03T12:40:21.000Z | checkout/migrations/0005_orderitem_completed.py | yusif763/Unistore-pro | 41ad0fa209c79a201d3f6a7aa68ec0ace707dcad | [
"MIT"
] | null | null | null | checkout/migrations/0005_orderitem_completed.py | yusif763/Unistore-pro | 41ad0fa209c79a201d3f6a7aa68ec0ace707dcad | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-27 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0004_checkout_completed'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='completed',
field=models.BooleanField(default=False),
),
]
| 20.736842 | 53 | 0.606599 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0004_checkout_completed'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='completed',
field=models.BooleanField(default=False),
),
]
| true | true |
f72e8d94ce9c8b70aedb2c77a687ec9e45d85bfa | 2,794 | py | Python | exercises/sliding_alignment.py | saiden89/programming_stefano_roncelli | 3b77259e7e590fffd49dae96e0be16c934cae051 | [
"MIT"
] | 2 | 2020-01-13T08:41:52.000Z | 2020-01-15T15:33:28.000Z | exercises/sliding_alignment.py | saiden89/programming_stefano_roncelli | 3b77259e7e590fffd49dae96e0be16c934cae051 | [
"MIT"
] | null | null | null | exercises/sliding_alignment.py | saiden89/programming_stefano_roncelli | 3b77259e7e590fffd49dae96e0be16c934cae051 | [
"MIT"
] | null | null | null | # 2) Write a script that generates all the possible ungapped alignments of two sequences, scores them and identifies
# the best scoring ones.
#
# These are all the possible ungapped alingments of the two sequences: TCA and GA:
#
# --TCA -TCA TCA TCA TCA- TCA--
# GA--- GA-- GA- -GA --GA ---GA
#
# Using the following scoring scheme:
# Pairwise substitution scores: matrix[a + b] is the score of aligning
# base `a` against base `b` (matches = 2, mismatches 0 / -1 / -2).
matrix = {'AA': 2, 'AC': -1, 'AT': -1, 'AG': -2, 'CC': 2, 'CT': 0, 'CG': -1,
          'TT': 2, 'TG': -1, 'GG': 2, 'CA': -1, 'TA': -1, 'GA': -2, 'TC': 0,
          'GC': -1, 'GT': -1, }

human = open('./data/titin_hu.txt', 'r')
mouse = open('./data/titin_mo.txt', 'r')
seq1 = ''
seq2 = ''
# Bug fix: the human sequence was previously appended to seq2 as well,
# which left seq1 permanently empty; it now goes into seq1.
for line in human:
    seq1 += line.rstrip()
for line in mouse:
    seq2 += line.rstrip()
human.close()
mouse.close()

len_seq1 = len(seq1)
len_seq2 = len(seq2)
# Number of sliding positions of one sequence relative to the other.
iters = len_seq1 + len_seq2
if len_seq1 <= len_seq2:
    short = seq1
    long = seq2
else:
    short = seq2
    long = seq1

len_short = len(short)
len_long = len(long)
# Pad with '-' so the short sequence starts fully to the left of the long
# one; each iteration below slides it one position to the right.
long = list(long + '-' * len_short)
short = list('-' * len_long + short)

highest = None
best_seq1 = ''
best_seq2 = ''


def score_fun(s1, s2, scoring_matrix):
    """Score an ungapped alignment, printing it as a side effect.

    Positions aligned against '-' incur the gap penalty; every other pair
    is looked up in `scoring_matrix`.
    """
    score = 0
    gap_penalty = -2
    for base1, base2 in zip(s1, s2):
        if base1 == '-' or base2 == '-':
            score += gap_penalty
        else:
            score += scoring_matrix[base1 + base2]
    print(''.join(s1), ''.join(s2), score, sep='\n')
    return score


for i in range(iters - 1):
    score = score_fun(long, short, matrix)
    # Slide the short sequence one step to the right relative to the long
    # one, trimming or extending the '-' padding as appropriate.
    if long[-1] == '-' and short[0] == '-':
        del short[0]
        del long[-1]
        score = score_fun(long, short, matrix)
    elif long[-1] != '-' and short[0] == '-':
        short.append('-')
        del short[0]
        score = score_fun(long, short, matrix)
    else:
        long.insert(0, '-')
        short.append('-')
        score = score_fun(long, short, matrix)
    # Bug fix: `highest` used to be seeded with the first score before the
    # comparison, so the first alignment could never be recorded as best.
    if highest is None or score > highest:
        best_seq1 = ''.join(long)
        best_seq2 = ''.join(short)
        highest = score
    print(highest)

# Render a '|' between matching positions of the best alignment.
comp = ''
for base1, base2 in zip(best_seq1, best_seq2):
    if base1 == base2:
        comp += '|'
    else:
        comp += ' '
print(best_seq1, comp, best_seq2, sep='\n')
print('The best alignment score is:', highest)
| 27.94 | 153 | 0.581961 |
'GG': 2, 'CA': -1, 'TA': -1, 'GA': -2, 'TC': 0,
'GC': -1, 'GT': -1, }
human = open('./data/titin_hu.txt', 'r')
mouse = open('./data/titin_mo.txt', 'r')
seq1 = ''
seq2 = ''
for line in human:
line = line.rstrip()
seq2 += line
for line in mouse:
line = line.rstrip()
seq2 += line
len_seq1 = len(seq1)
len_seq2 = len(seq2)
iters = len_seq1 + len_seq2
same_size = False
if len_seq1 < len_seq2:
short = seq1
long = seq2
elif len_seq1 > len_seq2:
short = seq2
long = seq1
else:
same_size = True
short = seq1
long = seq2
len_short = len(short)
len_long = len(long)
long = long + '-' * len_short
short = '-' * len_long + short
short = list(short)
long = list(long)
highest = False
best_seq1 = ''
best_seq2 = ''
def score_fun(s1, s2, scoring_matrix):
score = 0
gap_penalty = -2
for base1, base2 in zip(s1, s2):
if base1 == '-' or base2 == '-':
score += gap_penalty
else:
score += scoring_matrix[base1 + base2]
print(''.join(s1), ''.join(s2), score, sep = '\n')
return score
for i in range(iters - 1):
score = score_fun(long, short, matrix)
if long[-1] == '-' and short[0] == '-':
del short[0]
del long[-1]
score = score_fun(long, short, matrix)
elif long[-1] != '-' and short[0] == '-':
short.append('-')
del short[0]
score = score_fun(long, short, matrix)
else:
long.insert(0, '-')
short.append('-')
score = score_fun(long, short, matrix)
if highest == False:
highest = score
if score > highest:
best_seq1 = ''.join(long)
best_seq2 = ''.join(short)
highest = score
print(highest)
comp = ''
for base1, base2 in zip(best_seq1, best_seq2):
if base1 == base2:
comp += '|'
else:
comp += ' '
print(best_seq1, comp, best_seq2,sep = '\n')
print('The best alignment score is:', highest)
| true | true |
f72e8dcfb6a1d538af304734c158aabf6a2827cf | 5,580 | py | Python | keras/ltlib/evaluation.py | cambridgeltl/cancer-hallmark-cnn | a1aba55ba425aa0deac4f80c97572a146e4097bb | [
"MIT"
] | 7 | 2017-07-15T08:45:06.000Z | 2021-06-28T14:09:25.000Z | keras/ltlib/evaluation.py | cambridgeltl/cancer-hallmark-cnn | a1aba55ba425aa0deac4f80c97572a146e4097bb | [
"MIT"
] | null | null | null | keras/ltlib/evaluation.py | cambridgeltl/cancer-hallmark-cnn | a1aba55ba425aa0deac4f80c97572a146e4097bb | [
"MIT"
] | 4 | 2018-11-27T06:41:29.000Z | 2022-03-19T11:06:43.000Z | import numpy as np # TODO remove dependency
from collections import namedtuple
from itertools import chain
from sklearn import metrics as skmetrics
from util import unique
from logging import warn
BinaryClassificationCounts = namedtuple('BinaryClassificationCounts',
'tp tn fp fn')
BinaryClassificationMetrics = namedtuple('BinaryClassificationMetrics',
'tp tn fp fn acc prec rec fscore')
PrCurvePoint = namedtuple('PrCurvePoint', 'prec rec fscore threshold')
def accuracy(gold, pred):
    """Return the fraction of positions where gold and pred agree.

    Raises ValueError when the two sequences differ in length.
    """
    if len(gold) != len(pred):
        raise ValueError('count mismatch')
    matches = 0
    for g, p in zip(gold, pred):
        if g == p:
            matches += 1
    return 1. * matches / len(gold)
def tp_tn_fp_fn(gold, pred):
    """Return (TP, FN, FP, FN) counts for gold and prediced values.

    Assumes that 0 is negative and all others positive.
    """
    tp = tn = fp = fn = 0
    for g, p in zip(gold, pred):
        gold_positive = g != 0
        if g == p:
            if gold_positive:
                tp += 1
            else:
                tn += 1
        elif gold_positive:
            fn += 1
        else:
            fp += 1
    return BinaryClassificationCounts(tp, tn, fp, fn)
def precision_recall_fscore(tp, fp, fn):
    """Return (precision, recall, f-score) for given counts.

    Each measure falls back to 0.0 when its denominator is zero.
    """
    prec = 1. * tp / (tp + fp) if tp + fp else 0.0
    rec = 1. * tp / (tp + fn) if tp + fn else 0.0
    if prec + rec == 0.0:
        return prec, rec, 0.0
    return prec, rec, 2 * prec * rec / (prec + rec)
def evaluate_binary_classification(gold, pred, positive):
    """Evaluate binary classification performance.

    Map labels in positive to 1 and others to 0.
    Return BinaryClassificationMetrics.
    """
    if len(gold) != len(pred):
        raise ValueError('count mismatch')
    # Reduce both label sequences to 1 (positive) / 0 (negative).
    gold = _binarize(gold, positive)
    pred = _binarize(pred, positive)
    # A gold set with no positives makes recall meaningless; warn loudly.
    if not any(i for i in gold):
        warn('no positive gold labels for %s' % str(positive))
    acc = accuracy(gold, pred)
    tp, tn, fp, fn = tp_tn_fp_fn(gold, pred)
    prec, rec, f = precision_recall_fscore(tp, fp, fn)
    return BinaryClassificationMetrics(tp, tn, fp, fn, acc, prec, rec, f)
def _binarize(a, positive):
"""Return values mapped to 1 or 0.
Map values in positive to 1 and others to 0.
"""
return [1 if i in positive else 0 for i in a]
def average_precision_recall_fscore(results, micro=True):
    """Return average precision, recall and f-score for list of
    BinaryClassificationMetrics.

    With micro=True the raw tp/fp/fn counts are summed first and the
    measures recomputed from the totals; otherwise every metric field is
    averaged directly (macro averaging).
    """
    if micro:
        # Field-wise sum over the metric tuples, then recompute P/R/F.
        total = BinaryClassificationMetrics(*tuple(np.sum(results, axis=0)))
        return precision_recall_fscore(total.tp, total.fp, total.fn)
    else:
        avg = BinaryClassificationMetrics(*tuple(np.average(results, axis=0)))
        return avg.prec, avg.rec, avg.fscore
def _positive_label(labels):
"""Return label representing the positive class or None if ambiguous."""
if set(labels) == set(['positive', 'negative']):
return 'positive'
elif set(labels) == set(['pos', 'neg']):
return 'pos'
else:
return None # TODO other alternatives
def is_binary_labeling(labels):
    """Return True iff given labels represent binary classification."""
    if len(labels) != 2:
        return False
    return _positive_label(labels) is not None
def _binary_labels(dataitems):
    """Return True iff the gold and predicted labels form a binary pair."""
    gold = dataitems.target_strs
    pred = dataitems.prediction_strs
    # Union of all labels seen in either gold or predictions.
    labels = unique(chain(gold, pred))
    return is_binary_labeling(labels)
def f1_score(prec, rec):
    """Harmonic mean of precision and recall; NaN when undefined."""
    from math import isnan
    undefined = isnan(prec) or isnan(rec) or prec + rec == 0.0
    if undefined:
        return float('nan')
    return 2 * prec * rec / (prec + rec)
def max_f_point(dataitems):
    """Return PrCurvePoint with maximal f1 score."""
    import logging
    from sklearn.metrics import precision_recall_curve
    y_true = np.argmax(dataitems.targets, axis=-1)
    # assumes predictions is (n, 2) with columns (negative, positive) —
    # confirm against the producer of `dataitems`.
    prob_neg = dataitems.predictions[:,0] # 1st column
    prob_pos = dataitems.predictions[:,1] # 2nd column
    # Rank items by the margin between positive and negative probability.
    pos_score = prob_pos - prob_neg
    precs, recs, tholds = precision_recall_curve(y_true, pos_score)
    max_f, max_point = float('-inf'), PrCurvePoint(None, None, None, None)
    # Sweep the PR curve and keep the point with the highest f1.
    for p, r, t in zip(precs, recs, tholds):
        f = f1_score(p, r)
        if f > max_f:
            max_f, max_point = f, PrCurvePoint(p, r, f, t)
    return max_point
def evaluate_binary_labeling(dataitems):
    """Compute accuracy, P/R/F, AUC, AP and max-f stats for binary data.

    Returns a flat dict; PR-curve optimum fields are prefixed ``maxf-``.
    """
    gold = dataitems.target_strs
    pred = dataitems.prediction_strs
    labels = unique(chain(gold, pred))
    pos = _positive_label(labels)
    res = {}
    res['acc'] = accuracy(gold, pred)
    bcm = evaluate_binary_classification(gold, pred, pos)
    res.update(bcm._asdict())
    # Ranking-based measures computed from the raw prediction scores.
    res['auc'] = skmetrics.roc_auc_score(dataitems.targets,
                                         dataitems.predictions)
    res['ap'] = skmetrics.average_precision_score(dataitems.targets,
                                                  dataitems.predictions)
    maxfp = max_f_point(dataitems)
    # Flatten the PrCurvePoint into maxf-prec, maxf-rec, maxf-fscore, ...
    res.update({ 'maxf-{}'.format(k): v for k, v in maxfp._asdict().items() })
    return res
def summarize_classification(results):
    """Format the metrics dict from evaluate_binary_labeling as one line."""
    return (
        'acc: {acc:.2%} auc: {auc:.2%} ap: {ap:.2%} ' +
        'f: {fscore:.2%} (p:{prec:.1%} r:{rec:.1%} ' +
        'tp:{tp} fp:{fp} fn:{fn}) ' +
        'maxf: {maxf-fscore:.2%} (p:{maxf-prec:.1%} r:{maxf-rec:.1%} ' +
        'th:{maxf-threshold:.2})'
    ).format(**results)
def evaluate_classification(dataitems):
    """Dispatch evaluation by labeling type; only binary is implemented."""
    if not _binary_labels(dataitems):
        raise NotImplementedError()
    return evaluate_binary_labeling(dataitems)
| 34.233129 | 78 | 0.623835 | import numpy as np
from collections import namedtuple
from itertools import chain
from sklearn import metrics as skmetrics
from util import unique
from logging import warn
BinaryClassificationCounts = namedtuple('BinaryClassificationCounts',
'tp tn fp fn')
BinaryClassificationMetrics = namedtuple('BinaryClassificationMetrics',
'tp tn fp fn acc prec rec fscore')
PrCurvePoint = namedtuple('PrCurvePoint', 'prec rec fscore threshold')
def accuracy(gold, pred):
if len(gold) != len(pred):
raise ValueError('count mismatch')
correct = sum(int(g == p) for g, p in zip(gold, pred))
return 1.*correct/len(gold)
def tp_tn_fp_fn(gold, pred):
tp, tn, fp, fn = 0, 0, 0, 0
for g, p in zip(gold, pred):
if g == p:
if g == 0:
tn += 1
else:
tp += 1
else:
if g == 0:
fp += 1
else:
fn += 1
return BinaryClassificationCounts(tp, tn, fp, fn)
def precision_recall_fscore(tp, fp, fn):
prec = 0.0 if tp + fp == 0 else 1.*tp / (tp + fp)
rec = 0.0 if tp + fn == 0 else 1.*tp / (tp + fn)
f = 0.0 if prec + rec == 0.0 else 2 * prec * rec / (prec + rec)
return prec, rec, f
def evaluate_binary_classification(gold, pred, positive):
if len(gold) != len(pred):
raise ValueError('count mismatch')
gold = _binarize(gold, positive)
pred = _binarize(pred, positive)
if not any(i for i in gold):
warn('no positive gold labels for %s' % str(positive))
acc = accuracy(gold, pred)
tp, tn, fp, fn = tp_tn_fp_fn(gold, pred)
prec, rec, f = precision_recall_fscore(tp, fp, fn)
return BinaryClassificationMetrics(tp, tn, fp, fn, acc, prec, rec, f)
def _binarize(a, positive):
return [1 if i in positive else 0 for i in a]
def average_precision_recall_fscore(results, micro=True):
if micro:
total = BinaryClassificationMetrics(*tuple(np.sum(results, axis=0)))
return precision_recall_fscore(total.tp, total.fp, total.fn)
else:
avg = BinaryClassificationMetrics(*tuple(np.average(results, axis=0)))
return avg.prec, avg.rec, avg.fscore
def _positive_label(labels):
if set(labels) == set(['positive', 'negative']):
return 'positive'
elif set(labels) == set(['pos', 'neg']):
return 'pos'
else:
return None
def is_binary_labeling(labels):
return len(labels) == 2 and _positive_label(labels) is not None
def _binary_labels(dataitems):
gold = dataitems.target_strs
pred = dataitems.prediction_strs
labels = unique(chain(gold, pred))
return is_binary_labeling(labels)
def f1_score(prec, rec):
from math import isnan
if isnan(prec) or isnan(rec) or prec+rec == 0.0:
return float('nan')
else:
return 2*prec*rec/(prec+rec)
def max_f_point(dataitems):
import logging
from sklearn.metrics import precision_recall_curve
y_true = np.argmax(dataitems.targets, axis=-1)
prob_neg = dataitems.predictions[:,0]
prob_pos = dataitems.predictions[:,1]
pos_score = prob_pos - prob_neg
precs, recs, tholds = precision_recall_curve(y_true, pos_score)
max_f, max_point = float('-inf'), PrCurvePoint(None, None, None, None)
for p, r, t in zip(precs, recs, tholds):
f = f1_score(p, r)
if f > max_f:
max_f, max_point = f, PrCurvePoint(p, r, f, t)
return max_point
def evaluate_binary_labeling(dataitems):
gold = dataitems.target_strs
pred = dataitems.prediction_strs
labels = unique(chain(gold, pred))
pos = _positive_label(labels)
res = {}
res['acc'] = accuracy(gold, pred)
bcm = evaluate_binary_classification(gold, pred, pos)
res.update(bcm._asdict())
res['auc'] = skmetrics.roc_auc_score(dataitems.targets,
dataitems.predictions)
res['ap'] = skmetrics.average_precision_score(dataitems.targets,
dataitems.predictions)
maxfp = max_f_point(dataitems)
res.update({ 'maxf-{}'.format(k): v for k, v in maxfp._asdict().items() })
return res
def summarize_classification(results):
return (
'acc: {acc:.2%} auc: {auc:.2%} ap: {ap:.2%} ' +
'f: {fscore:.2%} (p:{prec:.1%} r:{rec:.1%} ' +
'tp:{tp} fp:{fp} fn:{fn}) ' +
'maxf: {maxf-fscore:.2%} (p:{maxf-prec:.1%} r:{maxf-rec:.1%} ' +
'th:{maxf-threshold:.2})'
).format(**results)
def evaluate_classification(dataitems):
if _binary_labels(dataitems):
return evaluate_binary_labeling(dataitems)
else:
raise NotImplementedError()
| true | true |
f72e8e165d3d5f54741251e1676396f0a12b4e4f | 5,929 | py | Python | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | [
"Apache-2.0"
] | null | null | null | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | [
"Apache-2.0"
] | null | null | null | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import os
import re
import time
from collections import OrderedDict
LOG = logging.getLogger(__name__)
def md5sum_command(directory='.', find_type='f', match='', not_match=''):
    """Build a shell pipeline that md5sums files under `directory`.

    Args:
        directory: root passed to ``find``.
        find_type: ``find -type`` argument ('' disables the filter).
        match: POSIX-extended regex a path must match (optional).
        not_match: POSIX-extended regex a path must NOT match (optional).

    Returns:
        The full command string: find | xargs md5sum | awk | sort.
    """
    return ' '.join([i for i in [
        'find', directory,
        ('-type %s' % find_type) if find_type else '',
        '-regextype posix-extended' if match or not_match else '',
        # Bug fix: quote the match pattern like not_match, otherwise regex
        # metacharacters (|, (), *) are interpreted by the shell.
        ('-regex "%s"' % match) if match else '',
        ('! -regex "%s"' % not_match) if not_match else '',
        """-print0 | xargs -0 md5sum | awk '{printf "%-50s %s\\n", $2, $1}' | sort"""
    ] if i])
def check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs):
    """Compare md5 checksums of a local and a remote directory tree.

    Runs the same md5sum pipeline locally (os.popen) and remotely (over
    the SSH chain), dumps both listings under /tmp, and logs files that
    exist only locally, only remotely, or differ.

    Args:
        chain: object of SSHChain providing ``execute``.
        local_path: absolute local directory.
        remote_path: absolute remote directory.
        *md5sum_args, **md5sum_kwargs: forwarded to md5sum_command().
    """
    # Slug used for the /tmp dump filenames.
    # NOTE(review): '%Y%m%d-%H%I%S' uses %I (12-hour clock) where %M
    # (minute) was probably intended — confirm.
    title = re.sub('[^a-zA-Z0-9]', '-', local_path) + '.' + time.strftime('%Y%m%d-%H%I%S')
    cmd_md5sum = md5sum_command(*md5sum_args, **md5sum_kwargs)

    # ---------- get md5sum ----------
    # locally
    command = 'cd ' + local_path + '; ' + cmd_md5sum
    LOG.info('local command: %s', command)
    content = os.popen(command).read()
    with open('/tmp/%s.a.txt' % title, 'w') as _file:
        _file.write(content)
    # Each pipeline output line is "<path> <md5>"; keep sorted order.
    local_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in content.splitlines()])

    # remotely
    command = 'cd ' + remote_path + '; ' + cmd_md5sum
    LOG.info('remote command: %s', command)
    code, out, err = chain.execute('cd ' + remote_path + '; ' + cmd_md5sum, buff_size=1024000)
    out = out.decode('utf-8')
    with open('/tmp/%s.b.txt' % title, 'w') as _file:
        _file.write(out)
    remote_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in out.splitlines()])

    # ---------- compare result ----------
    LOG.info('*' * 50)
    LOG.info('')
    is_synced = True
    # Files present locally: missing remotely, or checksum mismatch.
    for _file in local_sums:
        if _file not in remote_sums:
            is_synced = False
            LOG.info(u'🐈 [LOCAL] ' + _file)
            continue
        if local_sums[_file] != remote_sums[_file]:
            is_synced = False
            LOG.info(u'🐍 [DIFF] ' + _file)
            continue
        # LOG.info('[SAME] ' + _file + ' ignore it')
    # Files that only exist on the remote side.
    for _file in remote_sums:
        if _file not in local_sums:
            is_synced = False
            LOG.info(u'🐦 [REMOTE] ' + _file)
    if is_synced:
        LOG.info(u'㊗️ ㊗️ ㊗️ Perfect!!! ㊗️ ㊗️ ㊗️'.center(44))
    LOG.info('')
    LOG.info('*' * 50)
def sftp_download(chain, files_will_transferred):
    """Download each (remote, local) pair over SFTP, logging failures.

    Args:
        chain: object of SSHChain
        files_will_transferred: list of (remote_path, local_path) tuples
    """
    for src, dst in files_will_transferred:
        try:
            chain.use().download(src, dst)
        except Exception as error:
            LOG.warning(error)
def download_files(chain, local_path, remote_path, files=None):
    """Fetch the named files from remote_path into local_path via SFTP."""
    if not files:
        LOG.debug('Download, but no file specified, over!')
        return
    tasks = []
    for path in files:
        tasks.append((os.path.join(remote_path, path),
                      os.path.join(local_path, path)))
    sftp_download(chain, tasks)
def sftp_upload(chain, files_will_transferred):
""" SFTP upload
Args:
chain: object of SSHChain
files_will_transferred: list[tuple]
"""
LOG.info(files_will_transferred)
for local_path, remote_path in files_will_transferred:
chain.use().upload(local_path, remote_path)
def upload_files(chain, local_path, remote_path, files=None, ignore_patterns=None):
"""Upload local files or directory, can ignore some files by pattern
Args:
chain:
local_path:
remote_path:
files:
ignore_patterns:
"""
files = files or []
ignore_patterns = ignore_patterns or []
re_ignore = re.compile('(%s)' % (')|('.join(ignore_patterns))) if ignore_patterns else ''
move_tasks = []
for path in files:
fullpath = os.path.join(local_path, path)
if not os.path.exists(fullpath):
LOG.error('The file need uploaded not found: %s', fullpath)
exit()
if os.path.isfile(fullpath):
move_tasks.append((fullpath, os.path.join(remote_path, path)))
continue
assert os.path.isdir(fullpath)
for root, dirs, _files in os.walk(fullpath):
for _file in _files:
_fullpath = os.path.join(root, _file)
if re_ignore and re_ignore.search(_fullpath):
continue
relpath = os.path.relpath(_fullpath, local_path)
move_tasks.append((_fullpath, os.path.join(remote_path, relpath)))
sftp_upload(chain, move_tasks)
def file_sync(chain, local_path, remote_path,
files_upload=None, ignore_patterns=None, # upload arguments
files_download=None): # download arguments
if files_download:
download_files(chain, local_path, remote_path, files_download)
if files_upload:
upload_files(chain, local_path, remote_path, files_upload, ignore_patterns)
ACTIONS = 'check', 'sync', 'all',
def main(chain, local_path, remote_path, action='check',
files_upload=None, ignore_patterns=None, files_download=None,
*md5sum_args, **md5sum_kwargs):
"""
Args:
chain: object of SSHChain
local_path: str, absolute path
remote_path: str, absolute path
action: str
files_upload: list of files to upload
ignore_patterns
files_download: list of files to download
md5sum_args:
md5sum_kwargs: like: directory='.', find_type='f', match='', not_match=''
"""
if action not in ACTIONS:
return
def _file_sync():
file_sync(chain, local_path, remote_path, files_upload, ignore_patterns, files_download)
def _check_sum():
check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs)
if action == 'sync':
_file_sync()
return
if action == 'check':
_check_sum()
return
_file_sync()
_check_sum()
| 31.041885 | 110 | 0.608366 |
import logging
import os
import re
import time
from collections import OrderedDict
LOG = logging.getLogger(__name__)
def md5sum_command(directory='.', find_type='f', match='', not_match=''):
return ' '.join([i for i in [
'find', directory,
('-type %s' % find_type) if find_type else '',
'-regextype posix-extended' if match or not_match else '',
('-regex %s' % match) if match else '',
('! -regex "%s"' % not_match) if not_match else '',
"""-print0 | xargs -0 md5sum | awk '{printf "%-50s %s\\n", $2, $1}' | sort"""
] if i])
def check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs):
title = re.sub('[^a-zA-Z0-9]', '-', local_path) + '.' + time.strftime('%Y%m%d-%H%I%S')
cmd_md5sum = md5sum_command(*md5sum_args, **md5sum_kwargs)
command = 'cd ' + local_path + '; ' + cmd_md5sum
LOG.info('local command: %s', command)
content = os.popen(command).read()
with open('/tmp/%s.a.txt' % title, 'w') as _file:
_file.write(content)
local_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in content.splitlines()])
command = 'cd ' + remote_path + '; ' + cmd_md5sum
LOG.info('remote command: %s', command)
code, out, err = chain.execute('cd ' + remote_path + '; ' + cmd_md5sum, buff_size=1024000)
out = out.decode('utf-8')
with open('/tmp/%s.b.txt' % title, 'w') as _file:
_file.write(out)
remote_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in out.splitlines()])
LOG.info('*' * 50)
LOG.info('')
is_synced = True
for _file in local_sums:
if _file not in remote_sums:
is_synced = False
LOG.info(u'🐈 [LOCAL] ' + _file)
continue
if local_sums[_file] != remote_sums[_file]:
is_synced = False
LOG.info(u'🐍 [DIFF] ' + _file)
continue
for _file in remote_sums:
if _file not in local_sums:
is_synced = False
LOG.info(u'🐦 [REMOTE] ' + _file)
if is_synced:
LOG.info(u'㊗️ ㊗️ ㊗️ Perfect!!! ㊗️ ㊗️ ㊗️'.center(44))
LOG.info('')
LOG.info('*' * 50)
def sftp_download(chain, files_will_transferred):
for remote_path, local_path in files_will_transferred:
try:
chain.use().download(remote_path, local_path)
except Exception as error:
LOG.warning(error)
def download_files(chain, local_path, remote_path, files=None):
if not files:
LOG.debug('Download, but no file specified, over!')
return
move_tasks = [(os.path.join(remote_path, path), os.path.join(local_path, path)) for path in files]
sftp_download(chain, move_tasks)
def sftp_upload(chain, files_will_transferred):
LOG.info(files_will_transferred)
for local_path, remote_path in files_will_transferred:
chain.use().upload(local_path, remote_path)
def upload_files(chain, local_path, remote_path, files=None, ignore_patterns=None):
files = files or []
ignore_patterns = ignore_patterns or []
re_ignore = re.compile('(%s)' % (')|('.join(ignore_patterns))) if ignore_patterns else ''
move_tasks = []
for path in files:
fullpath = os.path.join(local_path, path)
if not os.path.exists(fullpath):
LOG.error('The file need uploaded not found: %s', fullpath)
exit()
if os.path.isfile(fullpath):
move_tasks.append((fullpath, os.path.join(remote_path, path)))
continue
assert os.path.isdir(fullpath)
for root, dirs, _files in os.walk(fullpath):
for _file in _files:
_fullpath = os.path.join(root, _file)
if re_ignore and re_ignore.search(_fullpath):
continue
relpath = os.path.relpath(_fullpath, local_path)
move_tasks.append((_fullpath, os.path.join(remote_path, relpath)))
sftp_upload(chain, move_tasks)
def file_sync(chain, local_path, remote_path,
files_upload=None, ignore_patterns=None,
files_download=None):
if files_download:
download_files(chain, local_path, remote_path, files_download)
if files_upload:
upload_files(chain, local_path, remote_path, files_upload, ignore_patterns)
ACTIONS = 'check', 'sync', 'all',
def main(chain, local_path, remote_path, action='check',
files_upload=None, ignore_patterns=None, files_download=None,
*md5sum_args, **md5sum_kwargs):
if action not in ACTIONS:
return
def _file_sync():
file_sync(chain, local_path, remote_path, files_upload, ignore_patterns, files_download)
def _check_sum():
check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs)
if action == 'sync':
_file_sync()
return
if action == 'check':
_check_sum()
return
_file_sync()
_check_sum()
| true | true |
f72e8e927897fafefe42d0edd70415afe4ce8502 | 2,987 | py | Python | mailparser/const.py | nitishkansal/mail-parser | d10e6dbec5c37ca740f5ef2edc70eba2c05fdaf5 | [
"Apache-2.0"
] | 1 | 2021-12-04T14:57:49.000Z | 2021-12-04T14:57:49.000Z | mailparser/const.py | KonstantinKlepikov/mail-parser | c90cb1cd72fcf986fa013acbbc1277b7b6fa2e84 | [
"Apache-2.0"
] | 48 | 2020-02-17T07:43:41.000Z | 2021-08-02T05:42:12.000Z | mailparser/const.py | KonstantinKlepikov/mail-parser | c90cb1cd72fcf986fa013acbbc1277b7b6fa2e84 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
REGXIP = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
JUNK_PATTERN = r'[ \(\)\[\]\t\n]+'
# Patterns for receiveds
RECEIVED_PATTERNS = [
# each pattern handles matching a single clause
# need to exclude withs followed by cipher (e.g., google); (?! cipher)
# TODO: ideally would do negative matching for with in parens
# need the beginning or space to differentiate from envelope-from
(
r'(?:(?:^|\s)from\s+(?P<from>.+?)(?:\s*[(]?'
r'envelope-from|\s*[(]?envelope-sender|\s+'
r'by|\s+with(?! cipher)|\s+id|\s+for|\s+via|;))'
),
# need to make sure envelope-from comes before from to prevent mismatches
# envelope-from and -sender seem to optionally have space and/or
# ( before them other clauses must have whitespace before
(
r'(?:by\s+(?P<by>.+?)(?:\s*[(]?envelope-from|\s*'
r'[(]?envelope-sender|\s+from|\s+with'
r'(?! cipher)|\s+id|\s+for|\s+via|;))'
),
(
r'(?:with(?! cipher)\s+(?P<with>.+?)(?:\s*[(]?envelope-from|\s*[(]?'
r'envelope-sender|\s+from|\s+by|\s+id|\s+for|\s+via|;))'
),
(
r'[^\w](?:id\s+(?P<id>.+?)(?:\s*[(]?envelope-from|\s*'
r'[(]?envelope-sender|\s+from|\s+by|\s+with'
r'(?! cipher)|\s+for|\s+via|;))'
),
(
r'(?:for\s+(?P<for>.+?)(?:\s*[(]?envelope-from|\s*[(]?'
r'envelope-sender|\s+from|\s+by|\s+with'
r'(?! cipher)|\s+id|\s+via|;))'
),
(
r'(?:via\s+(?P<via>.+?)(?:\s*[(]?'
r'envelope-from|\s*[(]?envelope-sender|\s+'
r'from|\s+by|\s+id|\s+for|\s+with(?! cipher)|;))'
),
# assumes emails are always inside <>
r'(?:envelope-from\s+<(?P<envelope_from>.+?)>)',
r'(?:envelope-sender\s+<(?P<envelope_sender>.+?)>)',
# datetime comes after ; at the end
r';\s*(?P<date>.*)'
]
RECEIVED_COMPILED_LIST = [
re.compile(i, re.I | re.DOTALL) for i in RECEIVED_PATTERNS]
EPILOGUE_DEFECTS = {"StartBoundaryNotFoundDefect"}
ADDRESSES_HEADERS = set([
"bcc",
"cc",
"delivered-to",
"from",
"reply-to",
"to"])
# These parts are always returned
OTHERS_PARTS = set([
"attachments",
"body",
"date",
"message-id",
"received",
"subject",
"timezone",
"to_domains",
"user-agent",
"x-mailer",
"x-original-to",
])
| 28.721154 | 77 | 0.579846 |
import re
REGXIP = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
JUNK_PATTERN = r'[ \(\)\[\]\t\n]+'
RECEIVED_PATTERNS = [
(
r'(?:(?:^|\s)from\s+(?P<from>.+?)(?:\s*[(]?'
r'envelope-from|\s*[(]?envelope-sender|\s+'
r'by|\s+with(?! cipher)|\s+id|\s+for|\s+via|;))'
),
(
r'(?:by\s+(?P<by>.+?)(?:\s*[(]?envelope-from|\s*'
r'[(]?envelope-sender|\s+from|\s+with'
r'(?! cipher)|\s+id|\s+for|\s+via|;))'
),
(
r'(?:with(?! cipher)\s+(?P<with>.+?)(?:\s*[(]?envelope-from|\s*[(]?'
r'envelope-sender|\s+from|\s+by|\s+id|\s+for|\s+via|;))'
),
(
r'[^\w](?:id\s+(?P<id>.+?)(?:\s*[(]?envelope-from|\s*'
r'[(]?envelope-sender|\s+from|\s+by|\s+with'
r'(?! cipher)|\s+for|\s+via|;))'
),
(
r'(?:for\s+(?P<for>.+?)(?:\s*[(]?envelope-from|\s*[(]?'
r'envelope-sender|\s+from|\s+by|\s+with'
r'(?! cipher)|\s+id|\s+via|;))'
),
(
r'(?:via\s+(?P<via>.+?)(?:\s*[(]?'
r'envelope-from|\s*[(]?envelope-sender|\s+'
r'from|\s+by|\s+id|\s+for|\s+with(?! cipher)|;))'
),
r'(?:envelope-from\s+<(?P<envelope_from>.+?)>)',
r'(?:envelope-sender\s+<(?P<envelope_sender>.+?)>)',
r';\s*(?P<date>.*)'
]
RECEIVED_COMPILED_LIST = [
re.compile(i, re.I | re.DOTALL) for i in RECEIVED_PATTERNS]
EPILOGUE_DEFECTS = {"StartBoundaryNotFoundDefect"}
ADDRESSES_HEADERS = set([
"bcc",
"cc",
"delivered-to",
"from",
"reply-to",
"to"])
OTHERS_PARTS = set([
"attachments",
"body",
"date",
"message-id",
"received",
"subject",
"timezone",
"to_domains",
"user-agent",
"x-mailer",
"x-original-to",
])
| true | true |
f72e9050f2607486b7faa4d1c4b1e9b2a220fdb9 | 360 | py | Python | yeelight/__init__.py | kmohrf/python-yeelight | 483019c074556b4c3d2f665398f0fc308afd6274 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | yeelight/__init__.py | kmohrf/python-yeelight | 483019c074556b4c3d2f665398f0fc308afd6274 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | yeelight/__init__.py | kmohrf/python-yeelight | 483019c074556b4c3d2f665398f0fc308afd6274 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # flake8: noqa
"""A Python library for controlling YeeLight RGB bulbs."""
from yeelight.enums import BulbType, CronType, LightType, PowerMode, SceneClass
from yeelight.flow import Flow, HSVTransition, RGBTransition, SleepTransition, TemperatureTransition
from yeelight.main import Bulb, BulbException, discover_bulbs
from yeelight.version import __version__
| 40 | 100 | 0.827778 |
from yeelight.enums import BulbType, CronType, LightType, PowerMode, SceneClass
from yeelight.flow import Flow, HSVTransition, RGBTransition, SleepTransition, TemperatureTransition
from yeelight.main import Bulb, BulbException, discover_bulbs
from yeelight.version import __version__
| true | true |
f72e9115ce15721094d17e92eb46738e05716f2a | 2,138 | py | Python | keras/utils/np_utils.py | Jallet/keras-jl-ac-mean | 2bbc1596192fb8c3aefc4a8126482a5283574a59 | [
"MIT"
] | 1 | 2016-12-15T07:20:42.000Z | 2016-12-15T07:20:42.000Z | keras/utils/np_utils.py | Jallet/keras-jl-ac-mean | 2bbc1596192fb8c3aefc4a8126482a5283574a59 | [
"MIT"
] | null | null | null | keras/utils/np_utils.py | Jallet/keras-jl-ac-mean | 2bbc1596192fb8c3aefc4a8126482a5283574a59 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import numpy as np
import scipy as sp
from six.moves import range
from six.moves import zip
def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy.
'''
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
def normalize(a, axis=-1, order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2 == 0] = 1
return a / np.expand_dims(l2, axis)
def binary_logloss(p, y):
epsilon = 1e-15
p = sp.maximum(epsilon, p)
p = sp.minimum(1-epsilon, p)
res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
res *= -1.0/len(y)
return res
def multiclass_logloss(P, Y):
npreds = [P[i][Y[i]-1] for i in range(len(Y))]
score = -(1. / len(Y)) * np.sum(np.log(npreds))
return score
def accuracy(p, y):
return np.mean([a == b for a, b in zip(p, y)])
def probas_to_classes(y_pred):
if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:
return categorical_probas_to_classes(y_pred)
return np.array([1 if p > 0.5 else 0 for p in y_pred])
def categorical_probas_to_classes(p):
return np.argmax(p, axis=1)
def convert_kernel(kernel, dim_ordering='th'):
'''Converts a kernel matrix (numpy array)
from Theano format to TensorFlow format
(or reciprocally, since the transformation
is its own inverse).
'''
new_kernel = np.copy(kernel)
if dim_ordering == 'th':
w = kernel.shape[2]
h = kernel.shape[3]
for i in range(w):
for j in range(h):
new_kernel[:, :, i, j] = kernel[:, :, w - i - 1, h - j - 1]
elif dim_ordering == 'tf':
w = kernel.shape[0]
h = kernel.shape[1]
for i in range(w):
for j in range(h):
new_kernel[i, j, :, :] = kernel[w - i - 1, h - j - 1, :, :]
else:
raise Exception('Invalid dim_ordering: ' + str(dim_ordering))
return new_kernel
| 27.766234 | 76 | 0.596819 | from __future__ import absolute_import
import numpy as np
import scipy as sp
from six.moves import range
from six.moves import zip
def to_categorical(y, nb_classes=None):
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
def normalize(a, axis=-1, order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2 == 0] = 1
return a / np.expand_dims(l2, axis)
def binary_logloss(p, y):
epsilon = 1e-15
p = sp.maximum(epsilon, p)
p = sp.minimum(1-epsilon, p)
res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
res *= -1.0/len(y)
return res
def multiclass_logloss(P, Y):
npreds = [P[i][Y[i]-1] for i in range(len(Y))]
score = -(1. / len(Y)) * np.sum(np.log(npreds))
return score
def accuracy(p, y):
return np.mean([a == b for a, b in zip(p, y)])
def probas_to_classes(y_pred):
if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:
return categorical_probas_to_classes(y_pred)
return np.array([1 if p > 0.5 else 0 for p in y_pred])
def categorical_probas_to_classes(p):
return np.argmax(p, axis=1)
def convert_kernel(kernel, dim_ordering='th'):
new_kernel = np.copy(kernel)
if dim_ordering == 'th':
w = kernel.shape[2]
h = kernel.shape[3]
for i in range(w):
for j in range(h):
new_kernel[:, :, i, j] = kernel[:, :, w - i - 1, h - j - 1]
elif dim_ordering == 'tf':
w = kernel.shape[0]
h = kernel.shape[1]
for i in range(w):
for j in range(h):
new_kernel[i, j, :, :] = kernel[w - i - 1, h - j - 1, :, :]
else:
raise Exception('Invalid dim_ordering: ' + str(dim_ordering))
return new_kernel
| true | true |
f72e9130a037400e6880119aa046276237c3a43a | 827 | py | Python | conman/routes/migrations/0003_add_validators.py | meshy/django-conman | c739d09250d02d99068358e925ed8298a2a37a75 | [
"BSD-2-Clause"
] | null | null | null | conman/routes/migrations/0003_add_validators.py | meshy/django-conman | c739d09250d02d99068358e925ed8298a2a37a75 | [
"BSD-2-Clause"
] | 81 | 2015-07-27T23:21:49.000Z | 2018-05-21T22:06:09.000Z | conman/routes/migrations/0003_add_validators.py | meshy/django-conman | c739d09250d02d99068358e925ed8298a2a37a75 | [
"BSD-2-Clause"
] | 2 | 2015-10-06T09:18:06.000Z | 2017-03-17T08:51:56.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import conman.routes.validators
class Migration(migrations.Migration):
dependencies = [
('routes', '0002_remove_slug_parent'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, help_text='The operative URL for this Route.', validators=[conman.routes.validators.validate_end_in_slash, conman.routes.validators.validate_start_in_slash, conman.routes.validators.validate_no_dotty_subpaths, conman.routes.validators.validate_no_double_slashes, conman.routes.validators.validate_no_hash_symbol, conman.routes.validators.validate_no_questionmark], unique=True, verbose_name='URL'),
),
]
| 39.380952 | 448 | 0.740024 |
from __future__ import unicode_literals
from django.db import models, migrations
import conman.routes.validators
class Migration(migrations.Migration):
dependencies = [
('routes', '0002_remove_slug_parent'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, help_text='The operative URL for this Route.', validators=[conman.routes.validators.validate_end_in_slash, conman.routes.validators.validate_start_in_slash, conman.routes.validators.validate_no_dotty_subpaths, conman.routes.validators.validate_no_double_slashes, conman.routes.validators.validate_no_hash_symbol, conman.routes.validators.validate_no_questionmark], unique=True, verbose_name='URL'),
),
]
| true | true |
f72e91674e57314a4df5f2282bf245e700c13ea3 | 1,852 | py | Python | pycs/character.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | pycs/character.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | pycs/character.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | """ Base Player character """
from pycs.creature import Creature
from pycs.spell import SpellAction
from pycs.races import Human
from pycs.util import check_args
from pycs.constant import Condition
from pycs.constant import DamageType
##############################################################################
class Character(Creature):
"""Base character class"""
def __init__(self, **kwargs):
check_args(self._valid_args(), self.__class__.__name__, kwargs)
self.level = kwargs.get("level", 1)
self.race = kwargs.get("race", Human)()
self.race.owner = self
if "prof_bonus" not in kwargs:
profb = int((kwargs.get("level", 1) - 1) / 4) + 2
kwargs.update({"prof_bonus": profb})
super().__init__(**kwargs)
##########################################################################
def _valid_args(self):
"""What is valid in this class for kwargs"""
return super()._valid_args() | {"level", "race"}
##########################################################################
def shortrepr(self):
"""Arena repr"""
##########################################################################
def spell_actions(self):
"""Return a list of actions that are spells"""
return [_ for _ in self.actions if issubclass(_.__class__, SpellAction)]
##########################################################################
def creature_fallen_unconscious(
self, dmg: int, dmg_type: DamageType, critical: bool
) -> None:
"""Character has fallen unconscious"""
self.hp = 0
if self.has_condition(Condition.UNCONSCIOUS):
return
self.remove_concentration()
self.add_condition(Condition.UNCONSCIOUS)
self.remove_condition(Condition.OK)
# EOF
| 35.615385 | 80 | 0.50432 | from pycs.creature import Creature
from pycs.spell import SpellAction
from pycs.races import Human
from pycs.util import check_args
from pycs.constant import Condition
from pycs.constant import DamageType
| true | true |
f72e91692ce19b47db39e089f77ace71680d7ddd | 1,089 | py | Python | sdk/python/pulumi_azure_native/storsimple/v20161001/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storsimple/v20161001/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storsimple/v20161001/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .access_control_record import *
from .backup_schedule_group import *
from .chap_setting import *
from .file_server import *
from .file_share import *
from .get_access_control_record import *
from .get_backup_schedule_group import *
from .get_chap_setting import *
from .get_file_server import *
from .get_file_share import *
from .get_iscsi_disk import *
from .get_iscsi_server import *
from .get_manager import *
from .get_manager_encryption_key import *
from .get_manager_extended_info import *
from .get_storage_account_credential import *
from .get_storage_domain import *
from .iscsi_disk import *
from .iscsi_server import *
from .manager import *
from .manager_extended_info import *
from .storage_account_credential import *
from .storage_domain import *
from ._inputs import *
from . import outputs
| 32.029412 | 80 | 0.790634 |
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .access_control_record import *
from .backup_schedule_group import *
from .chap_setting import *
from .file_server import *
from .file_share import *
from .get_access_control_record import *
from .get_backup_schedule_group import *
from .get_chap_setting import *
from .get_file_server import *
from .get_file_share import *
from .get_iscsi_disk import *
from .get_iscsi_server import *
from .get_manager import *
from .get_manager_encryption_key import *
from .get_manager_extended_info import *
from .get_storage_account_credential import *
from .get_storage_domain import *
from .iscsi_disk import *
from .iscsi_server import *
from .manager import *
from .manager_extended_info import *
from .storage_account_credential import *
from .storage_domain import *
from ._inputs import *
from . import outputs
| true | true |
f72e91c889dbea08c3f350b640e6e04270a313e4 | 1,951 | py | Python | data/logs_model/__init__.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | 1 | 2020-10-16T19:30:41.000Z | 2020-10-16T19:30:41.000Z | data/logs_model/__init__.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | 15 | 2020-06-18T15:32:06.000Z | 2022-03-03T23:06:24.000Z | data/logs_model/__init__.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import logging
from data.logs_model.table_logs_model import TableLogsModel
from data.logs_model.document_logs_model import DocumentLogsModel
from data.logs_model.combined_model import CombinedLogsModel
logger = logging.getLogger(__name__)
def _transition_model(*args, **kwargs):
return CombinedLogsModel(DocumentLogsModel(*args, **kwargs), TableLogsModel(*args, **kwargs),)
_LOG_MODELS = {
"database": TableLogsModel,
"transition_reads_both_writes_es": _transition_model,
"elasticsearch": DocumentLogsModel,
}
_PULL_LOG_KINDS = {"pull_repo", "repo_verb"}
class LogsModelProxy(object):
def __init__(self):
self._model = None
def initialize(self, model):
self._model = model
logger.info("===============================")
logger.info("Using logs model `%s`", self._model)
logger.info("===============================")
def __getattr__(self, attr):
if not self._model:
raise AttributeError("LogsModelProxy is not initialized")
return getattr(self._model, attr)
logs_model = LogsModelProxy()
def configure(app_config):
logger.debug("Configuring log lodel")
model_name = app_config.get("LOGS_MODEL", "database")
model_config = app_config.get("LOGS_MODEL_CONFIG", {})
def should_skip_logging(kind_name, namespace_name, is_free_namespace):
if namespace_name and namespace_name in app_config.get("DISABLED_FOR_AUDIT_LOGS", {}):
return True
if kind_name in _PULL_LOG_KINDS:
if namespace_name and namespace_name in app_config.get("DISABLED_FOR_PULL_LOGS", {}):
return True
if app_config.get("FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES"):
if is_free_namespace:
return True
return False
model_config["should_skip_logging"] = should_skip_logging
logs_model.initialize(_LOG_MODELS[model_name](**model_config))
| 30.968254 | 98 | 0.682727 | import logging
from data.logs_model.table_logs_model import TableLogsModel
from data.logs_model.document_logs_model import DocumentLogsModel
from data.logs_model.combined_model import CombinedLogsModel
logger = logging.getLogger(__name__)
def _transition_model(*args, **kwargs):
return CombinedLogsModel(DocumentLogsModel(*args, **kwargs), TableLogsModel(*args, **kwargs),)
_LOG_MODELS = {
"database": TableLogsModel,
"transition_reads_both_writes_es": _transition_model,
"elasticsearch": DocumentLogsModel,
}
_PULL_LOG_KINDS = {"pull_repo", "repo_verb"}
class LogsModelProxy(object):
def __init__(self):
self._model = None
def initialize(self, model):
self._model = model
logger.info("===============================")
logger.info("Using logs model `%s`", self._model)
logger.info("===============================")
def __getattr__(self, attr):
if not self._model:
raise AttributeError("LogsModelProxy is not initialized")
return getattr(self._model, attr)
logs_model = LogsModelProxy()
def configure(app_config):
logger.debug("Configuring log lodel")
model_name = app_config.get("LOGS_MODEL", "database")
model_config = app_config.get("LOGS_MODEL_CONFIG", {})
def should_skip_logging(kind_name, namespace_name, is_free_namespace):
if namespace_name and namespace_name in app_config.get("DISABLED_FOR_AUDIT_LOGS", {}):
return True
if kind_name in _PULL_LOG_KINDS:
if namespace_name and namespace_name in app_config.get("DISABLED_FOR_PULL_LOGS", {}):
return True
if app_config.get("FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES"):
if is_free_namespace:
return True
return False
model_config["should_skip_logging"] = should_skip_logging
logs_model.initialize(_LOG_MODELS[model_name](**model_config))
| true | true |
f72e91dd9b9f3ef8e8276dee26fe5cfef7d818db | 1,160 | py | Python | nornir/failed_tasks/partial_fail.py | twin-bridges/pynet-ons | f3abe14a6760bddea2addab75e6d87d5f5454b7b | [
"Apache-2.0"
] | 1 | 2021-01-11T23:17:26.000Z | 2021-01-11T23:17:26.000Z | nornir/failed_tasks/partial_fail.py | twin-bridges/pynet-ons | f3abe14a6760bddea2addab75e6d87d5f5454b7b | [
"Apache-2.0"
] | null | null | null | nornir/failed_tasks/partial_fail.py | twin-bridges/pynet-ons | f3abe14a6760bddea2addab75e6d87d5f5454b7b | [
"Apache-2.0"
] | 7 | 2020-07-21T17:15:08.000Z | 2021-12-14T01:13:56.000Z | from nornir import InitNornir
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.functions.text import print_result
def failed_task(task):
print()
print("-" * 60)
print(f"This is a host that earlier failed: {task.host.name}")
print("-" * 60)
print()
if __name__ == "__main__":
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config2.yaml")
aggr_result = nr.run(task=netmiko_send_command, command_string="show configuration")
print(aggr_result.failed)
print(aggr_result.failed_hosts.keys())
# Run second task on only successful hosts
aggr_result = nr.run(task=netmiko_send_command, command_string="show arp")
print_result(aggr_result)
# Run a task on the failed hosts
aggr_result = nr.run(task=failed_task, on_failed=True, on_good=False)
# Recover specific host
print(f"Failed Hosts: {nr.data.failed_hosts}")
nr.data.recover_host("vmx2")
# Reset failed hosts
print(f"Failed Hosts: {nr.data.failed_hosts}")
print("Reset failed hosts")
nr.data.reset_failed_hosts()
print(f"Failed Hosts: {nr.data.failed_hosts}")
| 28.292683 | 88 | 0.711207 | from nornir import InitNornir
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.functions.text import print_result
def failed_task(task):
print()
print("-" * 60)
print(f"This is a host that earlier failed: {task.host.name}")
print("-" * 60)
print()
if __name__ == "__main__":
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config2.yaml")
aggr_result = nr.run(task=netmiko_send_command, command_string="show configuration")
print(aggr_result.failed)
print(aggr_result.failed_hosts.keys())
aggr_result = nr.run(task=netmiko_send_command, command_string="show arp")
print_result(aggr_result)
aggr_result = nr.run(task=failed_task, on_failed=True, on_good=False)
print(f"Failed Hosts: {nr.data.failed_hosts}")
nr.data.recover_host("vmx2")
print(f"Failed Hosts: {nr.data.failed_hosts}")
print("Reset failed hosts")
nr.data.reset_failed_hosts()
print(f"Failed Hosts: {nr.data.failed_hosts}")
| true | true |
f72e93a0f4fd80901bd47d30bfa9f5cea0f21a98 | 663 | py | Python | schematic/utils/__init__.py | nf-osi/schematic | b59856f40c613a43d117fe3fafa2ca5ba5bbf8d6 | [
"MIT"
] | null | null | null | schematic/utils/__init__.py | nf-osi/schematic | b59856f40c613a43d117fe3fafa2ca5ba5bbf8d6 | [
"MIT"
] | 6 | 2020-10-08T19:53:47.000Z | 2021-05-07T14:50:39.000Z | schematic/utils/__init__.py | nf-osi/schematic | b59856f40c613a43d117fe3fafa2ca5ba5bbf8d6 | [
"MIT"
] | null | null | null | from schematic.utils.curie_utils import expand_curie_to_uri, expand_curies_in_schema, extract_name_from_uri_or_curie, uri2label
from schematic.utils.df_utils import update_df
from schematic.utils.general import dict2list, find_duplicates, str2list, unlist
from schematic.utils.google_api_utils import download_creds_file, execute_google_api_requests
from schematic.utils.io_utils import export_json, load_default, load_json, load_schemaorg
from schematic.utils.schema_utils import load_schema_into_networkx
from schematic.utils.validate_utils import validate_class_schema, validate_property_schema, validate_schema
from schematic.utils.viz_utils import visualize
| 73.666667 | 127 | 0.891403 | from schematic.utils.curie_utils import expand_curie_to_uri, expand_curies_in_schema, extract_name_from_uri_or_curie, uri2label
from schematic.utils.df_utils import update_df
from schematic.utils.general import dict2list, find_duplicates, str2list, unlist
from schematic.utils.google_api_utils import download_creds_file, execute_google_api_requests
from schematic.utils.io_utils import export_json, load_default, load_json, load_schemaorg
from schematic.utils.schema_utils import load_schema_into_networkx
from schematic.utils.validate_utils import validate_class_schema, validate_property_schema, validate_schema
from schematic.utils.viz_utils import visualize
| true | true |
f72e93d2aa15998810aa737c1e59d2250453be8f | 2,173 | py | Python | os_ken/lib/sockopt.py | faucetsdn/python3-os-ken | 31037f6388b7885c859391802451b867c30f1694 | [
"Apache-2.0"
] | 4 | 2018-10-25T08:42:56.000Z | 2019-04-24T04:01:26.000Z | os_ken/lib/sockopt.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | [
"Apache-2.0"
] | 1 | 2021-05-09T06:14:16.000Z | 2021-05-09T06:14:18.000Z | os_ken/lib/sockopt.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | [
"Apache-2.0"
] | 5 | 2019-04-24T04:01:01.000Z | 2020-06-20T14:38:04.000Z | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import socket
import struct
from os_ken.lib import sockaddr
TCP_MD5SIG_LINUX = 0x0e
TCP_MD5SIG_BSD = 0x10
def _set_tcp_md5sig_linux(s, addr, key):
    """Enable TCP-MD5 on a Linux socket.

    Builds the kernel's ``struct tcp_md5sig`` option value:
        struct sockaddr_storage addr;   /* peer address */
        u16 pad1;
        u16 keylen;
        u32 pad2;
        u8  key[80];

    :param s: a connected or connecting TCP socket
    :param addr: peer address string, matching the socket family
    :param key: MD5 key as bytes (at most 80 bytes)
    :raises ValueError: if the socket family is neither AF_INET nor AF_INET6
    """
    # Map the socket family to the matching sockaddr builder.
    builders = {
        socket.AF_INET: sockaddr.sa_in4,
        socket.AF_INET6: sockaddr.sa_in6,
    }
    family = s.family
    builder = builders.get(family)
    if builder is None:
        raise ValueError("unsupported af %s" % (family,))
    ss = sockaddr.sa_to_ss(builder(addr))
    # "2xH4x80s": pad1 (2 bytes), keylen, pad2 (4 bytes), fixed 80-byte key area.
    option_value = ss + struct.pack("2xH4x80s", len(key), key)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG_LINUX, option_value)
def _set_tcp_md5sig_bsd(s, _addr, _key):
    """Enable TCP-MD5 on a *BSD socket.

    On BSD the address/key pair must be configured out-of-band with
    setkey(8); the socket option itself is only an "enabled" flag,
    which is why the address and key arguments are ignored here.
    """
    enable_flag = struct.pack("I", 1)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG_BSD, enable_flag)
def set_tcp_md5sig(s, addr, key):
    """Enable TCP-MD5 on the given socket.

    :param s: Socket
    :param addr: Associated address. On some platforms, this has no effect.
    :param key: Key. On some platforms, this has no effect.
    :raises NotImplementedError: on platforms without a known implementation.
    """
    # Dispatch on the OS name; both BSD flavours share one implementation.
    impl = {
        'FreeBSD': _set_tcp_md5sig_bsd,
        'Linux': _set_tcp_md5sig_linux,
        'NetBSD': _set_tcp_md5sig_bsd,
    }.get(platform.system())
    if impl is None:
        raise NotImplementedError("TCP-MD5 unsupported on this platform")
    impl(s, addr, key)
| 30.180556 | 77 | 0.682006 |
import platform
import socket
import struct
from os_ken.lib import sockaddr
TCP_MD5SIG_LINUX = 0x0e
TCP_MD5SIG_BSD = 0x10
def _set_tcp_md5sig_linux(s, addr, key):
    # Option value follows the kernel's struct tcp_md5sig layout:
    #   struct sockaddr_storage addr; u16 pad1; u16 keylen; u32 pad2; u8 key[80];
    af = s.family
    if af == socket.AF_INET:
        sa = sockaddr.sa_in4(addr)
    elif af == socket.AF_INET6:
        sa = sockaddr.sa_in6(addr)
    else:
        raise ValueError("unsupported af %s" % (af,))
    ss = sockaddr.sa_to_ss(sa)
    # "2xH4x80s": pad1 (2 bytes), keylen, pad2 (4 bytes), fixed 80-byte key area.
    tcp_md5sig = ss + struct.pack("2xH4x80s", len(key), key)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG_LINUX, tcp_md5sig)
def _set_tcp_md5sig_bsd(s, _addr, _key):
    # On BSD, address and key are configured separately via setkey(8);
    # the socket option is just a boolean "enabled" flag.
    tcp_md5sig = struct.pack("I", 1)
    s.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG_BSD, tcp_md5sig)
def set_tcp_md5sig(s, addr, key):
    """Enable TCP-MD5 on the given socket.

    :param s: Socket
    :param addr: Associated address. On some platforms, this has no effect.
    :param key: Key. On some platforms, this has no effect.
    :raises NotImplementedError: on platforms without a known implementation.
    """
    # Per-OS dispatch table; both BSD flavours share one implementation.
    impls = {
        'FreeBSD': _set_tcp_md5sig_bsd,
        'Linux': _set_tcp_md5sig_linux,
        'NetBSD': _set_tcp_md5sig_bsd,
    }
    system = platform.system()
    try:
        impl = impls[system]
    except KeyError:
        raise NotImplementedError("TCP-MD5 unsupported on this platform")
    impl(s, addr, key)
| true | true |
f72e9456c271e20b045e78bd9e5b9a814fc335e4 | 854 | py | Python | source/cmdline_test.py | birdwes/Asterisk_Google_Authenticator | 85d1eec3bcf8ca5a3b8df258e00096a4854101dc | [
"MIT"
] | 1 | 2022-02-13T19:03:17.000Z | 2022-02-13T19:03:17.000Z | source/cmdline_test.py | birdwes/Asterisk_Google_Authenticator | 85d1eec3bcf8ca5a3b8df258e00096a4854101dc | [
"MIT"
] | null | null | null | source/cmdline_test.py | birdwes/Asterisk_Google_Authenticator | 85d1eec3bcf8ca5a3b8df258e00096a4854101dc | [
"MIT"
] | 2 | 2022-02-14T09:02:48.000Z | 2022-02-16T06:50:40.000Z | #!/usr/bin/env python3
import sys
import hashlib
import hmac
import base64
import secrets
import re
import datetime
from datetime import timezone
import math
from GoogleOTP import GoogleOTP
# Implementation of Google Authenticator verification
# To generate secrets:
# secret = generateSecret()
# print( secret )
# Based on https://github.com/enquirer/enquirer?ref=hackernoon.com
# https://hackernoon.com/how-to-implement-google-authenticator-two-factor-auth-in-javascript-091wy3vh3
#
# License is MIT
class GoogleOTPfileLookup( GoogleOTP ):
    """GoogleOTP subclass that supplies a fixed, hard-coded test secret.

    NOTE(review): every userID maps to the same base32 secret; this is a
    command-line test fixture only and must never be used in production.
    """
    def lookupUserSecret( self, userID ):
        # Same base32 secret for all users (test fixture).
        return 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
# Interactive smoke test: prompt for a 6-digit code and verify it against
# the fixed test secret for user "0001".
otp = GoogleOTPfileLookup()
#secret = otp.lookupUserSecret( '0001' )
pin = input("Enter 6 digit OTP: ")
# verify() resolves the secret through lookupUserSecret() internally.
bResult = otp.verify( "0001", pin )
#bResult = otp.verifyTOTP(pin, secret, window = 1)
print(bResult)
| 21.897436 | 102 | 0.758782 |
import sys
import hashlib
import hmac
import base64
import secrets
import re
import datetime
from datetime import timezone
import math
from GoogleOTP import GoogleOTP
class GoogleOTPfileLookup( GoogleOTP ):
def lookupUserSecret( self, userID ):
return 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
otp = GoogleOTPfileLookup()
pin = input("Enter 6 digit OTP: ")
bResult = otp.verify( "0001", pin )
print(bResult)
| true | true |
f72e9481f5e2c06c3f9ab22b5c21260254d1a13a | 22,798 | py | Python | django_sites/pegasus/venv/lib/python3.6/site-packages/postman/models.py | omaralaniz/pegasus | 410d0f0bcaace704a81e62eba68f76bf13a79399 | [
"MIT"
] | null | null | null | django_sites/pegasus/venv/lib/python3.6/site-packages/postman/models.py | omaralaniz/pegasus | 410d0f0bcaace704a81e62eba68f76bf13a79399 | [
"MIT"
] | 1 | 2020-07-20T01:03:42.000Z | 2020-07-20T01:03:42.000Z | django_sites/pegasus/venv/lib/python3.6/site-packages/postman/models.py | aguanlao/csc648-sp19-team104 | 5ebde40a2b93a9c0925ea422e9e173423f0fc940 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import hashlib
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module # Django 1.6 / py2.6
from django import VERSION
from django.conf import settings
from django.core.exceptions import ValidationError
if VERSION < (1, 10):
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
from django.db import models
from django.db.models import IntegerField, Value
from django.db.models.expressions import RawSQL
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.text import Truncator
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
if getattr(settings, 'POSTMAN_I18N_URLS', False):
from django.utils.translation import pgettext_lazy
else:
def pgettext_lazy(c, m): return m
from django.views.decorators.debug import sensitive_variables
from .query import PostmanQuery
from .utils import email_visitor, notify_user
# options
# Translators: keep consistency with the <option> parameter in url translations ; 'm' stands for 'messages'
OPTION_MESSAGES = pgettext_lazy('postman_url', 'm')
# moderation constants
STATUS_PENDING = 'p'
STATUS_ACCEPTED = 'a'
STATUS_REJECTED = 'r'
STATUS_CHOICES = (
(STATUS_PENDING, _('Pending')),
(STATUS_ACCEPTED, _('Accepted')),
(STATUS_REJECTED, _('Rejected')),
)
# ordering constants
ORDER_BY_KEY = 'o' # as 'order'
ORDER_BY_FIELDS = {} # setting is deferred in setup()
ORDER_BY_MAPPER = {'sender': 'f', 'recipient': 't', 'subject': 's', 'date': 'd'} # for templatetags usage
def setup():
    """
    Deferred initialization that cannot run at import time since Django 1.7.

    Normally called in AppConfig.ready(); for backwards compatibility it is
    also invoked lazily on first need.  Fills ORDER_BY_FIELDS with the real
    model field paths, which depend on the configured user model.
    """
    from django.contrib.auth import get_user_model

    user_field = getattr(settings, 'POSTMAN_NAME_USER_AS', get_user_model().USERNAME_FIELD)
    ORDER_BY_FIELDS['f'] = 'sender__' + user_field     # as 'from'
    ORDER_BY_FIELDS['t'] = 'recipient__' + user_field  # as 'to'
    ORDER_BY_FIELDS['s'] = 'subject'                   # as 'subject'
    ORDER_BY_FIELDS['d'] = 'sent_at'                   # as 'date'
def get_order_by(query_dict):
    """
    Return a field name for order_by(), optionally '-'-prefixed for
    descending order, or None when no (known) ordering code is present.

    Argument:
        ``query_dict``: a dictionary to look for a key dedicated to ordering purpose

    An uppercase code means descending order; a lowercase one, ascending.
    """
    if ORDER_BY_KEY not in query_dict:
        return None
    code = query_dict[ORDER_BY_KEY]  # code may be uppercase or lowercase
    if not ORDER_BY_FIELDS:  # backwards compatibility, before Django 1.7
        setup()
    field = ORDER_BY_FIELDS.get(code.lower())
    if not field:
        return None
    return '-' + field if code.isupper() else field
def get_user_representation(user):
    """
    Return a User representation for display, configurable through an optional setting.

    The POSTMAN_SHOW_USER_AS setting may be:
    - a dotted path string to a callable taking the user,
    - the name of a user attribute or zero-argument method,
    - a callable taking the user.
    Falls back to force_text(user) on an empty result or any failure.
    """
    show_user_as = getattr(settings, 'POSTMAN_SHOW_USER_AS', None)
    if isinstance(show_user_as, six.string_types):
        if '.' in show_user_as:
            mod_path, _, attr_name = show_user_as.rpartition('.')
            try:
                return force_text(getattr(import_module(mod_path), attr_name)(user))
            # Narrowed from a bare `except:` (which also swallowed SystemExit and
            # KeyboardInterrupt); expected failures are ImportError, AttributeError
            # and TypeError (not callable).
            except Exception:
                pass
        else:
            attr = getattr(user, show_user_as, None)
            if callable(attr):
                attr = attr()
            if attr:
                return force_text(attr)
    elif callable(show_user_as):
        try:
            return force_text(show_user_as(user))
        except Exception:  # narrowed from a bare `except:`; fall back on any failure
            pass
    return force_text(user)  # default value, or in case of empty attribute or exception
def get_user_name(user):
    """
    Return the identifying name for a User.

    Uses the POSTMAN_NAME_USER_AS setting when defined, otherwise the
    user model's own username accessor.
    """
    name_user_as = getattr(settings, 'POSTMAN_NAME_USER_AS', None)
    if not name_user_as:
        return user.get_username()  # default
    return force_text(getattr(user, name_user_as))
class MessageManager(models.Manager):
    """The manager for Message."""

    def _folder(self, related, filters, option=None, order_by=None):
        """Base code, in common to the folders.

        Arguments:
            ``related``: tuple of field names for select_related(), or None
            ``filters``: a lookup dict, or a list/tuple of dicts OR-ed together
            ``option``: OPTION_MESSAGES for the flat by-message view; the
                default is the by-conversation view (one row per thread,
                carrying a 'count' of its messages)
            ``order_by``: optional ordering field name, possibly '-'-prefixed
        """
        # The by-conversation view needs the custom PostmanQuery to graft
        # a per-thread aggregation onto the queryset.
        qs = self.all() if option == OPTION_MESSAGES else QuerySet(self.model, PostmanQuery(self.model), using=self._db)
        if related:
            qs = qs.select_related(*related)
        if order_by:
            qs = qs.order_by(order_by)
        if isinstance(filters, (list, tuple)):
            # Several filter dicts are combined with OR.
            lookups = models.Q()
            for filter in filters:
                lookups |= models.Q(**filter)
        else:
            lookups = models.Q(**filters)
        if option == OPTION_MESSAGES:
            return qs.filter(lookups)
        # Adding a 'count' attribute, to be similar to the by-conversation case,
        # should not be necessary. Otherwise add:
        # .extra(select={'count': 'SELECT 1'})
        else:
            # Expose the per-thread message count computed by the extra table.
            qs = qs.annotate(count=RawSQL('{0}.count'.format(qs.query.pm_alias_prefix), ()))
            qs.query.pm_set_extra(table=(
                # Messages outside any thread: count fixed to 0.
                self.filter(lookups, thread_id__isnull=True).annotate(count=Value(0, IntegerField()))\
                    .values_list('id', 'count').order_by(),
                # use separate annotate() to keep control of the necessary order
                self.filter(lookups, thread_id__isnull=False).values('thread').annotate(id=models.Max('pk')).annotate(count=models.Count('pk'))\
                    .values_list('id', 'count').order_by(),
            ))
            return qs

    def inbox(self, user, related=True, **kwargs):
        """
        Return accepted messages received by a user but not marked as archived or deleted.
        """
        related = ('sender',) if related else None
        filters = {
            'recipient': user,
            'recipient_archived': False,
            'recipient_deleted_at__isnull': True,
            'moderation_status': STATUS_ACCEPTED,
        }
        return self._folder(related, filters, **kwargs)

    def inbox_unread_count(self, user):
        """
        Return the number of unread messages for a user.

        Designed for context_processors.py and templatetags/postman_tags.py.
        """
        return self.inbox(user, related=False, option=OPTION_MESSAGES).filter(read_at__isnull=True).count()

    def sent(self, user, **kwargs):
        """
        Return all messages sent by a user but not marked as archived or deleted.
        """
        related = ('recipient',)
        filters = {
            'sender': user,
            'sender_archived': False,
            'sender_deleted_at__isnull': True,
            # allow to see pending and rejected messages as well
        }
        return self._folder(related, filters, **kwargs)

    def archives(self, user, **kwargs):
        """
        Return messages belonging to a user and marked as archived.
        """
        # The user may be involved as either side, hence two OR-ed filters.
        related = ('sender', 'recipient')
        filters = ({
            'recipient': user,
            'recipient_archived': True,
            'recipient_deleted_at__isnull': True,
            'moderation_status': STATUS_ACCEPTED,
        }, {
            'sender': user,
            'sender_archived': True,
            'sender_deleted_at__isnull': True,
        })
        return self._folder(related, filters, **kwargs)

    def trash(self, user, **kwargs):
        """
        Return messages belonging to a user and marked as deleted.
        """
        related = ('sender', 'recipient')
        filters = ({
            'recipient': user,
            'recipient_deleted_at__isnull': False,
            'moderation_status': STATUS_ACCEPTED,
        }, {
            'sender': user,
            'sender_deleted_at__isnull': False,
        })
        return self._folder(related, filters, **kwargs)

    def thread(self, user, filter):
        """
        Return message/conversation for display.

        The sender always sees their own messages; the recipient only
        sees accepted ones.
        """
        return self.select_related('sender', 'recipient').filter(
            filter,
            (models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED)) | models.Q(sender=user),
        ).order_by('sent_at')

    def as_recipient(self, user, filter):
        """
        Return messages matching a filter AND being visible to a user as the recipient.
        """
        return self.filter(filter, recipient=user, moderation_status=STATUS_ACCEPTED)

    def as_sender(self, user, filter):
        """
        Return messages matching a filter AND being visible to a user as the sender.
        """
        return self.filter(filter, sender=user)  # any status is fine

    def perms(self, user):
        """
        Return a field-lookups filter as a permission controller for a reply request.

        The user must be the recipient of the accepted, non-deleted, message.
        """
        return models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED) & models.Q(recipient_deleted_at__isnull=True)

    def set_read(self, user, filter):
        """
        Set messages as read.

        Only touches accepted, still-unread messages addressed to the user;
        returns the number of rows updated.
        """
        return self.filter(
            filter,
            recipient=user,
            moderation_status=STATUS_ACCEPTED,
            read_at__isnull=True,
        ).update(read_at=now())
@python_2_unicode_compatible
class Message(models.Model):
    """
    A message between a User and another User or an AnonymousUser.

    Exactly one side of a visitor exchange is a registered user; the other
    side is identified by ``email``.  Moderation drives visibility: only
    accepted messages reach the recipient's folders.
    """
    SUBJECT_MAX_LENGTH = 120

    subject = models.CharField(_("subject"), max_length=SUBJECT_MAX_LENGTH)
    body = models.TextField(_("body"), blank=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='sent_messages',
        null=True, blank=True, verbose_name=_("sender"))
    recipient = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='received_messages',
        null=True, blank=True, verbose_name=_("recipient"))
    email = models.EmailField(_("visitor"), blank=True)  # instead of either sender or recipient, for an AnonymousUser
    parent = models.ForeignKey('self', on_delete=models.CASCADE, related_name='next_messages',
        null=True, blank=True, verbose_name=_("parent message"))
    thread = models.ForeignKey('self', on_delete=models.CASCADE, related_name='child_messages',
        null=True, blank=True, verbose_name=_("root message"))
    sent_at = models.DateTimeField(_("sent at"), default=now)
    read_at = models.DateTimeField(_("read at"), null=True, blank=True)
    replied_at = models.DateTimeField(_("replied at"), null=True, blank=True)
    sender_archived = models.BooleanField(_("archived by sender"), default=False)
    recipient_archived = models.BooleanField(_("archived by recipient"), default=False)
    sender_deleted_at = models.DateTimeField(_("deleted by sender at"), null=True, blank=True)
    recipient_deleted_at = models.DateTimeField(_("deleted by recipient at"), null=True, blank=True)
    # moderation fields
    moderation_status = models.CharField(_("status"), max_length=1, choices=STATUS_CHOICES, default=STATUS_PENDING)
    moderation_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='moderated_messages',
        null=True, blank=True, verbose_name=_("moderator"))
    moderation_date = models.DateTimeField(_("moderated at"), null=True, blank=True)
    moderation_reason = models.CharField(_("rejection reason"), max_length=120, blank=True)

    objects = MessageManager()

    class Meta:
        verbose_name = _("message")
        verbose_name_plural = _("messages")
        ordering = ['-sent_at', '-id']

    def __str__(self):
        return "{0}>{1}:{2}".format(self.obfuscated_sender, self.obfuscated_recipient, Truncator(self.subject).words(5))

    def get_absolute_url(self):
        "Usage is deprecated since v3.3.0, because it doesn't integrate well with the addition of namespaces."
        return reverse('postman:view', args=[self.pk])

    def is_pending(self):
        """Tell if the message is in the pending state."""
        return self.moderation_status == STATUS_PENDING

    def is_rejected(self):
        """Tell if the message is in the rejected state."""
        return self.moderation_status == STATUS_REJECTED

    def is_accepted(self):
        """Tell if the message is in the accepted state."""
        return self.moderation_status == STATUS_ACCEPTED

    @property
    def is_new(self):
        """Tell if the recipient has not yet read the message."""
        return self.read_at is None

    @property
    def is_replied(self):
        """Tell if the recipient has written a reply to the message."""
        return self.replied_at is not None

    def _obfuscated_email(self):
        """
        Return the email field as obfuscated, to keep it undisclosed.

        Format is:
            first 4 characters of the hash email + '..' + last 4 characters of the hash email + '@' + domain without TLD
        Example:
            foo@domain.com -> 1a2b..e8f9@domain

        Note: MD5 is used here only for obfuscation, not for security.
        """
        email = self.email
        # Salt with SECRET_KEY so the digest cannot be reversed by hashing
        # candidate addresses.
        data = email + settings.SECRET_KEY
        digest = hashlib.md5(data.encode()).hexdigest()  # encode(): py3 needs a buffer of bytes
        shrunken_digest = '..'.join((digest[:4], digest[-4:]))  # 32 characters is too long and is useless
        bits = email.split('@')
        if len(bits) != 2:
            return ''
        domain = bits[1]
        return '@'.join((shrunken_digest, domain.rsplit('.', 1)[0]))  # leave off the TLD to gain some space

    def admin_sender(self):
        """
        Return the sender either as a username or as a plain email.

        Designed for the Admin site.
        """
        if self.sender:
            return str(self.sender)
        else:
            return '<{0}>'.format(self.email)
    admin_sender.short_description = _("sender")
    admin_sender.admin_order_field = 'sender'

    # Give the sender either as a username or as a plain email.
    clear_sender = property(admin_sender)

    @property
    def obfuscated_sender(self):
        """Return the sender either as a username or as an undisclosed email."""
        if self.sender:
            return get_user_representation(self.sender)
        else:
            return self._obfuscated_email()

    def admin_recipient(self):
        """
        Return the recipient either as a username or as a plain email.

        Designed for the Admin site.
        """
        if self.recipient:
            return str(self.recipient)
        else:
            return '<{0}>'.format(self.email)
    admin_recipient.short_description = _("recipient")
    admin_recipient.admin_order_field = 'recipient'

    # Give the recipient either as a username or as a plain email.
    clear_recipient = property(admin_recipient)

    @property
    def obfuscated_recipient(self):
        """Return the recipient either as a username or as an undisclosed email."""
        if self.recipient:
            return get_user_representation(self.recipient)
        else:
            return self._obfuscated_email()

    def get_replies_count(self):
        """Return the number of accepted responses."""
        return self.next_messages.filter(moderation_status=STATUS_ACCEPTED).count()

    @sensitive_variables('values')
    def quote(self, format_subject, format_body=None):
        """Return a dictionary of quote values to initiate a reply.

        ``format_subject`` builds the reply subject (truncated to the field
        length); ``format_body``, when given, builds the quoted body.
        """
        values = {'subject': format_subject(self.subject)[:self.SUBJECT_MAX_LENGTH]}
        if format_body:
            values['body'] = format_body(self.obfuscated_sender, self.body)
        return values

    def clean(self):
        """Check some validity constraints."""
        # A message must be traceable to a sender: a user or a visitor email.
        if not (self.sender_id is not None or self.email):
            raise ValidationError(ugettext("Undefined sender."))

    def clean_moderation(self, initial_status, user=None):
        """Adjust automatically some fields, according to status workflow.

        ``initial_status`` is the status before the current change;
        ``user`` is the moderator to record, when any.
        """
        if self.moderation_status != initial_status:
            self.moderation_date = now()
            self.moderation_by = user
            if self.is_rejected():
                # even if maybe previously deleted during a temporary 'accepted' stay
                self.recipient_deleted_at = now()
            elif initial_status == STATUS_REJECTED:
                # rollback
                self.recipient_deleted_at = None

    def clean_for_visitor(self):
        """Do some auto-read and auto-delete, because there is no one to do it (no account)."""
        if self.sender_id is None:
            # no need to wait for a final moderation status to mark as deleted
            if not self.sender_deleted_at:
                self.sender_deleted_at = now()
        elif self.recipient_id is None:
            if self.is_accepted():
                if not self.read_at:
                    self.read_at = now()
                if not self.recipient_deleted_at:
                    self.recipient_deleted_at = now()
            else:
                # rollbacks
                if self.read_at:
                    self.read_at = None
                # but stay deleted if rejected
                if self.is_pending() and self.recipient_deleted_at:
                    self.recipient_deleted_at = None

    def update_parent(self, initial_status):
        """Update the parent to actualize its response state.

        Sets or rolls back parent.replied_at when this reply's moderation
        status changed across the accepted boundary.
        """
        if self.moderation_status != initial_status:
            parent = self.parent
            if self.is_accepted():
                # keep the very first date; no need to do differently
                if parent and (not parent.replied_at or self.sent_at < parent.replied_at):
                    parent.replied_at = self.sent_at
                    parent.save()
            elif initial_status == STATUS_ACCEPTED:
                if parent and parent.replied_at == self.sent_at:
                    # rollback, but there may be some other valid replies
                    try:
                        other_date = parent.next_messages\
                            .exclude(pk=self.pk).filter(moderation_status=STATUS_ACCEPTED)\
                            .values_list('sent_at', flat=True)\
                            .order_by('sent_at')[:1].get()
                        parent.replied_at = other_date
                    except Message.DoesNotExist:
                        parent.replied_at = None
                    parent.save()

    def notify_users(self, initial_status, site, is_auto_moderated=True):
        """Notify the rejection (to sender) or the acceptance (to recipient) of the message.

        Registered users are notified via notify_user(); visitors via
        email_visitor().
        """
        if initial_status == STATUS_PENDING:
            if self.is_rejected():
                # Bypass: for an online user, no need to notify when rejection is immediate.
                # Only useful for a visitor as an archive copy of the message, otherwise lost.
                if not (self.sender_id is not None and is_auto_moderated):
                    (notify_user if self.sender_id is not None else email_visitor)(self, 'rejection', site)
            elif self.is_accepted():
                (notify_user if self.recipient_id is not None else email_visitor)(self, 'acceptance', site)

    def get_dates(self):
        """Get some dates to restore later."""
        return (self.sender_deleted_at, self.recipient_deleted_at, self.read_at)

    def set_dates(self, sender_deleted_at, recipient_deleted_at, read_at):
        """Restore some dates."""
        self.sender_deleted_at = sender_deleted_at
        self.recipient_deleted_at = recipient_deleted_at
        self.read_at = read_at

    def get_moderation(self):
        """Get moderation information to restore later."""
        return (self.moderation_status, self.moderation_by_id, self.moderation_date, self.moderation_reason)

    def set_moderation(self, status, by_id, date, reason):
        """Restore moderation information."""
        self.moderation_status = status
        self.moderation_by_id = by_id
        self.moderation_date = date
        self.moderation_reason = reason

    def auto_moderate(self, moderators):
        """Run a chain of auto-moderators.

        Each moderator returns None (no opinion), a percentage, a boolean,
        or a (percentage, reason) tuple.  0%% rejects immediately, 100%%
        accepts immediately; otherwise ratings are averaged and 50%% is
        the acceptance threshold.  An undecided chain falls back to the
        POSTMAN_AUTO_MODERATE_AS setting.
        """
        auto = None
        final_reason = ''
        percents = []
        reasons = []
        if not isinstance(moderators, (list, tuple)):
            moderators = (moderators,)
        for moderator in moderators:
            rating = moderator(self)
            if rating is None: continue
            if isinstance(rating, tuple):
                percent, reason = rating
            else:
                percent = rating
                reason = getattr(moderator, 'default_reason', '')
            if percent is False: percent = 0
            if percent is True: percent = 100
            if not 0 <= percent <= 100: continue
            if percent == 0:
                auto = False
                final_reason = reason
                break
            elif percent == 100:
                auto = True
                break
            percents.append(percent)
            reasons.append(reason)
        if auto is None and percents:
            average = float(sum(percents)) / len(percents)
            # Only the reasons tied to "more reject than accept" ratings are kept.
            final_reason = ', '.join([r for i, r in enumerate(reasons) if r and not r.isspace() and percents[i] < 50])
            auto = average >= 50
        if auto is None:
            auto = getattr(settings, 'POSTMAN_AUTO_MODERATE_AS', None)
        if auto is True:
            self.moderation_status = STATUS_ACCEPTED
        elif auto is False:
            self.moderation_status = STATUS_REJECTED
            self.moderation_reason = final_reason
class PendingMessageManager(models.Manager):
    """The manager for PendingMessage: restricts querysets to pending messages."""

    def _only_pending(self, qs):
        # Shared restriction for both Django naming generations.
        return qs.filter(moderation_status=STATUS_PENDING)

    def get_query_set(self):  # for Django <= 1.5
        return self._only_pending(super(PendingMessageManager, self).get_query_set())

    def get_queryset(self):  # renamed in Django 1.6 from get_query_set
        """Filter to get only pending objects."""
        return self._only_pending(super(PendingMessageManager, self).get_queryset())
class PendingMessage(Message):
    """
    A proxy to Message, focused on pending objects to accept or reject.

    Being a proxy model, it shares the Message table and only changes
    the default manager and display names.
    """
    objects = PendingMessageManager()

    class Meta:
        verbose_name = _("pending message")
        verbose_name_plural = _("pending messages")
        proxy = True

    def set_accepted(self):
        """Set the message as accepted."""
        self.moderation_status = STATUS_ACCEPTED

    def set_rejected(self):
        """Set the message as rejected."""
        self.moderation_status = STATUS_REJECTED
| 39.71777 | 144 | 0.630055 | from __future__ import unicode_literals
import hashlib
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django import VERSION
from django.conf import settings
from django.core.exceptions import ValidationError
if VERSION < (1, 10):
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
from django.db import models
from django.db.models import IntegerField, Value
from django.db.models.expressions import RawSQL
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.text import Truncator
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
if getattr(settings, 'POSTMAN_I18N_URLS', False):
from django.utils.translation import pgettext_lazy
else:
def pgettext_lazy(c, m): return m
from django.views.decorators.debug import sensitive_variables
from .query import PostmanQuery
from .utils import email_visitor, notify_user
OPTION_MESSAGES = pgettext_lazy('postman_url', 'm')
STATUS_PENDING = 'p'
STATUS_ACCEPTED = 'a'
STATUS_REJECTED = 'r'
STATUS_CHOICES = (
(STATUS_PENDING, _('Pending')),
(STATUS_ACCEPTED, _('Accepted')),
(STATUS_REJECTED, _('Rejected')),
)
ORDER_BY_KEY = 'o'
ORDER_BY_FIELDS = {}
ORDER_BY_MAPPER = {'sender': 'f', 'recipient': 't', 'subject': 's', 'date': 'd'}
def setup():
from django.contrib.auth import get_user_model
name_user_as = getattr(settings, 'POSTMAN_NAME_USER_AS', get_user_model().USERNAME_FIELD)
ORDER_BY_FIELDS.update({
'f': 'sender__' + name_user_as,
't': 'recipient__' + name_user_as,
's': 'subject',
'd': 'sent_at',
})
def get_order_by(query_dict):
if ORDER_BY_KEY in query_dict:
code = query_dict[ORDER_BY_KEY]
if not ORDER_BY_FIELDS:
setup()
order_by_field = ORDER_BY_FIELDS.get(code.lower())
if order_by_field:
if code.isupper():
order_by_field = '-' + order_by_field
return order_by_field
def get_user_representation(user):
show_user_as = getattr(settings, 'POSTMAN_SHOW_USER_AS', None)
if isinstance(show_user_as, six.string_types):
if '.' in show_user_as:
mod_path, _, attr_name = show_user_as.rpartition('.')
try:
return force_text(getattr(import_module(mod_path), attr_name)(user))
except:
pass
else:
attr = getattr(user, show_user_as, None)
if callable(attr):
attr = attr()
if attr:
return force_text(attr)
elif callable(show_user_as):
try:
return force_text(show_user_as(user))
except:
pass
return force_text(user)
def get_user_name(user):
name_user_as = getattr(settings, 'POSTMAN_NAME_USER_AS', None)
if name_user_as:
return force_text(getattr(user, name_user_as))
return user.get_username()
class MessageManager(models.Manager):
def _folder(self, related, filters, option=None, order_by=None):
qs = self.all() if option == OPTION_MESSAGES else QuerySet(self.model, PostmanQuery(self.model), using=self._db)
if related:
qs = qs.select_related(*related)
if order_by:
qs = qs.order_by(order_by)
if isinstance(filters, (list, tuple)):
lookups = models.Q()
for filter in filters:
lookups |= models.Q(**filter)
else:
lookups = models.Q(**filters)
if option == OPTION_MESSAGES:
return qs.filter(lookups)
else:
qs = qs.annotate(count=RawSQL('{0}.count'.format(qs.query.pm_alias_prefix), ()))
qs.query.pm_set_extra(table=(
self.filter(lookups, thread_id__isnull=True).annotate(count=Value(0, IntegerField()))\
.values_list('id', 'count').order_by(),
self.filter(lookups, thread_id__isnull=False).values('thread').annotate(id=models.Max('pk')).annotate(count=models.Count('pk'))\
.values_list('id', 'count').order_by(),
))
return qs
def inbox(self, user, related=True, **kwargs):
related = ('sender',) if related else None
filters = {
'recipient': user,
'recipient_archived': False,
'recipient_deleted_at__isnull': True,
'moderation_status': STATUS_ACCEPTED,
}
return self._folder(related, filters, **kwargs)
def inbox_unread_count(self, user):
return self.inbox(user, related=False, option=OPTION_MESSAGES).filter(read_at__isnull=True).count()
def sent(self, user, **kwargs):
related = ('recipient',)
filters = {
'sender': user,
'sender_archived': False,
'sender_deleted_at__isnull': True,
}
return self._folder(related, filters, **kwargs)
def archives(self, user, **kwargs):
related = ('sender', 'recipient')
filters = ({
'recipient': user,
'recipient_archived': True,
'recipient_deleted_at__isnull': True,
'moderation_status': STATUS_ACCEPTED,
}, {
'sender': user,
'sender_archived': True,
'sender_deleted_at__isnull': True,
})
return self._folder(related, filters, **kwargs)
def trash(self, user, **kwargs):
related = ('sender', 'recipient')
filters = ({
'recipient': user,
'recipient_deleted_at__isnull': False,
'moderation_status': STATUS_ACCEPTED,
}, {
'sender': user,
'sender_deleted_at__isnull': False,
})
return self._folder(related, filters, **kwargs)
def thread(self, user, filter):
return self.select_related('sender', 'recipient').filter(
filter,
(models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED)) | models.Q(sender=user),
).order_by('sent_at')
def as_recipient(self, user, filter):
return self.filter(filter, recipient=user, moderation_status=STATUS_ACCEPTED)
def as_sender(self, user, filter):
return self.filter(filter, sender=user)
def perms(self, user):
return models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED) & models.Q(recipient_deleted_at__isnull=True)
def set_read(self, user, filter):
return self.filter(
filter,
recipient=user,
moderation_status=STATUS_ACCEPTED,
read_at__isnull=True,
).update(read_at=now())
@python_2_unicode_compatible
class Message(models.Model):
    """
    A message exchanged between two Users, or between a User and a visitor
    identified only by an email address.

    Each message carries its own moderation state (pending / accepted /
    rejected) and per-party bookkeeping (read / archived / deleted marks).
    Replies are linked through `parent` (the message replied to) and
    `thread` (the root message of the conversation).
    """
    SUBJECT_MAX_LENGTH = 120
    # Content and parties; sender/recipient are nullable because either side
    # may be an anonymous visitor, known only by `email`.
    subject = models.CharField(_("subject"), max_length=SUBJECT_MAX_LENGTH)
    body = models.TextField(_("body"), blank=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='sent_messages',
                               null=True, blank=True, verbose_name=_("sender"))
    recipient = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='received_messages',
                                  null=True, blank=True, verbose_name=_("recipient"))
    email = models.EmailField(_("visitor"), blank=True)
    # Conversation threading.
    parent = models.ForeignKey('self', on_delete=models.CASCADE, related_name='next_messages',
                               null=True, blank=True, verbose_name=_("parent message"))
    thread = models.ForeignKey('self', on_delete=models.CASCADE, related_name='child_messages',
                               null=True, blank=True, verbose_name=_("root message"))
    # Lifecycle timestamps and per-party flags.
    sent_at = models.DateTimeField(_("sent at"), default=now)
    read_at = models.DateTimeField(_("read at"), null=True, blank=True)
    replied_at = models.DateTimeField(_("replied at"), null=True, blank=True)
    sender_archived = models.BooleanField(_("archived by sender"), default=False)
    recipient_archived = models.BooleanField(_("archived by recipient"), default=False)
    sender_deleted_at = models.DateTimeField(_("deleted by sender at"), null=True, blank=True)
    recipient_deleted_at = models.DateTimeField(_("deleted by recipient at"), null=True, blank=True)
    # Moderation state.
    moderation_status = models.CharField(_("status"), max_length=1, choices=STATUS_CHOICES, default=STATUS_PENDING)
    moderation_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='moderated_messages',
                                      null=True, blank=True, verbose_name=_("moderator"))
    moderation_date = models.DateTimeField(_("moderated at"), null=True, blank=True)
    moderation_reason = models.CharField(_("rejection reason"), max_length=120, blank=True)
    objects = MessageManager()
    class Meta:
        verbose_name = _("message")
        verbose_name_plural = _("messages")
        ordering = ['-sent_at', '-id']
    def __str__(self):
        return "{0}>{1}:{2}".format(self.obfuscated_sender, self.obfuscated_recipient, Truncator(self.subject).words(5))
    def get_absolute_url(self):
        return reverse('postman:view', args=[self.pk])
    def is_pending(self):
        """Tell if the message is in the pending state."""
        return self.moderation_status == STATUS_PENDING
    def is_rejected(self):
        """Tell if the message is in the rejected state."""
        return self.moderation_status == STATUS_REJECTED
    def is_accepted(self):
        """Tell if the message is in the accepted state."""
        return self.moderation_status == STATUS_ACCEPTED
    @property
    def is_new(self):
        """Tell if the message has not been read yet."""
        return self.read_at is None
    @property
    def is_replied(self):
        """Tell if the message has been replied to."""
        return self.replied_at is not None
    def _obfuscated_email(self):
        """Return the visitor's email address in an undisclosable form.

        The local part is replaced by a shrunken md5 digest and the top-level
        domain is dropped, so the original address cannot be recovered.
        """
        email = self.email
        # Salt with SECRET_KEY so the digest cannot be precomputed from a
        # known address alone.
        data = email + settings.SECRET_KEY
        digest = hashlib.md5(data.encode()).hexdigest()
        shrunken_digest = '..'.join((digest[:4], digest[-4:]))
        bits = email.split('@')
        if len(bits) != 2:
            return ''
        domain = bits[1]
        return '@'.join((shrunken_digest, domain.rsplit('.', 1)[0]))
    def admin_sender(self):
        """Return the sender, either as a username or as a plain email."""
        if self.sender:
            return str(self.sender)
        else:
            return '<{0}>'.format(self.email)
    admin_sender.short_description = _("sender")
    admin_sender.admin_order_field = 'sender'
    # Property alias, e.g. for use in templates.
    clear_sender = property(admin_sender)
    @property
    def obfuscated_sender(self):
        """Return the sender, as a username or an undisclosable email."""
        if self.sender:
            return get_user_representation(self.sender)
        else:
            return self._obfuscated_email()
    def admin_recipient(self):
        """Return the recipient, either as a username or as a plain email."""
        if self.recipient:
            return str(self.recipient)
        else:
            return '<{0}>'.format(self.email)
    admin_recipient.short_description = _("recipient")
    admin_recipient.admin_order_field = 'recipient'
    # Property alias, e.g. for use in templates.
    clear_recipient = property(admin_recipient)
    @property
    def obfuscated_recipient(self):
        """Return the recipient, as a username or an undisclosable email."""
        if self.recipient:
            return get_user_representation(self.recipient)
        else:
            return self._obfuscated_email()
    def get_replies_count(self):
        """Return the number of accepted direct replies to this message."""
        return self.next_messages.filter(moderation_status=STATUS_ACCEPTED).count()
    @sensitive_variables('values')
    def quote(self, format_subject, format_body=None):
        """Return a dict of quoted values (subject, and optionally body) for a reply."""
        values = {'subject': format_subject(self.subject)[:self.SUBJECT_MAX_LENGTH]}
        if format_body:
            values['body'] = format_body(self.obfuscated_sender, self.body)
        return values
    def clean(self):
        """Check validity constraints: the message must have some sender."""
        if not (self.sender_id is not None or self.email):
            raise ValidationError(ugettext("Undefined sender."))
    def clean_moderation(self, initial_status, user=None):
        """Adjust the moderation-related fields when the status has changed."""
        if self.moderation_status != initial_status:
            self.moderation_date = now()
            self.moderation_by = user
            if self.is_rejected():
                # A rejected message is marked deleted for the recipient.
                self.recipient_deleted_at = now()
            elif initial_status == STATUS_REJECTED:
                # Coming back from rejection: undo the automatic deletion.
                self.recipient_deleted_at = None
    def clean_for_visitor(self):
        """Adjust fields that do not make sense when one party is a visitor."""
        if self.sender_id is None:
            # A visitor sender keeps no 'sent' copy: mark deleted right away.
            if not self.sender_deleted_at:
                self.sender_deleted_at = now()
        elif self.recipient_id is None:
            # A visitor recipient: an accepted message is treated as read and
            # deleted on the recipient side.
            if self.is_accepted():
                if not self.read_at:
                    self.read_at = now()
                if not self.recipient_deleted_at:
                    self.recipient_deleted_at = now()
            else:
                # Rollbacks, in case the moderation status changed back.
                if self.read_at:
                    self.read_at = None
                # A rejected message keeps the recipient_deleted_at set by
                # clean_moderation(); only a pending one is reset.
                if self.is_pending() and self.recipient_deleted_at:
                    self.recipient_deleted_at = None
    def update_parent(self, initial_status):
        """Keep the parent's replied_at consistent with this reply's status."""
        if self.moderation_status != initial_status:
            parent = self.parent
            if self.is_accepted():
                # Keep the oldest accepted reply date on the parent.
                if parent and (not parent.replied_at or self.sent_at < parent.replied_at):
                    parent.replied_at = self.sent_at
                    parent.save()
            elif initial_status == STATUS_ACCEPTED:
                # This reply is no longer accepted: if it was the one that set
                # replied_at, fall back to the next accepted reply, if any.
                if parent and parent.replied_at == self.sent_at:
                    try:
                        other_date = parent.next_messages\
                            .exclude(pk=self.pk).filter(moderation_status=STATUS_ACCEPTED)\
                            .values_list('sent_at', flat=True)\
                            .order_by('sent_at')[:1].get()
                        parent.replied_at = other_date
                    except Message.DoesNotExist:
                        parent.replied_at = None
                    parent.save()
    def notify_users(self, initial_status, site, is_auto_moderated=True):
        """Notify a rejection to the sender, or an acceptance to the recipient.

        A registered sender is not notified of an auto-moderated rejection,
        only of one decided by a moderator; a visitor is always emailed.
        """
        if initial_status == STATUS_PENDING:
            if self.is_rejected():
                if not (self.sender_id is not None and is_auto_moderated):
                    (notify_user if self.sender_id is not None else email_visitor)(self, 'rejection', site)
            elif self.is_accepted():
                (notify_user if self.recipient_id is not None else email_visitor)(self, 'acceptance', site)
    def get_dates(self):
        """Get some dates, to restore them later (see set_dates)."""
        return (self.sender_deleted_at, self.recipient_deleted_at, self.read_at)
    def set_dates(self, sender_deleted_at, recipient_deleted_at, read_at):
        """Restore dates previously saved with get_dates."""
        self.sender_deleted_at = sender_deleted_at
        self.recipient_deleted_at = recipient_deleted_at
        self.read_at = read_at
    def get_moderation(self):
        """Get moderation information, to restore it later (see set_moderation)."""
        return (self.moderation_status, self.moderation_by_id, self.moderation_date, self.moderation_reason)
    def set_moderation(self, status, by_id, date, reason):
        """Restore moderation information previously saved with get_moderation."""
        self.moderation_status = status
        self.moderation_by_id = by_id
        self.moderation_date = date
        self.moderation_reason = reason
    def auto_moderate(self, moderators):
        """Run the chain of auto-moderators and set the moderation status.

        Each moderator may return None (no opinion), a rating in [0, 100],
        or a (rating, reason) pair.  A rating of 0 rejects immediately and
        100 accepts immediately; otherwise the ratings are averaged and the
        message is accepted when the average is at least 50.
        """
        auto = None
        final_reason = ''
        percents = []
        reasons = []
        if not isinstance(moderators, (list, tuple)):
            moderators = (moderators,)
        for moderator in moderators:
            rating = moderator(self)
            if rating is None: continue
            if isinstance(rating, tuple):
                percent, reason = rating
            else:
                percent = rating
                reason = getattr(moderator, 'default_reason', '')
            # Booleans are accepted as shorthands for 0 and 100.
            if percent is False: percent = 0
            if percent is True: percent = 100
            if not 0 <= percent <= 100: continue
            if percent == 0:
                auto = False
                final_reason = reason
                break
            elif percent == 100:
                auto = True
                break
            percents.append(percent)
            reasons.append(reason)
        if auto is None and percents:
            average = float(sum(percents)) / len(percents)
            # Only collect the reasons of the moderators leaning to rejection.
            final_reason = ', '.join([r for i, r in enumerate(reasons) if r and not r.isspace() and percents[i] < 50])
            auto = average >= 50
        if auto is None:
            # No moderator had an opinion: fall back to the site-wide default.
            auto = getattr(settings, 'POSTMAN_AUTO_MODERATE_AS', None)
        if auto is True:
            self.moderation_status = STATUS_ACCEPTED
        elif auto is False:
            self.moderation_status = STATUS_REJECTED
            self.moderation_reason = final_reason
class PendingMessageManager(models.Manager):
    """The manager for PendingMessage: restricts to messages awaiting moderation."""
    def get_query_set(self):  # presumably kept for older Django releases that still call this name - verify
        return super(PendingMessageManager, self).get_query_set().filter(moderation_status=STATUS_PENDING)
    def get_queryset(self):
        return super(PendingMessageManager, self).get_queryset().filter(moderation_status=STATUS_PENDING)
class PendingMessage(Message):
    """A proxy to Message, restricted to pending objects via its manager."""
    objects = PendingMessageManager()
    class Meta:
        verbose_name = _("pending message")
        verbose_name_plural = _("pending messages")
        proxy = True
    def set_accepted(self):
        """Mark the message as accepted (the caller is responsible for saving)."""
        self.moderation_status = STATUS_ACCEPTED
    def set_rejected(self):
        """Mark the message as rejected (the caller is responsible for saving)."""
        self.moderation_status = STATUS_REJECTED
| true | true |
f72e9493dc3832bb184f22f70089b28ea0887e1a | 10,895 | py | Python | abraia/hsi.py | abraia/abraia-python | e49e3869b2ee7e6b1bcb41e0cc1ae126ac39e202 | [
"MIT"
] | 4 | 2018-03-23T22:32:53.000Z | 2020-08-25T12:42:00.000Z | abraia/hsi.py | abraia/abraia-multiple | e49e3869b2ee7e6b1bcb41e0cc1ae126ac39e202 | [
"MIT"
] | 5 | 2021-02-18T20:29:09.000Z | 2022-03-29T09:28:57.000Z | abraia/hsi.py | abraia/abraia-python | e49e3869b2ee7e6b1bcb41e0cc1ae126ac39e202 | [
"MIT"
] | 1 | 2021-01-22T23:51:14.000Z | 2021-01-22T23:51:14.000Z | import os
import wget
import tempfile
import numpy as np
import scipy.io as sio
import scipy.ndimage as nd
from PIL import Image
from sklearn.svm import SVC
from sklearn.utils import resample
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from tensorflow import keras
from keras.utils import np_utils
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, Conv3D, Flatten, Dense, Reshape, Dropout
from .plot import plot_image, plot_images, plot_train_history
tempdir = tempfile.gettempdir()
def download(url):
    """Download `url` into the system temp dir (once) and return the local path."""
    destination = os.path.join(tempdir, os.path.basename(url))
    # Skip the download when a cached copy already exists.
    if os.path.exists(destination):
        return destination
    wget.download(url, destination)
    return destination
def load_dataset(dataset):
    """Load one of the available hyperspectral datasets (IP, PU, SA, KSC).

    The .mat files are fetched from the EHU repository (and cached in the
    temp dir by download()).  Returns (data, ground_truth, class_names);
    KSC returns only (data, ground_truth).
    """
    if dataset == 'IP':
        # Indian Pines; index 0 ('') is presumably the unlabelled background.
        data_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/6/67/Indian_pines_corrected.mat'))['indian_pines_corrected']
        gt_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat'))['indian_pines_gt']
        class_names = ['', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture',
                       'Grass-trees', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats', 'Soybean-notill',
                       'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings Grass Trees Drives',
                       'Stone Steel Towers']
        return data_hsi, gt_hsi, class_names
    if dataset == 'PU':
        # Pavia University
        data_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat'))['paviaU']
        gt_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat'))['paviaU_gt']
        class_names = ['', 'Asphalt', 'Meadows', 'Gravel', 'Trees', 'Painted metal sheets',
                       'Bare Soil', 'Bitumen', 'Self-Blocking Bricks', 'Shadows']
        return data_hsi, gt_hsi, class_names
    if dataset == 'SA':
        # Salinas
        data_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/a/a3/Salinas_corrected.mat'))['salinas_corrected']
        gt_hsi = sio.loadmat(download(
            'http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat'))['salinas_gt']
        class_names = ['', 'Brocoli_green_weeds_1', 'Brocoli_green_weeds_2', 'Fallow', 'Fallow_rough_plow',
                       'Fallow_smooth', 'Stubble', 'Celery', 'Grapes_untrained', 'Soil_vinyard_develop',
                       'Corn_senesced_green_weeds', 'Lettuce_romaine_4wk', 'Lettuce_romaine_5wk',
                       'Lettuce_romaine_6wk', 'Lettuce_romaine_7wk', 'Vinyard_untrained', 'Vinyard_vertical_trellis']
        return data_hsi, gt_hsi, class_names
    if dataset == 'KSC':
        # Kennedy Space Center
        data_hsi = sio.loadmat(download(
            'http://www.ehu.es/ccwintco/uploads/2/26/KSC.mat'))['KSC']
        gt_hsi = sio.loadmat(download(
            'http://www.ehu.es/ccwintco/uploads/a/a6/KSC_gt.mat'))['KSC_gt']
        # NOTE(review): KSC has no class_names list, so callers get a 2-tuple here.
        return data_hsi, gt_hsi
def random(img, n_bands=6, indexes=False):
    """Returns a list of randomly chosen bands of the image.

    Args:
        img: image array of shape (height, width, bands).
        n_bands: number of bands to pick (with replacement).
        indexes: when True, also return the list of picked band indexes.

    Returns:
        A list of 2-D band arrays, or (bands, indexes) when indexes is True.
    """
    bands = []
    # Bug fix: the local list was previously also named `indexes`, shadowing
    # the flag parameter, so the function always returned the tuple.
    picked = []
    for _ in range(n_bands):
        q = np.random.randint(img.shape[2])
        picked.append(q)
        bands.append(img[:, :, q])
    if indexes:
        return bands, picked
    return bands
def rgb(img, bands=None):
    """Returns the RGB image from the selected bands (R, G, B)"""
    # Imported locally so the `spectral` package is only required when used.
    from spectral import get_rgb
    return get_rgb(img, bands=bands)
def ndvi(img, red_band, nir_band):
    """Returns the NDVI image from the specified red and nir bands"""
    # The local import deliberately shadows this function's own name so the
    # call below goes to spectral.ndvi, not back into this wrapper.
    from spectral import ndvi
    return ndvi(img, red_band, nir_band)
def resample(img, n_samples=32):
    """Resamples the number of spectral bands (n_samples).

    Args:
        img: image array of shape (height, width, bands).
        n_samples: number of spectral bands in the output image.

    Returns:
        The resampled image of shape (height, width, n_samples).
    """
    # Bug fix: this module-level function shadows the imported
    # sklearn.utils.resample, so the original call here recursed into this
    # very function forever. Import it under an alias instead.
    from sklearn.utils import resample as sk_resample
    h, w, d = img.shape
    X = img.reshape((h * w), d)
    r = sk_resample(np.transpose(X), n_samples=n_samples)
    return np.transpose(r).reshape(h, w, n_samples)
def resize(img, size):
    """Resize the image to the given size (w, h) using Lanczos resampling."""
    return np.array(Image.fromarray(img).resize(size, resample=Image.LANCZOS))
def normalize(img):
    """Normalize the image values to the range [0, 1]."""
    lo = np.amin(img)
    hi = np.amax(img)
    return (img - lo) / (hi - lo)
def saliency(img):
    """Calculate the saliency map of the image.

    Uses a spectral-residual approach per band: the log-amplitude spectrum is
    compared against its local average, and what remains (the "residual")
    highlights the statistically unusual, i.e. salient, content.  The per-band
    maps are summed into a single (height, width) map.
    """
    smaps = []
    for n in range(img.shape[2]):
        band = img[:, :, n]
        h, w = band.shape
        # Work at a fixed 64x64 resolution, then scale the map back up.
        fft = np.fft.fft2(resize(band, (64, 64)))
        log_amplitude, phase = np.log(np.absolute(fft)), np.angle(fft)
        spectral_residual = log_amplitude - nd.uniform_filter(log_amplitude, size=3, mode='nearest')
        # Rebuild the image from the residual amplitude and the original phase.
        smap = np.absolute(np.fft.ifft2(np.exp(spectral_residual + 1.j * phase)))
        smap = nd.gaussian_filter(smap, sigma=3)
        smaps.append(normalize(resize(smap, (w, h))))
    return np.sum(np.dstack(smaps), axis=2)
def spectrum(img, point=None):
    """Get the spectrum at a given point (x, y).

    When no point is specified, the spectrum of the most salient point of
    the image is returned.
    """
    if point is None:
        sal_map = saliency(img)
        row, col = np.unravel_index(np.argmax(sal_map), sal_map.shape)
        point = (col, row)
    x, y = point
    return img[y, x, :]
def split_train_test(X, y, train_ratio=0.7):
    """Split data for training and test.

    The split is stratified on y.  Returns (X_train, X_test, y_train, y_test).
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_ratio, stratify=y)
    return X_train, X_test, y_train, y_test
def principal_components(img, n_components=3, spectrum=False):
    """Calculate principal components of the image.

    Args:
        img: image array of shape (height, width, bands).
        n_components: number of principal components to keep.
        spectrum: when True, also return the PCA loadings (components_).

    Returns:
        The projected image of shape (height, width, n_components), or
        (bands, components) when spectrum is True.
    """
    h, w, d = img.shape
    X = img.reshape((h * w), d)
    pca = PCA(n_components=n_components, whiten=True)
    bands = pca.fit_transform(X).reshape(h, w, n_components)
    if spectrum:
        # Bug fix: the original evaluated this tuple without returning it,
        # so the `spectrum` flag had no effect.
        return bands, pca.components_
    return bands
def pad_with_zeros(X, margin=2):
    """Zero-pad the two spatial dimensions of X by `margin` on every side."""
    h, w, d = X.shape
    padded = np.zeros((h + 2 * margin, w + 2 * margin, d))
    padded[margin:h + margin, margin:w + margin, :] = X
    return padded
def create_patch(data, height_index, width_index, patch_size):
    """Extract a patch_size x patch_size window of `data` starting at
    (height_index, width_index), keeping all spectral bands."""
    return data[height_index:height_index + patch_size,
                width_index:width_index + patch_size, :]
# TODO: Convert create patches to generator with batch_size parameter
def create_patches(X, patch_size):
    """Return one zero-padded patch_size x patch_size patch per pixel of X.

    The result is a float32 array of shape
    (height * width, patch_size, patch_size, bands, 1).
    """
    rows, cols = X.shape[0], X.shape[1]
    padded = pad_with_zeros(X, patch_size // 2)
    patches = []
    for r in range(rows):
        for c in range(cols):
            window = create_patch(padded, r, c, patch_size)
            patches.append(window.reshape(window.shape + (1,)).astype('float32'))
    return np.array(patches)
def create_image_cubes(X, y, patch_size):
    """Pair every per-pixel patch of X with the label of its pixel in y."""
    cubes = create_patches(X, patch_size)
    rows, cols = X.shape[0], X.shape[1]
    # Labels are collected in the same row-major order as the patches.
    cube_labels = np.array([y[r, c] for r in range(rows) for c in range(cols)])
    return cubes, cube_labels
def generate_training_data(X, y, patch_size, train_ratio=0.7):
    """Build per-pixel patch cubes from (X, y) and split them into
    stratified train/test sets for the HSN model."""
    cubes, labels = create_image_cubes(X, y, patch_size)
    X_train, X_test, y_train, y_test = split_train_test(cubes, labels, train_ratio)
    target_shape = (-1, patch_size, patch_size, cubes.shape[-1], 1)
    return X_train.reshape(target_shape), X_test.reshape(target_shape), y_train, y_test
def create_hsn_model(input_shape, n_classes):
    """Build and compile the Hybrid Spectral Net (HSN) classifier.

    Args:
        input_shape: (patch_size, patch_size, n_components) of the input
            cubes; a trailing channel dimension of 1 is added here.
        n_classes: number of output classes (softmax units).

    Returns:
        A compiled Keras Model.
    """
    input_layer = Input((*input_shape, 1))
    ## convolutional layers: 3-D convolutions mix spatial and spectral
    ## information, then a 2-D convolution refines the spatial features
    conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 7), activation='relu')(input_layer)
    conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1)
    conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2)
    # Fold the spectral and channel axes together before the 2-D convolution.
    conv_layer3 = Reshape((conv_layer3.shape[1], conv_layer3.shape[2], conv_layer3.shape[3] * conv_layer3.shape[4]))(conv_layer3)
    conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv_layer3)
    flatten_layer = Flatten()(conv_layer4)
    ## fully connected layers, with dropout for regularization
    dense_layer1 = Dense(units=256, activation='relu')(flatten_layer)
    dense_layer1 = Dropout(0.4)(dense_layer1)
    dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)
    dense_layer2 = Dropout(0.4)(dense_layer2)
    output_layer = Dense(units=n_classes, activation='softmax')(dense_layer2)
    # define and compile the model with input layer and output layer
    model = Model(inputs=input_layer, outputs=output_layer)
    adam = keras.optimizers.Adam(learning_rate=0.001, decay=1e-06)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def predict_hsn_model(model, X, patch_size):
    """Classify every pixel of X with the trained HSN model.

    Returns an integer label map of shape (height, width).
    """
    rows, cols = X.shape[0], X.shape[1]
    per_pixel_patches = create_patches(X, patch_size)
    labels = np.argmax(model.predict(per_pixel_patches), axis=1)
    return labels.reshape(rows, cols).astype(int)
class HyperspectralModel:
    """Unified train/predict wrapper around two classifiers.

    Args:
        name: model type, either 'svm' (per-pixel SVC) or 'hsn'
            (Hybrid Spectral Net over spatial patches).
        *args: for 'hsn', (input_shape, n_classes) where input_shape is
            (patch_size, patch_size, n_components).
    """
    def __init__(self, name, *args):
        self.name = name
        self.history = None  # set by train() for the 'hsn' model
        if self.name == 'svm':
            self.model = SVC(C=150, kernel='rbf')
        elif self.name == 'hsn':
            self.input_shape, self.n_classes = args
            self.model = create_hsn_model(self.input_shape, self.n_classes)  # Hybrid Spectral Net
    def train(self, X, y, train_ratio=0.7, epochs=50):
        """Train the model; returns (y_test, y_pred) for evaluation."""
        if self.name == 'svm':
            # The SVM classifies individual pixel spectra.
            X_train, X_test, y_train, y_test = train_test_split(X.reshape(-1, X.shape[-1]), y, train_size=train_ratio, stratify=y)
            self.model.fit(X_train, y_train)
            return y_test, self.model.predict(X_test)
        elif self.name == 'hsn':
            # The HSN classifies spatial patches of the PCA-reduced image.
            X = principal_components(X, n_components=self.input_shape[2])
            X_train, X_test, y_train, y_test = generate_training_data(X, y, self.input_shape[0], train_ratio)
            self.history = self.model.fit(x=X_train, y=np_utils.to_categorical(y_train), batch_size=256, epochs=epochs)
            return y_test, np.argmax(self.model.predict(X_test), axis=1)
    def predict(self, X):
        """Predict a (height, width) label map for the image X."""
        if self.name == 'svm':
            return self.model.predict(X.reshape(-1, X.shape[2])).reshape(X.shape[0], X.shape[1])
        elif self.name == 'hsn':
            X = principal_components(X, n_components=self.input_shape[2])
            return predict_hsn_model(self.model, X, self.input_shape[0])
    def plot_history(self):
        """Plot the training history (available after train() on 'hsn')."""
        # Bug fix: the original definition was missing `self`, so calling
        # model.plot_history() raised a TypeError.
        if self.history:
            plot_train_history(self.history)
    def save(self, filename='model.h5'):
        """Save the underlying model to disk."""
        self.model.save(filename)
    def load(self, filename='model.h5'):
        """Load a previously saved Keras model."""
        self.model = load_model(filename)
def create_model(name, *args):
    """Create a new model: 'svm' or 'hsn'.

    Extra positional arguments are forwarded to HyperspectralModel.
    """
    return HyperspectralModel(name, *args)
| 40.055147 | 130 | 0.659752 | import os
import wget
import tempfile
import numpy as np
import scipy.io as sio
import scipy.ndimage as nd
from PIL import Image
from sklearn.svm import SVC
from sklearn.utils import resample
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from tensorflow import keras
from keras.utils import np_utils
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, Conv3D, Flatten, Dense, Reshape, Dropout
from .plot import plot_image, plot_images, plot_train_history
tempdir = tempfile.gettempdir()
def download(url):
basename = os.path.basename(url)
dest = os.path.join(tempdir, basename)
if not os.path.exists(dest):
wget.download(url, dest)
return dest
def load_dataset(dataset):
if dataset == 'IP':
data_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/6/67/Indian_pines_corrected.mat'))['indian_pines_corrected']
gt_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat'))['indian_pines_gt']
class_names = ['', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture',
'Grass-trees', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats', 'Soybean-notill',
'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings Grass Trees Drives',
'Stone Steel Towers']
return data_hsi, gt_hsi, class_names
if dataset == 'PU':
data_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat'))['paviaU']
gt_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat'))['paviaU_gt']
class_names = ['', 'Asphalt', 'Meadows', 'Gravel', 'Trees', 'Painted metal sheets',
'Bare Soil', 'Bitumen', 'Self-Blocking Bricks', 'Shadows']
return data_hsi, gt_hsi, class_names
if dataset == 'SA':
data_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/a/a3/Salinas_corrected.mat'))['salinas_corrected']
gt_hsi = sio.loadmat(download(
'http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat'))['salinas_gt']
class_names = ['', 'Brocoli_green_weeds_1', 'Brocoli_green_weeds_2', 'Fallow', 'Fallow_rough_plow',
'Fallow_smooth', 'Stubble', 'Celery', 'Grapes_untrained', 'Soil_vinyard_develop',
'Corn_senesced_green_weeds', 'Lettuce_romaine_4wk', 'Lettuce_romaine_5wk',
'Lettuce_romaine_6wk', 'Lettuce_romaine_7wk', 'Vinyard_untrained', 'Vinyard_vertical_trellis']
return data_hsi, gt_hsi, class_names
if dataset == 'KSC':
data_hsi = sio.loadmat(download(
'http://www.ehu.es/ccwintco/uploads/2/26/KSC.mat'))['KSC']
gt_hsi = sio.loadmat(download(
'http://www.ehu.es/ccwintco/uploads/a/a6/KSC_gt.mat'))['KSC_gt']
return data_hsi, gt_hsi
def random(img, n_bands=6, indexes=False):
bands = []
indexes = []
for i in range(n_bands):
q = np.random.randint(img.shape[2])
indexes.append(q)
bands.append(img[:, :, q])
if indexes:
return bands, indexes
return bands
def rgb(img, bands=None):
from spectral import get_rgb
return get_rgb(img, bands=bands)
def ndvi(img, red_band, nir_band):
from spectral import ndvi
return ndvi(img, red_band, nir_band)
def resample(img, n_samples=32):
h, w, d = img.shape
X = img.reshape((h * w), d)
r = resample(np.transpose(X), n_samples=n_samples)
return np.transpose(r).reshape(h, w, n_samples)
def resize(img, size):
return np.array(Image.fromarray(img).resize(size, resample=Image.LANCZOS))
def normalize(img):
min, max = np.amin(img), np.amax(img)
return (img - min) / (max - min)
def saliency(img):
smaps = []
for n in range(img.shape[2]):
band = img[:, :, n]
h, w = band.shape
fft = np.fft.fft2(resize(band, (64, 64)))
log_amplitude, phase = np.log(np.absolute(fft)), np.angle(fft)
spectral_residual = log_amplitude - nd.uniform_filter(log_amplitude, size=3, mode='nearest')
smap = np.absolute(np.fft.ifft2(np.exp(spectral_residual + 1.j * phase)))
smap = nd.gaussian_filter(smap, sigma=3)
smaps.append(normalize(resize(smap, (w, h))))
return np.sum(np.dstack(smaps), axis=2)
def spectrum(img, point=None):
if point is None:
sal = saliency(img)
idx = np.unravel_index(np.argmax(sal), sal.shape)
point = (idx[1], idx[0])
return img[point[1], point[0], :]
def split_train_test(X, y, train_ratio=0.7):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_ratio, stratify=y)
return X_train, X_test, y_train, y_test
def principal_components(img, n_components=3, spectrum=False):
h, w, d = img.shape
X = img.reshape((h * w), d)
pca = PCA(n_components=n_components, whiten=True)
bands = pca.fit_transform(X).reshape(h, w, n_components)
if spectrum:
bands, pca.components_
return bands
def pad_with_zeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
newX[margin:X.shape[0] + margin, margin:X.shape[1] + margin, :] = X
return newX
def create_patch(data, height_index, width_index, patch_size):
height_slice = slice(height_index, height_index + patch_size)
width_slice = slice(width_index, width_index + patch_size)
return data[height_slice, width_slice, :]
def create_patches(X, patch_size):
patches = []
width, height = X.shape[1], X.shape[0]
X = pad_with_zeros(X, patch_size // 2)
for i in range(height):
for j in range(width):
image_patch = create_patch(X, i, j, patch_size)
patches.append(image_patch.reshape(image_patch.shape + (1,)).astype('float32'))
return np.array(patches)
def create_image_cubes(X, y, patch_size):
width, height = X.shape[1], X.shape[0]
patchesData = create_patches(X, patch_size)
labels = []
for i in range(height):
for j in range(width):
labels.append(y[i, j])
patchesLabels = np.array(labels)
return patchesData, patchesLabels
def generate_training_data(X, y, patch_size, train_ratio=0.7):
X, y = create_image_cubes(X, y, patch_size)
X_train, X_test, y_train, y_test = split_train_test(X, y, train_ratio)
X_train = X_train.reshape(-1, patch_size, patch_size, X.shape[-1], 1)
X_test = X_test.reshape(-1, patch_size, patch_size, X.shape[-1], 1)
return X_train, X_test, y_train, y_test
def create_hsn_model(input_shape, n_classes):
input_layer = Input((*input_shape, 1))
v3D(filters=8, kernel_size=(3, 3, 7), activation='relu')(input_layer)
conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1)
conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2)
conv_layer3 = Reshape((conv_layer3.shape[1], conv_layer3.shape[2], conv_layer3.shape[3] * conv_layer3.shape[4]))(conv_layer3)
conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv_layer3)
flatten_layer = Flatten()(conv_layer4)
e(units=256, activation='relu')(flatten_layer)
dense_layer1 = Dropout(0.4)(dense_layer1)
dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)
dense_layer2 = Dropout(0.4)(dense_layer2)
output_layer = Dense(units=n_classes, activation='softmax')(dense_layer2)
model = Model(inputs=input_layer, outputs=output_layer)
adam = keras.optimizers.Adam(learning_rate=0.001, decay=1e-06)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def predict_hsn_model(model, X, patch_size):
width, height = X.shape[1], X.shape[0]
X_pred = create_patches(X, patch_size)
y_pred = np.argmax(model.predict(X_pred), axis=1)
return y_pred.reshape(height, width).astype(int)
class HyperspectralModel:
def __init__(self, name, *args):
self.name = name
if self.name == 'svm':
self.model = SVC(C=150, kernel='rbf')
elif self.name == 'hsn':
self.input_shape, self.n_classes = args
self.model = create_hsn_model(self.input_shape, self.n_classes)
def train(self, X, y, train_ratio=0.7, epochs=50):
if self.name == 'svm':
X_train, X_test, y_train, y_test = train_test_split(X.reshape(-1, X.shape[-1]), y, train_size=train_ratio, stratify=y)
self.model.fit(X_train, y_train)
return y_test, self.model.predict(X_test)
elif self.name == 'hsn':
X = principal_components(X, n_components=self.input_shape[2])
X_train, X_test, y_train, y_test = generate_training_data(X, y, self.input_shape[0], train_ratio)
self.history = self.model.fit(x=X_train, y=np_utils.to_categorical(y_train), batch_size=256, epochs=epochs)
return y_test, np.argmax(self.model.predict(X_test), axis=1)
def predict(self, X):
if self.name == 'svm':
return self.model.predict(X.reshape(-1, X.shape[2])).reshape(X.shape[0], X.shape[1])
elif self.name == 'hsn':
X = principal_components(X, n_components=self.input_shape[2])
return predict_hsn_model(self.model, X, self.input_shape[0])
def plot_history():
if self.history:
plot_train_history(self.history)
def save(self, filename='model.h5'):
self.model.save(filename)
def load(self, filename='model.h5'):
self.model = load_model(filename)
def create_model(name, *args):
return HyperspectralModel(name, *args)
| true | true |
f72e951622f6044f08a25ce59529d0101e14827c | 1,346 | py | Python | adminmgr/media/code/A3/task2/BD_749_1674.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A3/task2/BD_749_1674.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A3/task2/BD_749_1674.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | from pyspark.sql.types import StructType
import pyspark.sql.functions as F
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql import SparkSession,SQLContext
from pyspark import SparkContext,SparkConf
spark = SparkSession \
.builder \
.appName("task1") \
.getOrCreate()
userSchema = StructType().add("ID", "integer").add("lang","string").add("date","string").add("source","string").add("len",
"integer").add("likes","integer").add("RTs","integer").add("hashtags","string").add("usermentionnames","string").add("usermentionID",
"string").add("name","string").add("place","string").add("followers","integer").add("friends","integer")
csvDF = spark \
.readStream \
.option("sep", ";") \
.schema(userSchema) \
.csv("hdfs://localhost:9000/stream/")
#csvDF.dropDuplicates(["name"])
csvDF.createOrReplaceTempView("USER")
df1=spark.sql("SELECT DISTINCT name ,followers,friends FROM USER GROUP BY name,followers,friends")
#streamingDf.dropDuplicates("guid")
df1.dropDuplicates(["name"])
df1.createOrReplaceTempView("USER1")
df=spark.sql("SELECT name , followers / friends AS FRRatio FROM USER1 ORDER BY FRRatio desc limit 10")
query =df\
.writeStream \
.outputMode("complete")\
.format("console") \
.start()
query.awaitTermination(60)
query.stop()
| 28.638298 | 133 | 0.708024 | from pyspark.sql.types import StructType
import pyspark.sql.functions as F
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql import SparkSession,SQLContext
from pyspark import SparkContext,SparkConf
spark = SparkSession \
.builder \
.appName("task1") \
.getOrCreate()
userSchema = StructType().add("ID", "integer").add("lang","string").add("date","string").add("source","string").add("len",
"integer").add("likes","integer").add("RTs","integer").add("hashtags","string").add("usermentionnames","string").add("usermentionID",
"string").add("name","string").add("place","string").add("followers","integer").add("friends","integer")
csvDF = spark \
.readStream \
.option("sep", ";") \
.schema(userSchema) \
.csv("hdfs://localhost:9000/stream/")
csvDF.createOrReplaceTempView("USER")
df1=spark.sql("SELECT DISTINCT name ,followers,friends FROM USER GROUP BY name,followers,friends")
df1.dropDuplicates(["name"])
df1.createOrReplaceTempView("USER1")
df=spark.sql("SELECT name , followers / friends AS FRRatio FROM USER1 ORDER BY FRRatio desc limit 10")
query =df\
.writeStream \
.outputMode("complete")\
.format("console") \
.start()
query.awaitTermination(60)
query.stop()
| true | true |
f72e9525bfd75b58c874cba5b790cbac710cb9dd | 14,113 | py | Python | research/object_detection/export_tflite_ssd_graph_lib.py | gujralsanyam22/models | d96f8f043dbe2b5ca8ea1785f57df8faf68d8875 | [
"Apache-2.0"
] | 82,518 | 2016-02-05T12:07:23.000Z | 2022-03-31T23:09:47.000Z | research/object_detection/export_tflite_ssd_graph_lib.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 9,021 | 2016-03-08T01:02:05.000Z | 2022-03-31T08:06:35.000Z | research/object_detection/export_tflite_ssd_graph_lib.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 54,341 | 2016-02-06T17:19:55.000Z | 2022-03-31T10:27:44.000Z | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an SSD detection model to use with tf-lite.
See export_tflite_ssd_graph.py for usage.
"""
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
from object_detection.utils import tf_version
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
if tf_version.is_tf1():
from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top
def get_const_center_size_encoded_anchors(anchors):
  """Exports center-size encoded anchors as a constant tensor.
  Args:
    anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
      boxes
  Returns:
    encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
      containing the anchor boxes.
  """
  anchor_boxlist = box_list.BoxList(anchors)
  # Convert the anchors to center-size (y, x, h, w) form, the encoding the
  # TFLite detection postprocessing expects.
  y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
  num_anchors = y.get_shape().as_list()
  # Evaluate the coordinate tensors now, so the anchor values can be baked
  # into the exported graph as a single constant.
  with tf.Session() as sess:
    y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
  encoded_anchors = tf.constant(
      np.transpose(np.stack((y_out, x_out, h_out, w_out))),
      dtype=tf.float32,
      shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
      name='anchors')
  return encoded_anchors
def append_postprocessing_op(frozen_graph_def,
                             max_detections,
                             max_classes_per_detection,
                             nms_score_threshold,
                             nms_iou_threshold,
                             num_classes,
                             scale_values,
                             detections_per_class=100,
                             use_regular_nms=False,
                             additional_output_tensors=()):
  """Appends postprocessing custom op.
  Args:
    frozen_graph_def: Frozen GraphDef for SSD model after freezing the
      checkpoint
    max_detections: Maximum number of detections (boxes) to show
    max_classes_per_detection: Number of classes to display per detection
    nms_score_threshold: Score threshold used in Non-maximal suppression in
      post-processing, passed as a single-element set (consumed via pop())
    nms_iou_threshold: Intersection-over-union threshold used in Non-maximal
      suppression in post-processing, passed as a single-element set
      (consumed via pop())
    num_classes: number of classes in SSD detector
    scale_values: scale values is a dict with following key-value pairs
      {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode
      centersize boxes; each value is a single-element set (consumed via pop())
    detections_per_class: In regular NonMaxSuppression, number of anchors used
      for NonMaxSuppression per class
    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
      Fast NMS.
    additional_output_tensors: Array of additional tensor names to output.
      Tensors are appended after postprocessing output.
  Returns:
    transformed_graph_def: Frozen GraphDef with postprocessing custom op
    appended
    TFLite_Detection_PostProcess custom op node has four outputs:
    detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
    locations
    detection_classes: a float32 tensor of shape [1, num_boxes]
    with class indices
    detection_scores: a float32 tensor of shape [1, num_boxes]
    with class scores
    num_boxes: a float32 tensor of size 1 containing the number of detected
    boxes
  """
  # Create the custom postprocess node; its outputs stay float even when the
  # rest of the graph is quantized.
  new_output = frozen_graph_def.node.add()
  new_output.op = 'TFLite_Detection_PostProcess'
  new_output.name = 'TFLite_Detection_PostProcess'
  new_output.attr['_output_quantized'].CopyFrom(
      attr_value_pb2.AttrValue(b=True))
  new_output.attr['_output_types'].list.type.extend([
      types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
      types_pb2.DT_FLOAT
  ])
  new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
      attr_value_pb2.AttrValue(b=True))
  new_output.attr['max_detections'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_detections))
  new_output.attr['max_classes_per_detection'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_classes_per_detection))
  # NOTE: the threshold and scale arguments arrive as single-element sets and
  # are consumed destructively here via pop().
  new_output.attr['nms_score_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
  new_output.attr['nms_iou_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
  new_output.attr['num_classes'].CopyFrom(
      attr_value_pb2.AttrValue(i=num_classes))
  new_output.attr['y_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
  new_output.attr['x_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
  new_output.attr['h_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
  new_output.attr['w_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
  new_output.attr['detections_per_class'].CopyFrom(
      attr_value_pb2.AttrValue(i=detections_per_class))
  new_output.attr['use_regular_nms'].CopyFrom(
      attr_value_pb2.AttrValue(b=use_regular_nms))
  # Wire the raw model outputs and the constant anchors into the custom op.
  new_output.input.extend(
      ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
  # Transform the graph to append new postprocessing op
  input_names = []
  output_names = ['TFLite_Detection_PostProcess'
                 ] + list(additional_output_tensors)
  transforms = ['strip_unused_nodes']
  transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
                                         output_names, transforms)
  return transformed_graph_def
def export_tflite_graph(pipeline_config,
                        trained_checkpoint_prefix,
                        output_dir,
                        add_postprocessing_op,
                        max_detections,
                        max_classes_per_detection,
                        detections_per_class=100,
                        use_regular_nms=False,
                        binary_graph_name='tflite_graph.pb',
                        txt_graph_name='tflite_graph.pbtxt',
                        additional_output_tensors=()):
  """Exports a tflite compatible graph and anchors for ssd detection model.
  Anchors are written to a tensor and tflite compatible graph
  is written to output_dir/tflite_graph.pb.
  Args:
    pipeline_config: a pipeline.proto object containing the configuration for
      SSD model to export.
    trained_checkpoint_prefix: a file prefix for the checkpoint containing the
      trained parameters of the SSD model.
    output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a
      TFLite_Detection_PostProcess custom op
    max_detections: Maximum number of detections (boxes) to show
    max_classes_per_detection: Number of classes to display per detection
    detections_per_class: In regular NonMaxSuppression, number of anchors used
      for NonMaxSuppression per class
    use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of
      Fast NMS.
    binary_graph_name: Name of the exported graph file in binary format.
    txt_graph_name: Name of the exported graph file in text format.
    additional_output_tensors: Array of additional tensor names to output.
      Additional tensors are appended to the end of output tensor list.
  Raises:
    ValueError: if the pipeline config contains models other than ssd or uses an
      fixed_shape_resizer and provides a shape as well.
  """
  tf.gfile.MakeDirs(output_dir)
  # Only SSD architectures are convertible with this exporter.
  if pipeline_config.model.WhichOneof('model') != 'ssd':
    raise ValueError('Only ssd models are supported in tflite. '
                     'Found {} in config'.format(
                         pipeline_config.model.WhichOneof('model')))
  num_classes = pipeline_config.model.ssd.num_classes
  # NOTE: thresholds and scale values are wrapped in single-element sets;
  # append_postprocessing_op() consumes them destructively via pop().
  nms_score_threshold = {
      pipeline_config.model.ssd.post_processing.batch_non_max_suppression
      .score_threshold
  }
  nms_iou_threshold = {
      pipeline_config.model.ssd.post_processing.batch_non_max_suppression
      .iou_threshold
  }
  scale_values = {}
  scale_values['y_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale
  }
  scale_values['x_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale
  }
  scale_values['h_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale
  }
  scale_values['w_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale
  }
  image_resizer_config = pipeline_config.model.ssd.image_resizer
  image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
  num_channels = _DEFAULT_NUM_CHANNELS
  # TFLite needs a static input shape, so only fixed_shape_resizer is allowed.
  if image_resizer == 'fixed_shape_resizer':
    height = image_resizer_config.fixed_shape_resizer.height
    width = image_resizer_config.fixed_shape_resizer.width
    if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
      num_channels = 1
    shape = [1, height, width, num_channels]
  else:
    raise ValueError(
        'Only fixed_shape_resizer'
        'is supported with tflite. Found {}'.format(
            image_resizer_config.WhichOneof('image_resizer_oneof')))
  image = tf.placeholder(
      tf.float32, shape=shape, name='normalized_input_image_tensor')
  detection_model = model_builder.build(
      pipeline_config.model, is_training=False)
  predicted_tensors = detection_model.predict(image, true_image_shapes=None)
  # The score conversion occurs before the post-processing custom op
  _, score_conversion_fn = post_processing_builder.build(
      pipeline_config.model.ssd.post_processing)
  class_predictions = score_conversion_fn(
      predicted_tensors['class_predictions_with_background'])
  with tf.name_scope('raw_outputs'):
    # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
    # containing the encoded box predictions. Note that these are raw
    # predictions and no Non-Max suppression is applied on them and
    # no decode center size boxes is applied to them.
    tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
    # 'raw_outputs/class_predictions': a float32 tensor of shape
    # [1, num_anchors, num_classes] containing the class scores for each anchor
    # after applying score conversion.
    tf.identity(class_predictions, name='class_predictions')
  # 'anchors': a float32 tensor of shape
  # [4, num_anchors] containing the anchors as a constant node.
  tf.identity(
      get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
      name='anchors')
  # Add global step to the graph, so we know the training step number when we
  # evaluate the model.
  tf.train.get_or_create_global_step()
  # graph rewriter (inserts fake-quantization ops when configured)
  is_quantized = pipeline_config.HasField('graph_rewriter')
  if is_quantized:
    graph_rewriter_config = pipeline_config.graph_rewriter
    graph_rewriter_fn = graph_rewriter_builder.build(
        graph_rewriter_config, is_training=False)
    graph_rewriter_fn()
  if pipeline_config.model.ssd.feature_extractor.HasField('fpn'):
    exporter.rewrite_nn_resize_op(is_quantized)
  # freeze the graph
  saver_kwargs = {}
  if pipeline_config.eval_config.use_moving_averages:
    saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
    moving_average_checkpoint = tempfile.NamedTemporaryFile()
    exporter.replace_variable_values_with_moving_averages(
        tf.get_default_graph(), trained_checkpoint_prefix,
        moving_average_checkpoint.name)
    checkpoint_to_use = moving_average_checkpoint.name
  else:
    checkpoint_to_use = trained_checkpoint_prefix
  saver = tf.train.Saver(**saver_kwargs)
  input_saver_def = saver.as_saver_def()
  frozen_graph_def = exporter.freeze_graph_with_def_protos(
      input_graph_def=tf.get_default_graph().as_graph_def(),
      input_saver_def=input_saver_def,
      input_checkpoint=checkpoint_to_use,
      output_node_names=','.join([
          'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
          'anchors'
      ] + list(additional_output_tensors)),
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      clear_devices=True,
      output_graph='',
      initializer_nodes='')
  # Add new operation to do post processing in a custom op (TF Lite only)
  if add_postprocessing_op:
    transformed_graph_def = append_postprocessing_op(
        frozen_graph_def,
        max_detections,
        max_classes_per_detection,
        nms_score_threshold,
        nms_iou_threshold,
        num_classes,
        scale_values,
        detections_per_class,
        use_regular_nms,
        additional_output_tensors=additional_output_tensors)
  else:
    # Return frozen without adding post-processing custom op
    transformed_graph_def = frozen_graph_def
  # Write the final graph in both binary and human-readable text form.
  binary_graph = os.path.join(output_dir, binary_graph_name)
  with tf.gfile.GFile(binary_graph, 'wb') as f:
    f.write(transformed_graph_def.SerializeToString())
  txt_graph = os.path.join(output_dir, txt_graph_name)
  with tf.gfile.GFile(txt_graph, 'w') as f:
    f.write(str(transformed_graph_def))
| 42.128358 | 101 | 0.728194 |
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
from object_detection.utils import tf_version
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
if tf_version.is_tf1():
from tensorflow.tools.graph_transforms import TransformGraph
def get_const_center_size_encoded_anchors(anchors):
  """Exports center-size encoded anchors as a constant tensor.

  Args:
    anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
      boxes

  Returns:
    encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
      containing the anchor boxes, under the node name 'anchors'.
  """
  anchor_boxlist = box_list.BoxList(anchors)
  y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
  num_anchors = y.get_shape().as_list()
  # Evaluate the coordinate tensors eagerly so their values can be baked into
  # a constant node.
  with tf.Session() as sess:
    y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
  encoded_anchors = tf.constant(
      np.transpose(np.stack((y_out, x_out, h_out, w_out))),
      dtype=tf.float32,
      shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
      name='anchors')
  return encoded_anchors
def append_postprocessing_op(frozen_graph_def,
                             max_detections,
                             max_classes_per_detection,
                             nms_score_threshold,
                             nms_iou_threshold,
                             num_classes,
                             scale_values,
                             detections_per_class=100,
                             use_regular_nms=False,
                             additional_output_tensors=()):
  """Appends the TFLite_Detection_PostProcess custom op to a frozen graph.

  Args:
    frozen_graph_def: Frozen GraphDef for SSD model after freezing the
      checkpoint.
    max_detections: Maximum number of detections (boxes) to show.
    max_classes_per_detection: Number of classes to display per detection.
    nms_score_threshold: NMS score threshold, passed as a single-element set
      (consumed via pop()).
    nms_iou_threshold: NMS intersection-over-union threshold, passed as a
      single-element set (consumed via pop()).
    num_classes: number of classes in SSD detector.
    scale_values: dict with keys y_scale/x_scale/h_scale/w_scale used to
      decode center-size boxes; each value is a single-element set
      (consumed via pop()).
    detections_per_class: In regular NonMaxSuppression, number of anchors used
      per class.
    use_regular_nms: Use Regular NMS instead of Fast NMS when True.
    additional_output_tensors: Extra tensor names to output after the
      postprocessing outputs.

  Returns:
    Frozen GraphDef with the postprocessing custom op appended and unused
    nodes stripped.
  """
  # Create the custom postprocess node; its outputs stay float even when the
  # rest of the graph is quantized.
  new_output = frozen_graph_def.node.add()
  new_output.op = 'TFLite_Detection_PostProcess'
  new_output.name = 'TFLite_Detection_PostProcess'
  new_output.attr['_output_quantized'].CopyFrom(
      attr_value_pb2.AttrValue(b=True))
  new_output.attr['_output_types'].list.type.extend([
      types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
      types_pb2.DT_FLOAT
  ])
  new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
      attr_value_pb2.AttrValue(b=True))
  new_output.attr['max_detections'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_detections))
  new_output.attr['max_classes_per_detection'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_classes_per_detection))
  # NOTE: the threshold and scale arguments arrive as single-element sets and
  # are consumed destructively here via pop().
  new_output.attr['nms_score_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
  new_output.attr['nms_iou_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
  new_output.attr['num_classes'].CopyFrom(
      attr_value_pb2.AttrValue(i=num_classes))
  new_output.attr['y_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
  new_output.attr['x_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
  new_output.attr['h_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
  new_output.attr['w_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
  new_output.attr['detections_per_class'].CopyFrom(
      attr_value_pb2.AttrValue(i=detections_per_class))
  new_output.attr['use_regular_nms'].CopyFrom(
      attr_value_pb2.AttrValue(b=use_regular_nms))
  # Wire the raw model outputs and the constant anchors into the custom op.
  new_output.input.extend(
      ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
  # Strip everything not needed to compute the requested outputs.
  input_names = []
  output_names = ['TFLite_Detection_PostProcess'
                 ] + list(additional_output_tensors)
  transforms = ['strip_unused_nodes']
  transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
                                         output_names, transforms)
  return transformed_graph_def
def export_tflite_graph(pipeline_config,
                        trained_checkpoint_prefix,
                        output_dir,
                        add_postprocessing_op,
                        max_detections,
                        max_classes_per_detection,
                        detections_per_class=100,
                        use_regular_nms=False,
                        binary_graph_name='tflite_graph.pb',
                        txt_graph_name='tflite_graph.pbtxt',
                        additional_output_tensors=()):
  """Exports a tflite compatible graph and anchors for an SSD detection model.

  Anchors are baked into a constant tensor and the tflite compatible graph is
  written to output_dir in both binary and text form.

  Args:
    pipeline_config: a pipeline.proto object containing the configuration for
      SSD model to export.
    trained_checkpoint_prefix: a file prefix for the checkpoint containing the
      trained parameters of the SSD model.
    output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If true, append the TFLite_Detection_PostProcess
      custom op to the frozen graph.
    max_detections: Maximum number of detections (boxes) to show.
    max_classes_per_detection: Number of classes to display per detection.
    detections_per_class: In regular NonMaxSuppression, number of anchors used
      per class.
    use_regular_nms: Use Regular NMS instead of Fast NMS when True.
    binary_graph_name: Name of the exported graph file in binary format.
    txt_graph_name: Name of the exported graph file in text format.
    additional_output_tensors: Extra tensor names appended to the output list.

  Raises:
    ValueError: if the pipeline config contains models other than ssd or does
      not use a fixed_shape_resizer.
  """
  tf.gfile.MakeDirs(output_dir)
  # Only SSD architectures are convertible with this exporter.
  if pipeline_config.model.WhichOneof('model') != 'ssd':
    raise ValueError('Only ssd models are supported in tflite. '
                     'Found {} in config'.format(
                         pipeline_config.model.WhichOneof('model')))
  num_classes = pipeline_config.model.ssd.num_classes
  # NOTE: thresholds and scale values are wrapped in single-element sets;
  # append_postprocessing_op() consumes them destructively via pop().
  nms_score_threshold = {
      pipeline_config.model.ssd.post_processing.batch_non_max_suppression
      .score_threshold
  }
  nms_iou_threshold = {
      pipeline_config.model.ssd.post_processing.batch_non_max_suppression
      .iou_threshold
  }
  scale_values = {}
  scale_values['y_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale
  }
  scale_values['x_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale
  }
  scale_values['h_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale
  }
  scale_values['w_scale'] = {
      pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale
  }
  image_resizer_config = pipeline_config.model.ssd.image_resizer
  image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
  num_channels = _DEFAULT_NUM_CHANNELS
  # TFLite needs a static input shape, so only fixed_shape_resizer is allowed.
  if image_resizer == 'fixed_shape_resizer':
    height = image_resizer_config.fixed_shape_resizer.height
    width = image_resizer_config.fixed_shape_resizer.width
    if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
      num_channels = 1
    shape = [1, height, width, num_channels]
  else:
    raise ValueError(
        'Only fixed_shape_resizer'
        'is supported with tflite. Found {}'.format(
            image_resizer_config.WhichOneof('image_resizer_oneof')))
  image = tf.placeholder(
      tf.float32, shape=shape, name='normalized_input_image_tensor')
  detection_model = model_builder.build(
      pipeline_config.model, is_training=False)
  predicted_tensors = detection_model.predict(image, true_image_shapes=None)
  # Score conversion happens before the post-processing custom op.
  _, score_conversion_fn = post_processing_builder.build(
      pipeline_config.model.ssd.post_processing)
  class_predictions = score_conversion_fn(
      predicted_tensors['class_predictions_with_background'])
  with tf.name_scope('raw_outputs'):
    # Raw (un-decoded, un-suppressed) box encodings and converted class
    # scores, exposed under stable node names for the postprocess op.
    tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
    tf.identity(class_predictions, name='class_predictions')
  # Anchors baked in as a constant node named 'anchors'.
  tf.identity(
      get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
      name='anchors')
  # Add global step to the graph, so we know the training step number when we
  # evaluate the model.
  tf.train.get_or_create_global_step()
  # Optional graph rewriter (inserts fake-quantization ops when configured).
  is_quantized = pipeline_config.HasField('graph_rewriter')
  if is_quantized:
    graph_rewriter_config = pipeline_config.graph_rewriter
    graph_rewriter_fn = graph_rewriter_builder.build(
        graph_rewriter_config, is_training=False)
    graph_rewriter_fn()
  if pipeline_config.model.ssd.feature_extractor.HasField('fpn'):
    exporter.rewrite_nn_resize_op(is_quantized)
  # Freeze the graph, optionally swapping in moving-average variable values.
  saver_kwargs = {}
  if pipeline_config.eval_config.use_moving_averages:
    saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
    moving_average_checkpoint = tempfile.NamedTemporaryFile()
    exporter.replace_variable_values_with_moving_averages(
        tf.get_default_graph(), trained_checkpoint_prefix,
        moving_average_checkpoint.name)
    checkpoint_to_use = moving_average_checkpoint.name
  else:
    checkpoint_to_use = trained_checkpoint_prefix
  saver = tf.train.Saver(**saver_kwargs)
  input_saver_def = saver.as_saver_def()
  frozen_graph_def = exporter.freeze_graph_with_def_protos(
      input_graph_def=tf.get_default_graph().as_graph_def(),
      input_saver_def=input_saver_def,
      input_checkpoint=checkpoint_to_use,
      output_node_names=','.join([
          'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
          'anchors'
      ] + list(additional_output_tensors)),
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      clear_devices=True,
      output_graph='',
      initializer_nodes='')
  # Append the TF Lite-only postprocessing custom op when requested.
  if add_postprocessing_op:
    transformed_graph_def = append_postprocessing_op(
        frozen_graph_def,
        max_detections,
        max_classes_per_detection,
        nms_score_threshold,
        nms_iou_threshold,
        num_classes,
        scale_values,
        detections_per_class,
        use_regular_nms,
        additional_output_tensors=additional_output_tensors)
  else:
    transformed_graph_def = frozen_graph_def
  # Write the final graph in both binary and human-readable text form.
  binary_graph = os.path.join(output_dir, binary_graph_name)
  with tf.gfile.GFile(binary_graph, 'wb') as f:
    f.write(transformed_graph_def.SerializeToString())
  txt_graph = os.path.join(output_dir, txt_graph_name)
  with tf.gfile.GFile(txt_graph, 'w') as f:
    f.write(str(transformed_graph_def))
| true | true |
f72e965b76a38bbb24d1f12720ee08067253c519 | 510 | py | Python | lab2/number_even.py | irisxiu666/5001 | 07ea4bac4fa7c1f961d93bdd9723716b2452adc4 | [
"MIT"
] | null | null | null | lab2/number_even.py | irisxiu666/5001 | 07ea4bac4fa7c1f961d93bdd9723716b2452adc4 | [
"MIT"
] | null | null | null | lab2/number_even.py | irisxiu666/5001 | 07ea4bac4fa7c1f961d93bdd9723716b2452adc4 | [
"MIT"
] | null | null | null | """ A program to judge whether an integer is odd or even."""
def main():
    """Read an integer from the user and report whether it is odd or even."""
    # int() raises ValueError if the input is not a valid integer literal.
    number = int(input('Input number: '))
    # An even number leaves no remainder when divided by two.
    parity = 'even' if number % 2 == 0 else 'odd'
    print(number, 'is', parity)
if __name__ == '__main__':
    main()
| 26.842105 | 64 | 0.621569 |
def main():
    """Read an integer from the user and print whether it is odd or even."""
    # int() raises ValueError if the input is not a valid integer literal.
    number = int(input('Input number: '))
    # A remainder of zero when dividing by 2 means the number is even.
    if number % 2 == 0:
        print(number, 'is even')
    # the number is odd and print it out.
    else:
        print(number, 'is odd')
if __name__ == '__main__':
    main()
| true | true |
f72e96c7a9c0f18f6c980fcbc1de140fbd826ae4 | 3,370 | py | Python | vulnerabilities/importers/archlinux.py | ziadhany/vulnerablecode | c94ed5701a70bc836a66484f6d54aacc81c4288f | [
"Apache-2.0"
] | null | null | null | vulnerabilities/importers/archlinux.py | ziadhany/vulnerablecode | c94ed5701a70bc836a66484f6d54aacc81c4288f | [
"Apache-2.0"
] | null | null | null | vulnerabilities/importers/archlinux.py | ziadhany/vulnerablecode | c94ed5701a70bc836a66484f6d54aacc81c4288f | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import dataclasses
import json
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Set
from urllib.request import urlopen
from packageurl import PackageURL
from vulnerabilities import severity_systems
from vulnerabilities.importer import AdvisoryData
from vulnerabilities.importer import Importer
from vulnerabilities.importer import Reference
from vulnerabilities.importer import VulnerabilitySeverity
from vulnerabilities.utils import nearest_patched_package
class ArchlinuxImporter(Importer):
    """Importer for security advisories from the Arch Linux security tracker."""

    def __enter__(self):
        # Download the tracker feed once; later calls iterate the cached copy.
        self._api_response = self._fetch()

    def updated_advisories(self) -> Set[AdvisoryData]:
        """Parse every fetched tracker record and return batched advisories."""
        advisories = []
        for record in self._api_response:
            advisories.extend(self._parse(record))
        return self.batch_advisories(advisories)

    def _fetch(self) -> Iterable[Mapping]:
        """Download and JSON-decode the Arch Linux tracker feed."""
        with urlopen(self.config.archlinux_tracker_url) as response:
            return json.load(response)

    def _parse(self, record) -> List[AdvisoryData]:
        """Build one advisory per CVE referenced by a tracker record.

        Args:
            record: one decoded tracker entry with "issues", "packages",
                "affected", "fixed", "severity", "name" and "advisories" keys.

        Returns:
            A list of AdvisoryData objects, one per CVE id in the record.
        """
        advisories = []
        for cve_id in record["issues"]:
            affected_packages = []
            for name in record["packages"]:
                impacted_purls, resolved_purls = [], []
                impacted_purls.append(
                    PackageURL(
                        name=name,
                        type="pacman",
                        namespace="archlinux",
                        version=record["affected"],
                    )
                )
                # A record without a "fixed" version has no resolved packages.
                if record["fixed"]:
                    resolved_purls.append(
                        PackageURL(
                            name=name,
                            type="pacman",
                            namespace="archlinux",
                            version=record["fixed"],
                        )
                    )
                affected_packages.extend(nearest_patched_package(impacted_purls, resolved_purls))
            references = []
            references.append(
                Reference(
                    reference_id=record["name"],
                    url="https://security.archlinux.org/{}".format(record["name"]),
                    severities=[
                        VulnerabilitySeverity(
                            system=severity_systems.ARCHLINUX, value=record["severity"]
                        )
                    ],
                )
            )
            for ref in record["advisories"]:
                references.append(
                    Reference(
                        reference_id=ref,
                        url="https://security.archlinux.org/{}".format(ref),
                    )
                )
            # Bug fix: the original instantiated `Advisory`, a name that is
            # never imported (only AdvisoryData is), so _parse raised
            # NameError at runtime.
            # NOTE(review): AdvisoryData identifies vulnerabilities via
            # `aliases`; confirm the field names against the installed
            # vulnerablecode version.
            advisories.append(
                AdvisoryData(
                    aliases=[cve_id],
                    summary="",
                    affected_packages=affected_packages,
                    references=references,
                )
            )
        return advisories
| 33.366337 | 97 | 0.546884 |
import dataclasses
import json
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Set
from urllib.request import urlopen
from packageurl import PackageURL
from vulnerabilities import severity_systems
from vulnerabilities.importer import AdvisoryData
from vulnerabilities.importer import Importer
from vulnerabilities.importer import Reference
from vulnerabilities.importer import VulnerabilitySeverity
from vulnerabilities.utils import nearest_patched_package
class ArchlinuxImporter(Importer):
    """Importer for security advisories from the Arch Linux security tracker."""

    def __enter__(self):
        # Download the tracker feed once; later calls iterate the cached copy.
        self._api_response = self._fetch()

    def updated_advisories(self) -> Set[AdvisoryData]:
        """Parse every fetched tracker record and return batched advisories."""
        advisories = []
        for record in self._api_response:
            advisories.extend(self._parse(record))
        return self.batch_advisories(advisories)

    def _fetch(self) -> Iterable[Mapping]:
        """Download and JSON-decode the Arch Linux tracker feed."""
        with urlopen(self.config.archlinux_tracker_url) as response:
            return json.load(response)

    def _parse(self, record) -> List[AdvisoryData]:
        """Build one advisory per CVE referenced by a tracker record.

        Args:
            record: one decoded tracker entry with "issues", "packages",
                "affected", "fixed", "severity", "name" and "advisories" keys.

        Returns:
            A list of AdvisoryData objects, one per CVE id in the record.
        """
        advisories = []
        for cve_id in record["issues"]:
            affected_packages = []
            for name in record["packages"]:
                impacted_purls, resolved_purls = [], []
                impacted_purls.append(
                    PackageURL(
                        name=name,
                        type="pacman",
                        namespace="archlinux",
                        version=record["affected"],
                    )
                )
                # A record without a "fixed" version has no resolved packages.
                if record["fixed"]:
                    resolved_purls.append(
                        PackageURL(
                            name=name,
                            type="pacman",
                            namespace="archlinux",
                            version=record["fixed"],
                        )
                    )
                affected_packages.extend(nearest_patched_package(impacted_purls, resolved_purls))
            references = []
            references.append(
                Reference(
                    reference_id=record["name"],
                    url="https://security.archlinux.org/{}".format(record["name"]),
                    severities=[
                        VulnerabilitySeverity(
                            system=severity_systems.ARCHLINUX, value=record["severity"]
                        )
                    ],
                )
            )
            for ref in record["advisories"]:
                references.append(
                    Reference(
                        reference_id=ref,
                        url="https://security.archlinux.org/{}".format(ref),
                    )
                )
            # Bug fix: the original instantiated `Advisory`, a name that is
            # never imported (only AdvisoryData is), so _parse raised
            # NameError at runtime.
            # NOTE(review): AdvisoryData identifies vulnerabilities via
            # `aliases`; confirm the field names against the installed
            # vulnerablecode version.
            advisories.append(
                AdvisoryData(
                    aliases=[cve_id],
                    summary="",
                    affected_packages=affected_packages,
                    references=references,
                )
            )
        return advisories
| true | true |
f72e96f5af4b264e6fbcdc962ecc0d3a0e21838e | 14,369 | py | Python | jni-build/jni/include/tensorflow/python/training/server_lib_test.py | rcelebi/android-elfali | 4ea14a58a18356ef9e16aba2e7dae84c02afba12 | [
"Apache-2.0"
] | 680 | 2016-12-03T14:38:28.000Z | 2022-02-16T04:06:45.000Z | tensorflow/python/training/server_lib_test.py | alainrk/tensorflow | 314d9cd9b607460f8bfea80fc828b1521ca18443 | [
"Apache-2.0"
] | 38 | 2016-11-17T08:43:51.000Z | 2019-11-12T12:27:04.000Z | tensorflow/python/training/server_lib_test.py | alainrk/tensorflow | 314d9cd9b607460f8bfea80fc828b1521ca18443 | [
"Apache-2.0"
] | 250 | 2016-12-05T10:37:17.000Z | 2022-03-18T21:26:55.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class GrpcServerTest(tf.test.TestCase):
  def testRunStep(self):
    """A session connected to an in-process server can run a simple graph."""
    server = tf.train.Server.create_local_server()
    with tf.Session(server.target) as sess:
      c = tf.constant([[2, 1]])
      d = tf.constant([[1], [2]])
      e = tf.matmul(c, d)
      self.assertAllEqual([[4]], sess.run(e))
    # TODO(mrry): Add `server.stop()` and `server.join()` when these work.
  def testMultipleSessions(self):
    """Two sessions against the same server compute the same result."""
    server = tf.train.Server.create_local_server()
    c = tf.constant([[2, 1]])
    d = tf.constant([[1], [2]])
    e = tf.matmul(c, d)
    sess_1 = tf.Session(server.target)
    sess_2 = tf.Session(server.target)
    self.assertAllEqual([[4]], sess_1.run(e))
    self.assertAllEqual([[4]], sess_2.run(e))
    sess_1.close()
    sess_2.close()
    # TODO(mrry): Add `server.stop()` and `server.join()` when these work.
# Verifies behavior of multiple variables with multiple sessions connecting to
# the same server.
  def testSameVariablesNoClear(self):
    """Variables initialized in one session remain visible to another."""
    server = tf.train.Server.create_local_server()
    with tf.Session(server.target) as sess_1:
      v0 = tf.Variable([[2, 1]], name="v0")
      v1 = tf.Variable([[1], [2]], name="v1")
      v2 = tf.matmul(v0, v1)
      sess_1.run([v0.initializer, v1.initializer])
      self.assertAllEqual([[4]], sess_1.run(v2))
    with tf.Session(server.target) as sess_2:
      # Look the variables up by name; the second session reuses the
      # server-side state initialized by sess_1.
      new_v0 = tf.get_default_graph().get_tensor_by_name("v0:0")
      new_v1 = tf.get_default_graph().get_tensor_by_name("v1:0")
      new_v2 = tf.matmul(new_v0, new_v1)
      self.assertAllEqual([[4]], sess_2.run(new_v2))
# Verifies behavior of tf.Session.reset().
  def testSameVariablesClear(self):
    """tf.Session.reset() aborts open sessions and clears variable state."""
    server = tf.train.Server.create_local_server()
    # Creates a graph with 2 variables.
    v0 = tf.Variable([[2, 1]], name="v0")
    v1 = tf.Variable([[1], [2]], name="v1")
    v2 = tf.matmul(v0, v1)
    # Verifies that both sessions connecting to the same target return
    # the same results.
    sess_1 = tf.Session(server.target)
    sess_2 = tf.Session(server.target)
    sess_1.run(tf.initialize_all_variables())
    self.assertAllEqual([[4]], sess_1.run(v2))
    self.assertAllEqual([[4]], sess_2.run(v2))
    # Resets target. Sessions abort. Use sess_2 to verify.
    tf.Session.reset(server.target)
    with self.assertRaises(tf.errors.AbortedError):
      self.assertAllEqual([[4]], sess_2.run(v2))
    # Connects to the same target. Device memory for the variables would have
    # been released, so they will be uninitialized.
    sess_2 = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess_2.run(v2)
    # Reinitializes the variables.
    sess_2.run(tf.initialize_all_variables())
    self.assertAllEqual([[4]], sess_2.run(v2))
    sess_2.close()
# Verifies behavior of tf.Session.reset() with multiple containers using
# default container names as defined by the target name.
  def testSameVariablesClearContainer(self):
    """tf.Session.reset() with container names only clears those containers."""
    # Starts two servers with different names so they map to different
    # resource "containers".
    server0 = tf.train.Server({"local0": ["localhost:0"]}, protocol="grpc",
                              start=True)
    server1 = tf.train.Server({"local1": ["localhost:0"]}, protocol="grpc",
                              start=True)
    # Creates a graph with 2 variables.
    v0 = tf.Variable(1.0, name="v0")
    v1 = tf.Variable(2.0, name="v0")
    # Initializes the variables. Verifies that the values are correct.
    sess_0 = tf.Session(server0.target)
    sess_1 = tf.Session(server1.target)
    sess_0.run(v0.initializer)
    sess_1.run(v1.initializer)
    self.assertAllEqual(1.0, sess_0.run(v0))
    self.assertAllEqual(2.0, sess_1.run(v1))
    # Resets container "local0". Verifies that v0 is no longer initialized.
    tf.Session.reset(server0.target, ["local0"])
    sess = tf.Session(server0.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess.run(v0)
    # Reinitializes v0 for the following test.
    sess.run(v0.initializer)
    # Verifies that v1 is still valid.
    self.assertAllEqual(2.0, sess_1.run(v1))
    # Resets container "local1". Verifies that v1 is no longer initialized.
    tf.Session.reset(server1.target, ["local1"])
    sess = tf.Session(server1.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess.run(v1)
    # Verifies that v0 is still valid.
    sess = tf.Session(server0.target)
    self.assertAllEqual(1.0, sess.run(v0))
# Verifies behavior of tf.Session.reset() with multiple containers using
# tf.container.
  def testMultipleContainers(self):
    """Resetting one tf.container only invalidates variables placed in it."""
    with tf.container("test0"):
      v0 = tf.Variable(1.0, name="v0")
    with tf.container("test1"):
      v1 = tf.Variable(2.0, name="v0")
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    sess.run(tf.initialize_all_variables())
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))
    # Resets container. Session aborts.
    tf.Session.reset(server.target, ["test0"])
    with self.assertRaises(tf.errors.AbortedError):
      sess.run(v1)
    # Connects to the same target. Device memory for the v0 would have
    # been released, so it will be uninitialized. But v1 should still
    # be valid.
    sess = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess.run(v0)
    self.assertAllEqual(2.0, sess.run(v1))
# Verifies various reset failures.
  def testResetFails(self):
    """tf.Session.reset() failures: bad target, timeout, unknown container."""
    # Creates variable with container name.
    with tf.container("test0"):
      v0 = tf.Variable(1.0, name="v0")
    # Creates variable with default container.
    v1 = tf.Variable(2.0, name="v1")
    # Verifies resetting the non-existent target returns error.
    with self.assertRaises(tf.errors.NotFoundError):
      tf.Session.reset("nonexistent", ["test0"])
    # Verifies resetting with config.
    # Verifies that resetting target with no server times out.
    with self.assertRaises(tf.errors.DeadlineExceededError):
      tf.Session.reset("grpc://localhost:0", ["test0"],
                       config=tf.ConfigProto(operation_timeout_in_ms=5))
    # Verifies no containers are reset with non-existent container.
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    sess.run(tf.initialize_all_variables())
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))
    # No container is reset, but the server is reset.
    tf.Session.reset(server.target, ["test1"])
    # Verifies that both variables are still valid.
    sess = tf.Session(server.target)
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))
  def testLargeConstant(self):
    """A large (10000x3000 float32) constant can be shipped and evaluated."""
    server = tf.train.Server.create_local_server()
    with tf.Session(server.target) as sess:
      const_val = np.empty([10000, 3000], dtype=np.float32)
      const_val.fill(0.5)
      c = tf.constant(const_val)
      shape_t = tf.shape(c)
      self.assertAllEqual([10000, 3000], sess.run(shape_t))
  def testLargeFetch(self):
    """A large tensor produced on the server can be fetched by the client."""
    server = tf.train.Server.create_local_server()
    with tf.Session(server.target) as sess:
      c = tf.fill([10000, 3000], 0.5)
      expected_val = np.empty([10000, 3000], dtype=np.float32)
      expected_val.fill(0.5)
      self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = tf.placeholder(tf.float32, shape=[10000, 3000])
min_t = tf.reduce_min(p)
max_t = tf.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
  def testCloseCancelsBlockingOperation(self):
    """Closing the session cancels a dequeue blocked in another thread."""
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    q = tf.FIFOQueue(10, [tf.float32])
    enqueue_op = q.enqueue(37.0)
    dequeue_t = q.dequeue()
    sess.run(enqueue_op)
    sess.run(dequeue_t)
    def blocking_dequeue():
      # The single enqueued element was consumed above, so this dequeue
      # blocks; closing the session should raise CancelledError here.
      with self.assertRaises(tf.errors.CancelledError):
        sess.run(dequeue_t)
    blocking_thread = self.checkedThread(blocking_dequeue)
    blocking_thread.start()
    # Give the worker thread time to enter the blocking run call.
    time.sleep(0.5)
    sess.close()
    blocking_thread.join()
  def testSetConfiguration(self):
    """A ConfigProto passed at server creation shows up in the ServerDef."""
    config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.1))
    # Configure a server using the default local server options.
    server = tf.train.Server.create_local_server(config=config, start=False)
    self.assertEqual(
        0.1,
        server.server_def.default_session_config
        .gpu_options.per_process_gpu_memory_fraction)
    # Configure a server using an explicit ServerDef with an
    # overridden config.
    cluster_def = tf.train.ClusterSpec(
        {"localhost": ["localhost:0"]}).as_cluster_def()
    server_def = tf.train.ServerDef(
        cluster=cluster_def, job_name="localhost", task_index=0,
        protocol="grpc")
    server = tf.train.Server(server_def, config=config, start=False)
    self.assertEqual(
        0.1,
        server.server_def.default_session_config
        .gpu_options.per_process_gpu_memory_fraction)
  def testInvalidHostname(self):
    """An address without a port is rejected with InvalidArgumentError."""
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "port"):
      _ = tf.train.Server({"local": ["localhost"]},
                          job_name="local",
                          task_index=0)
  def testInteractiveSession(self):
    """InteractiveSession against a gRPC target is currently unimplemented."""
    server = tf.train.Server.create_local_server()
    # TODO(b/29900832): Remove this assertion when the bug is fixed.
    a = tf.constant(1.0)
    with self.assertRaisesRegexp(tf.errors.UnimplementedError, "pruned"):
      sess = tf.InteractiveSession(target=server.target)
      sess.run(a)
    # TODO(b/29900832): The following code fails (without the unimplemented
    # check in `tensorflow::MasterSession`):
    # a = tf.constant(1.0)
    # b = tf.constant(2.0)
    # self.assertEqual(1.0, sess.run(a))
    # self.assertEqual(2.0, sess.run(b))
class ServerDefTest(tf.test.TestCase):
  """Tests building ServerDef protos from ClusterSpec descriptions."""
  def testLocalServer(self):
    # Single job with a single task.
    cluster_def = tf.train.ClusterSpec(
        {"local": ["localhost:2222"]}).as_cluster_def()
    server_def = tf.train.ServerDef(
        cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
    self.assertProtoEquals("""
    cluster {
      job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
    }
    job_name: 'local' task_index: 0 protocol: 'grpc'
    """, server_def)
    # Verifies round trip from Proto->Spec->Proto is correct.
    cluster_spec = tf.train.ClusterSpec(cluster_def)
    self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
  def testTwoProcesses(self):
    # Single job with two tasks.
    cluster_def = tf.train.ClusterSpec(
        {"local": ["localhost:2222", "localhost:2223"]}).as_cluster_def()
    server_def = tf.train.ServerDef(
        cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
    self.assertProtoEquals("""
    cluster {
      job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
                         tasks { key: 1 value: 'localhost:2223' } }
    }
    job_name: 'local' task_index: 1 protocol: 'grpc'
    """, server_def)
    # Verifies round trip from Proto->Spec->Proto is correct.
    cluster_spec = tf.train.ClusterSpec(cluster_def)
    self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
  def testTwoJobs(self):
    # Two jobs ('ps' and 'worker') with multiple tasks each.
    cluster_def = tf.train.ClusterSpec(
        {"ps": ["ps0:2222", "ps1:2222"],
         "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
    ).as_cluster_def()
    server_def = tf.train.ServerDef(
        cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
    self.assertProtoEquals("""
    cluster {
      job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
                       tasks { key: 1 value: 'ps1:2222' } }
      job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
                           tasks { key: 1 value: 'worker1:2222' }
                           tasks { key: 2 value: 'worker2:2222' } }
    }
    job_name: 'worker' task_index: 2 protocol: 'grpc'
    """, server_def)
    # Verifies round trip from Proto->Spec->Proto is correct.
    cluster_spec = tf.train.ClusterSpec(cluster_def)
    self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
  def testClusterSpec(self):
    # ClusterSpec should produce the same proto regardless of whether it
    # is built from a dict, another ClusterSpec, or a ClusterDef proto.
    cluster_spec = tf.train.ClusterSpec(
        {"ps": ["ps0:2222", "ps1:2222"],
         "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]})
    expected_proto = """
    job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
                     tasks { key: 1 value: 'ps1:2222' } }
    job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
                         tasks { key: 1 value: 'worker1:2222' }
                         tasks { key: 2 value: 'worker2:2222' } }
    """
    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
    self.assertProtoEquals(
        expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
if __name__ == "__main__":
  # Run all test cases defined in this file.
  tf.test.main()
| 37.322078 | 80 | 0.664416 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class GrpcServerTest(tf.test.TestCase):
def testRunStep(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
def testMultipleSessions(self):
server = tf.train.Server.create_local_server()
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
def testSameVariablesNoClear(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess_1:
v0 = tf.Variable([[2, 1]], name="v0")
v1 = tf.Variable([[1], [2]], name="v1")
v2 = tf.matmul(v0, v1)
sess_1.run([v0.initializer, v1.initializer])
self.assertAllEqual([[4]], sess_1.run(v2))
with tf.Session(server.target) as sess_2:
new_v0 = tf.get_default_graph().get_tensor_by_name("v0:0")
new_v1 = tf.get_default_graph().get_tensor_by_name("v1:0")
new_v2 = tf.matmul(new_v0, new_v1)
self.assertAllEqual([[4]], sess_2.run(new_v2))
def testSameVariablesClear(self):
server = tf.train.Server.create_local_server()
v0 = tf.Variable([[2, 1]], name="v0")
v1 = tf.Variable([[1], [2]], name="v1")
v2 = tf.matmul(v0, v1)
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
sess_1.run(tf.initialize_all_variables())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
tf.Session.reset(server.target)
with self.assertRaises(tf.errors.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2 = tf.Session(server.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess_2.run(v2)
sess_2.run(tf.initialize_all_variables())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
def testSameVariablesClearContainer(self):
server0 = tf.train.Server({"local0": ["localhost:0"]}, protocol="grpc",
start=True)
server1 = tf.train.Server({"local1": ["localhost:0"]}, protocol="grpc",
start=True)
v0 = tf.Variable(1.0, name="v0")
v1 = tf.Variable(2.0, name="v0")
sess_0 = tf.Session(server0.target)
sess_1 = tf.Session(server1.target)
sess_0.run(v0.initializer)
sess_1.run(v1.initializer)
self.assertAllEqual(1.0, sess_0.run(v0))
self.assertAllEqual(2.0, sess_1.run(v1))
tf.Session.reset(server0.target, ["local0"])
sess = tf.Session(server0.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v0)
sess.run(v0.initializer)
self.assertAllEqual(2.0, sess_1.run(v1))
tf.Session.reset(server1.target, ["local1"])
sess = tf.Session(server1.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v1)
sess = tf.Session(server0.target)
self.assertAllEqual(1.0, sess.run(v0))
def testMultipleContainers(self):
with tf.container("test0"):
v0 = tf.Variable(1.0, name="v0")
with tf.container("test1"):
v1 = tf.Variable(2.0, name="v0")
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
sess.run(tf.initialize_all_variables())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
tf.Session.reset(server.target, ["test0"])
with self.assertRaises(tf.errors.AbortedError):
sess.run(v1)
sess = tf.Session(server.target)
with self.assertRaises(tf.errors.FailedPreconditionError):
sess.run(v0)
self.assertAllEqual(2.0, sess.run(v1))
def testResetFails(self):
with tf.container("test0"):
v0 = tf.Variable(1.0, name="v0")
v1 = tf.Variable(2.0, name="v1")
with self.assertRaises(tf.errors.NotFoundError):
tf.Session.reset("nonexistent", ["test0"])
with self.assertRaises(tf.errors.DeadlineExceededError):
tf.Session.reset("grpc://localhost:0", ["test0"],
config=tf.ConfigProto(operation_timeout_in_ms=5))
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
sess.run(tf.initialize_all_variables())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
tf.Session.reset(server.target, ["test1"])
sess = tf.Session(server.target)
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
def testLargeConstant(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = tf.constant(const_val)
shape_t = tf.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = tf.placeholder(tf.float32, shape=[10000, 3000])
min_t = tf.reduce_min(p)
max_t = tf.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testCloseCancelsBlockingOperation(self):
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
q = tf.FIFOQueue(10, [tf.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaises(tf.errors.CancelledError):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testSetConfiguration(self):
config = tf.ConfigProto(
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.1))
server = tf.train.Server.create_local_server(config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config
.gpu_options.per_process_gpu_memory_fraction)
cluster_def = tf.train.ClusterSpec(
{"localhost": ["localhost:0"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="localhost", task_index=0,
protocol="grpc")
server = tf.train.Server(server_def, config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config
.gpu_options.per_process_gpu_memory_fraction)
def testInvalidHostname(self):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "port"):
_ = tf.train.Server({"local": ["localhost"]},
job_name="local",
task_index=0)
def testInteractiveSession(self):
server = tf.train.Server.create_local_server()
a = tf.constant(1.0)
with self.assertRaisesRegexp(tf.errors.UnimplementedError, "pruned"):
sess = tf.InteractiveSession(target=server.target)
sess.run(a)
class ServerDefTest(tf.test.TestCase):
def testLocalServer(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222", "localhost:2223"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testClusterSpec(self):
cluster_spec = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
if __name__ == "__main__":
tf.test.main()
| true | true |
f72e975ec68de10ec951352b654b92d2aa5f1c53 | 908 | py | Python | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | [
"MIT"
] | 1 | 2021-03-22T11:31:00.000Z | 2021-03-22T11:31:00.000Z | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | [
"MIT"
] | null | null | null | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 21:16:08 2020
@author: wmonteiro92
"""
from xgboost import XGBRegressor, XGBClassifier
def train_ml_model(X, y, algorithm, random_state=0):
    """Train one dataset in Python.

    :param X: the input values.
    :type X: np.array
    :param y: the target values.
    :type y: np.array
    :param algorithm: the machine learning model to use. Allowed values are
        `XGBClassifier` and `XGBRegressor`.
    :type algorithm: str
    :param random_state: the seed. Default is 0.
    :type random_state: Integer
    :raises ValueError: if `algorithm` is not one of the allowed values.
    :return: the trained machine learning model.
    :rtype: Object
    """
    if algorithm == 'XGBClassifier':
        model = XGBClassifier(random_state=random_state)
    elif algorithm == 'XGBRegressor':
        model = XGBRegressor(random_state=random_state)
    else:
        # Fail fast with a clear message instead of the NameError the
        # original code raised when `algorithm` was misspelled.
        raise ValueError("algorithm must be 'XGBClassifier' or "
                         "'XGBRegressor', got {!r}".format(algorithm))
    model.fit(X, y)
    return model
from xgboost import XGBRegressor, XGBClassifier
def train_ml_model(X, y, algorithm, random_state=0):
    """Train one dataset in Python.

    :param X: the input values (np.array).
    :param y: the target values (np.array).
    :param algorithm: 'XGBClassifier' or 'XGBRegressor'.
    :param random_state: the seed. Default is 0.
    :raises ValueError: if `algorithm` is not one of the allowed values.
    :return: the trained machine learning model.
    """
    if algorithm == 'XGBClassifier':
        model = XGBClassifier(random_state=random_state)
    elif algorithm == 'XGBRegressor':
        model = XGBRegressor(random_state=random_state)
    else:
        # Fail fast instead of raising NameError on a misspelled algorithm.
        raise ValueError("algorithm must be 'XGBClassifier' or "
                         "'XGBRegressor', got {!r}".format(algorithm))
    model.fit(X, y)
    return model
f72e97da7df88495c60eae65539637bc4aa922b9 | 18,895 | py | Python | timml/model.py | Huite/timml | 5eb52066be094326343fe26b46555253fef44dc9 | [
"MIT"
] | 24 | 2015-09-13T17:11:58.000Z | 2021-12-14T09:09:17.000Z | timml/model.py | Huite/timml | 5eb52066be094326343fe26b46555253fef44dc9 | [
"MIT"
] | 42 | 2015-09-23T19:29:34.000Z | 2022-01-17T09:13:14.000Z | timml/model.py | Huite/timml | 5eb52066be094326343fe26b46555253fef44dc9 | [
"MIT"
] | 26 | 2015-08-24T17:25:27.000Z | 2021-07-09T14:09:30.000Z | """
Model classes
"""
import numpy as np
import sys
import inspect # Used for storing the input
from .aquifer import Aquifer
from .aquifer_parameters import param_maq, param_3d
from .constant import ConstantStar
from .util import PlotTim
import multiprocessing as mp
__all__ = ['Model', 'ModelMaq', 'Model3D']
class Model(PlotTim):
"""
Model Class to create a model object consisting of an arbitrary
sequence of aquifer layers and leaky layers.
Use ModelMaq for regular sequence of aquifers and leaky layers.
Use Model3D for multi-layer model of a single aquifer
Parameters
----------
kaq : array
hydraulic conductivity of each aquifer from the top down
z : array
elevation tops and bottoms of all layers
layers may have zero thickness
c : array
resistance between two consecutive aquifer layers
if ltype[0]='a': length is number of aquifers - 1
if ltype[0]='l': length is number of aquifers
npor : array
porosity of all layers from the top down
ltype : array of characters
array indicating for each layer whether it is
'a' aquifer layer
'l' leaky layer
"""
def __init__(self, kaq, c, z, npor, ltype, f2py=False):
# All input variables are numpy arrays
# That should be checked outside this function
self.elementlist = []
self.elementdict = {} # only elements that have a label
self.aq = Aquifer(self, kaq, c, z, npor, ltype)
self.modelname = 'ml' # Used for writing out input
self.f2py = False
if f2py:
try:
from .src import besselaesnew
self.f2py = True
except:
print('FORTRAN extension not found while f2py=True')
print('Using Numba instead')
def initialize(self):
# remove inhomogeneity elements (they are added again)
self.elementlist = [e for e in self.elementlist if not e.inhomelement]
self.aq.initialize()
for e in self.elementlist:
e.initialize()
def add_element(self, e):
self.elementlist.append(e)
if e.label is not None: self.elementdict[e.label] = e
def remove_element(self, e):
"""Remove element `e` from model
"""
if e.label is not None: self.elementdict.pop(e.label)
self.elementlist.remove(e)
    def storeinput(self, frame):
        # Store the argument names and values of the calling constructor
        # so that the model input can later be written out by write().
        self.inputargs, _, _, self.inputvalues = inspect.getargvalues(frame)
    def potential(self, x, y, aq=None):
        """Discharge potential at (x, y) in all layers.

        Sums the contributions of all elements in the aquifer containing
        (x, y) and transforms them to layer values with the eigenvectors.
        Returns an array of length `aq.naq`.
        """
        if aq is None: aq = self.aq.find_aquifer_data(x, y)
        pot = np.zeros(aq.naq)
        for e in aq.elementlist:
            pot += e.potential(x, y, aq)
        rv = np.sum(pot * aq.eigvec, 1)
        if aq.ltype[0] == 'l':
            # potential for head above leaky layer
            rv += aq.constantstar.potstar
        return rv
    def disvec(self, x, y, aq=None):
        """Discharge vector at `x`, `y`

        Returns
        -------
        qxqy : array size (2, naq)
            first row is Qx in each aquifer layer, second row is Qy
        """
        if aq is None: aq = self.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, aq.naq))
        for e in aq.elementlist:
            rv += e.disvec(x, y, aq)
        # transform element contributions to layer values via eigenvectors
        rv = np.sum(rv[:, np.newaxis, :] * aq.eigvec, 2)
        return rv
def qztop(self, x, y, aq=None):
if aq is None: aq = self.aq.find_aquifer_data(x, y)
rv = 0.0
if aq.ltype[0] == 'a': # otherwise recharge cannot be added
for e in aq.elementlist:
rv += e.qztop(x, y)
return rv
def head(self, x, y, layers=None, aq=None):
"""Head at `x`, `y`
Returns
-------
h : array length `naq` or `len(layers)`
head in all `layers` (if not `None`),
or all layers of aquifer (otherwise)
"""
if aq is None: aq = self.aq.find_aquifer_data(x, y)
rv = self.potential(x, y, aq) / aq.T
if layers is None:
return rv
else:
return rv[layers]
    def headgrid(self, xg, yg, layers=None, printrow=False):
        """Grid of heads

        Parameters
        ----------
        xg : array
            x values of grid
        yg : array
            y values of grid
        layers : integer, list or array, optional
            layers for which grid is returned
        printrow : boolean, optional
            prints dot to screen for each row of grid if set to `True`

        Returns
        -------
        h : array size `nlayers, ny, nx`

        See also
        --------
        :func:`~timml.model.Model.headgrid2`
        """
        nx, ny = len(xg), len(yg)
        if layers is None:
            Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
        else:
            Nlayers = len(np.atleast_1d(layers))
        h = np.empty((Nlayers, ny, nx))
        # head() is evaluated point by point so each point may lie in a
        # different (inhomogeneity) aquifer.
        for j in range(ny):
            if printrow:
                print('.', end='', flush=True)
            for i in range(nx):
                h[:, j, i] = self.head(xg[i], yg[j], layers)
        if printrow:
            print('', flush=True)
        return h
def headgrid2(self, x1, x2, nx, y1, y2, ny, layers=None, printrow=False):
"""Grid of heads
Parameters
----------
x1, x2, nx :
x values are generated as linspace(x1, x2, nx)
y1, y2, ny :
y values are generated as linspace(y1, y2, ny)
layers : integer, list or array, optional
layers for which grid is returned
printrow : boolean, optional
prints dot to screen for each row of grid if set to `True`
Returns
-------
h : array size `nlayers, ny, nx`
See also
--------
:func:`~timml.model.Model.headgrid`
"""
xg, yg = np.linspace(x1, x2, nx), np.linspace(y1, y2, ny)
return self.headgrid(xg, yg, layers=layers, printrow=printrow)
def headalongline(self, x, y, layers=None):
"""Head along line or curve
Parameters
----------
x : array
x values of line
y : array
y values of line
layers : integer, list or array, optional
layers for which grid is returned
Returns
-------
h : array size `nlayers, nx`
"""
xg, yg = np.atleast_1d(x), np.atleast_1d(y)
if layers is None:
Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
else:
Nlayers = len(np.atleast_1d(layers))
nx = len(xg)
if len(yg) == 1:
yg = yg * np.ones(nx)
h = np.zeros((Nlayers, nx))
for i in range(nx):
h[:, i] = self.head(xg[i], yg[i], layers)
return h
def disvecalongline(self, x, y, layers=None):
'''Returns Qx[Nlayers,len(x)], Qy[Nlayers,len(x)]
Assumes same number of layers for each x and y
layers may be None or list of layers for which head is computed'''
xg, yg = np.atleast_1d(x), np.atleast_1d(y)
if layers is None:
nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
else:
nlayers = len(np.atleast_1d(layers))
nx = len(xg)
if len(yg) == 1:
yg = yg * np.ones(nx)
Qx = np.zeros((nlayers, nx))
Qy = np.zeros((nlayers, nx))
for i in range(nx):
Qx[:, i], Qy[:, 1] = self.disvec(xg[i], yg[i], layers)
return Qx, Qy
# def disvec_direction(self, s, x1, y1, cdirection):
# pass
#
# def discharge_across_line(self, x1, y1, x2, y2, layers=None):
# if layers is None:
# nlayers = self.aq.find_aquifer_data(x1, y1).naq
# else:
# nlayers = len(np.atleast_1d(layers))
# z1 = x1 + y1 * 1j
# z2 = x2 + y2 * 1j
# normvec = (z2 - z1) / np.abs(z2 - z1) * np.exp(-np.pi * 1j / 2)
# disvec = self.disvec(xg[i], yg[i], layers)
    def velocity(self, x, y, z):
        # Convenience wrapper: velocity vector (vx, vy, vz) at (x, y, z).
        return self.velocomp(x, y, z)
    def velocomp(self, x, y, z, aq=None, layer_ltype=None):
        """Velocity vector [vx, vy, vz] at point (x, y, z).

        Parameters
        ----------
        aq : aquifer data, optional
            looked up from (x, y) when not given
        layer_ltype : tuple (layer, ltype), optional
            layer number and layer type at elevation z; computed from z
            with aq.findlayer when not given

        Returns
        -------
        array [vx, vy, vz]
        """
        if aq is None: aq = self.aq.find_aquifer_data(x, y)
        assert z <= aq.z[0] and z >= aq.z[-1], "z value not inside aquifer"
        if layer_ltype is None:
            layer, ltype, dummy = aq.findlayer(z)
        else:
            layer, ltype = layer_ltype
        h = self.head(x, y, aq=aq)
        # qz between aquifer layers, computed from head differences
        # across the resistances
        qzlayer = np.zeros(aq.naq + 1)
        qzlayer[1:-1] = (h[1:] - h[:-1]) / aq.c[1:]
        if aq.ltype[0] == 'l':
            # flux through the semi-confining top layer
            qzlayer[0] = (h[0] - aq.hstar) / aq.c[0]
        if ltype == 'l':
            # inside a leaky layer there is only vertical flow
            vz = qzlayer[layer] / aq.nporll[layer]
            vx = 0
            vy = 0
        else:
            qzbot = qzlayer[layer + 1]
            qztop = qzlayer[layer]
            if layer == 0:
                qztop += self.qztop(x, y)
            # linear interpolation of qz between bottom and top of layer
            vz = (qzbot + (z - aq.zaqbot[layer]) / aq.Haq[layer] * \
                 (qztop - qzbot)) / aq.nporaq[layer]
            qx, qy = self.disvec(x, y, aq=aq)
            vx = qx[layer] / (aq.Haq[layer] * aq.nporaq[layer])
            vy = qy[layer] / (aq.Haq[layer] * aq.nporaq[layer])
        return np.array([vx, vy, vz])
    def solve(self, printmat=0, sendback=0, silent=False):
        '''Compute solution: assemble and solve the system of equations
        for all element unknowns, then store the parameters back on the
        elements.

        printmat: when truthy, return (mat, rhs) without solving
        sendback: when truthy, return the solution vector
        silent: True suppresses output; 'dot' prints a single dot'''
        # Initialize elements
        self.initialize()
        # Compute number of equations
        self.neq = np.sum([e.nunknowns for e in self.elementlist])
        if self.neq == 0: return
        if silent is False:
            print('Number of elements, Number of equations:', len(
                self.elementlist), ',', self.neq)
        # NOTE(review): this branch is unreachable — neq == 0 already
        # returned above.
        if self.neq == 0:
            if silent is False: print('No unknowns. Solution complete')
            return
        mat = np.empty((self.neq, self.neq))
        rhs = np.empty(self.neq)
        ieq = 0
        # Each element contributes nunknowns rows to the system.
        for e in self.elementlist:
            if e.nunknowns > 0:
                mat[ieq:ieq + e.nunknowns, :], rhs[ieq:ieq + e.nunknowns] = \
                    e.equation()
                ieq += e.nunknowns
            if silent is False:
                print('.', end='', flush=True)
        if printmat:
            return mat, rhs
        sol = np.linalg.solve(mat, rhs)
        icount = 0
        # Distribute the solution vector back over the elements.
        for e in self.elementlist:
            if e.nunknowns > 0:
                e.setparams(sol[icount:icount + e.nunknowns])
                icount += e.nunknowns
        if silent is False:
            print()  # needed cause the dots are printed
            print('solution complete')
        elif (silent == 'dot') or (silent == '.'):
            print('.', end='', flush=True)
        if sendback:
            return sol
        return
def solve_mp(self, nproc=4, printmat=0, sendback=0, silent=False):
'''Compute solution, multiprocessing implementation.
Note: estimated speedup approximately by factor of
number of physical cores. Virtual cores do not improve
calculation time.'''
# Initialize elements
self.initialize()
# Compute number of equations
self.neq = np.sum([e.nunknowns for e in self.elementlist])
if self.neq == 0: return
if silent is False:
print('Number of elements, Number of equations:', len(
self.elementlist), ',', self.neq)
if self.neq == 0:
if silent is False: print('No unknowns. Solution complete')
return
mat = np.empty((self.neq, self.neq))
rhs = np.empty(self.neq)
# start multiprocessing
if nproc is None:
nproc = mp.cpu_count() - 1 # make no. of processes equal to 1 less than no. of cores
elif nproc > mp.cpu_count():
print("Given 'nproc' larger than no. of cores on machine. Setting 'nproc' to {}.".format(mp.cpu_count()))
nproc = mp.cpu_count()
pool = mp.Pool(processes=nproc)
results = []
for e in self.elementlist:
if e.nunknowns > 0:
results.append(pool.apply_async(e.equation))
if silent is False:
print('.', end='', flush=True)
pool.close()
pool.join()
mat = np.empty((self.neq, self.neq))
rhs = np.zeros(self.neq)
ieq = 0
for p in results:
imat, irhs = p.get()
mat[ieq:ieq + imat.shape[0], :] = imat
rhs[ieq:ieq + irhs.shape[0]] = irhs
ieq += imat.shape[0]
# end multiprocessing
if printmat:
return mat, rhs
sol = np.linalg.solve(mat, rhs)
icount = 0
for e in self.elementlist:
if e.nunknowns > 0:
e.setparams(sol[icount:icount + e.nunknowns])
icount += e.nunknowns
if silent is False:
print() # needed cause the dots are printed
print('solution complete')
elif (silent == 'dot') or (silent == '.'):
print('.', end='', flush=True)
if sendback:
return sol
return
    def write(self):
        # Return the Python statement that recreates this model from the
        # input arguments stored by storeinput().
        rv = self.modelname + ' = ' + self.name + '(\n'
        for key in self.inputargs[1:]:  # The first argument (self) is ignored
            if isinstance(self.inputvalues[key], np.ndarray):
                rv += key + ' = ' + np.array2string(self.inputvalues[key],
                                                    separator=',') + ',\n'
            elif isinstance(self.inputvalues[key],str):
                rv += key + " = '" + self.inputvalues[key] + "',\n"
            else:
                rv += key + ' = ' + str(self.inputvalues[key]) + ',\n'
        rv += ')\n'
        return rv
def writemodel(self, fname):
self.initialize() # So that the model can be written without solving first
f = open(fname, 'w')
f.write('from timml import *\n')
f.write(self.write())
for e in self.elementlist:
f.write(e.write())
f.close()
class ModelMaq(Model):
    """
    Create a Model object by specifying a multi-aquifer sequence of
    aquifer-leakylayer-aquifer-leakylayer-aquifer etc

    Parameters
    ----------
    kaq : float, array or list
        Hydraulic conductivity of each aquifer from the top down.
        If float, hydraulic conductivity is the same in all aquifers.
    z : array or list
        Elevation of tops and bottoms of the aquifers from the top down.
        Leaky layers may have zero thickness.
        * if topboundary='conf': length is 2 * number of aquifers
        * if topboundary='semi': length is 2 * number of aquifers + 1
          as top of leaky layer on top of systems needs to be specified
    c : float, array or list
        Resistance of leaky layers from the top down.
        * if float, resistance is the same for all leaky layers
        * if topboundary='conf': length is number of aquifers - 1
        * if topboundary='semi': length is number of aquifers
    npor : float, array or list
        Porosity of all aquifers and leaky layers from the top down.
        * if float, porosity is the same for all layers
        * if topboundary='conf': length is 2 * number of aquifers - 1
        * if topboundary='semi': length is 2 * number of aquifers
    topboundary : string, 'conf' or 'semi' (default is 'conf')
        Indicates whether the topboundary is confined ('conf') or
        semi-confined ('semi').
    hstar : float or None (default is None)
        Head value above semi-confining top, only read if topboundary='semi'.

    Examples
    --------
    >>> ml = ModelMaq(kaq=[10, 20], z=[20, 12, 10, 0], c=1000)
    """
    # NOTE(review): the mutable defaults z=[1, 0] and c=[] are shared
    # across calls; they do not appear to be mutated here, but verify
    # param_maq does not modify them in place.
    def __init__(self, kaq=1, z=[1, 0], c=[], npor=0.3, topboundary='conf',
                 hstar=None, f2py=False):
        # Store the constructor arguments so write() can reproduce them.
        self.storeinput(inspect.currentframe())
        # Expand scalar input to per-layer parameter arrays.
        kaq, c, npor, ltype = param_maq(kaq, z, c, npor, topboundary)
        Model.__init__(self, kaq, c, z, npor, ltype, f2py)
        self.name = 'ModelMaq'
        if self.aq.ltype[0] == 'l':
            # semi-confined: add the constant head above the top leaky layer
            ConstantStar(self, hstar, aq=self.aq)
class Model3D(Model):
    """
    Model3D Class to create a multi-layer model object consisting of
    many aquifer layers. The resistance between the layers is computed
    from the vertical hydraulic conductivity of the layers.

    Parameters
    ----------
    kaq : float, array or list
        hydraulic conductivity of each layer from the top down
        if float, hydraulic conductivity is the same in all aquifers
    z : array or list
        elevation of top of system followed by bottoms of all layers
        from the top down
        bottom of layer is automatically equal to top of layer below it
        length is number of aquifer layers + 1
    kzoverkh : float
        vertical anisotropy ratio vertical k divided by horizontal k
        if float, value is the same for all layers
        length is number of layers
    npor : float, array or list
        porosity of all aquifer layers
        from the top down
        if float, porosity is the same for all layers
        if topboundary='conf': length is number of layers
        if topboundary='semi': length is number of layers + 1
    topboundary : string, 'conf' or 'semi' (default is 'conf')
        indicating whether the top is confined ('conf') or
        semi-confined ('semi')
    topres : float
        resistance of top semi-confining layer (read if topboundary='semi')
    topthick: float
        thickness of top semi-confining layer (read if topboundary='semi')
    hstar : float or None (default is None)
        head value above semi-confining top (read if topboundary='semi')

    Examples
    --------
    >>> ml = Model3D(kaq=10, z=np.arange(20, -1, -2), kzoverkh=0.1)
    """
    def __init__(self, kaq=1, z=[1, 0], kzoverkh=1, npor=0.3,
                 topboundary='conf', topres=0, topthick=0, hstar=0,
                 f2py=False):
        '''Model3D
        for semi-confined aquifers, set top equal to 'semi' and provide
        topres: resistance of top
        topthick: thickness of top
        hstar: head above top'''
        # Store the constructor arguments so write() can reproduce them.
        self.storeinput(inspect.currentframe())
        # Compute inter-layer resistances from kzoverkh and layer geometry.
        kaq, c, npor, ltype = param_3d(kaq, z, kzoverkh, npor, topboundary,
                                       topres)
        if topboundary == 'semi':
            # prepend the top of the semi-confining layer to the z array
            z = np.hstack((z[0] + topthick, z))
        Model.__init__(self, kaq, c, z, npor, ltype, f2py)
        self.name = 'Model3D'
        if self.aq.ltype[0] == 'l':
            # semi-confined: add the constant head above the top leaky layer
            ConstantStar(self, hstar, aq=self.aq)
| 35.450281 | 117 | 0.54136 |
import numpy as np
import sys
import inspect
from .aquifer import Aquifer
from .aquifer_parameters import param_maq, param_3d
from .constant import ConstantStar
from .util import PlotTim
import multiprocessing as mp
__all__ = ['Model', 'ModelMaq', 'Model3D']
class Model(PlotTim):
    """Base class that stores the elements of a model and solves for the
    unknown parameters.

    Models are normally created through one of the derived classes
    (ModelMaq, Model3D), which compute the aquifer properties and
    delegate here.

    Parameters
    ----------
    kaq : array
        hydraulic conductivity of each aquifer layer
    c : array
        resistance between the aquifer layers
    z : array
        elevations of tops and bottoms of the layers
    npor : array
        porosity of the layers
    ltype : array of str
        'a' for aquifer layers, 'l' for leaky layers
    f2py : bool
        when True, try to use the compiled FORTRAN extension
    """

    def __init__(self, kaq, c, z, npor, ltype, f2py=False):
        self.elementlist = []
        self.elementdict = {}  # labeled elements, keyed by label
        self.aq = Aquifer(self, kaq, c, z, npor, ltype)
        self.modelname = 'ml'  # name used in writemodel output
        self.f2py = False
        if f2py:
            # use the FORTRAN extension only if it can be imported;
            # narrowed from a bare except so unrelated errors propagate
            try:
                from .src import besselaesnew
                self.f2py = True
            except ImportError:
                print('FORTRAN extension not found while f2py=True')
                print('Using Numba instead')

    def initialize(self):
        """Initialize the aquifer data and all elements.

        Elements belonging to an inhomogeneity are removed first; they
        are recreated by the aquifer initialization.
        """
        self.elementlist = [e for e in self.elementlist
                            if not e.inhomelement]
        self.aq.initialize()
        for e in self.elementlist:
            e.initialize()

    def add_element(self, e):
        """Add element e to the model; register it by label if labeled."""
        self.elementlist.append(e)
        if e.label is not None:
            self.elementdict[e.label] = e

    def remove_element(self, e):
        """Remove element e (and its label entry) from the model."""
        if e.label is not None:
            self.elementdict.pop(e.label)
        self.elementlist.remove(e)

    def storeinput(self, frame):
        """Store argument names and values of the calling frame so that
        write() can reproduce the call."""
        self.inputargs, _, _, self.inputvalues = inspect.getargvalues(frame)

    def potential(self, x, y, aq=None):
        """Return the potential in all layers at (x, y)."""
        if aq is None:
            aq = self.aq.find_aquifer_data(x, y)
        pot = np.zeros(aq.naq)
        for e in aq.elementlist:
            pot += e.potential(x, y, aq)
        rv = np.sum(pot * aq.eigvec, 1)
        if aq.ltype[0] == 'l':
            # add the reference potential of the semi-confining top
            rv += aq.constantstar.potstar
        return rv

    def disvec(self, x, y, aq=None):
        """Return (Qx, Qy), each of length naq, at (x, y)."""
        if aq is None:
            aq = self.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, aq.naq))
        for e in aq.elementlist:
            rv += e.disvec(x, y, aq)
        rv = np.sum(rv[:, np.newaxis, :] * aq.eigvec, 2)
        return rv

    def qztop(self, x, y, aq=None):
        """Return the vertical flux through the top of the system at
        (x, y); zero unless the top layer is an aquifer layer."""
        if aq is None:
            aq = self.aq.find_aquifer_data(x, y)
        rv = 0.0
        if aq.ltype[0] == 'a':
            for e in aq.elementlist:
                rv += e.qztop(x, y)
        return rv

    def head(self, x, y, layers=None, aq=None):
        """Return the head at (x, y) in all layers, or only in the
        requested layers."""
        if aq is None:
            aq = self.aq.find_aquifer_data(x, y)
        rv = self.potential(x, y, aq) / aq.T
        if layers is None:
            return rv
        return rv[layers]

    def headgrid(self, xg, yg, layers=None, printrow=False):
        """Return heads on a grid; shape (nlayers, ny, nx).

        xg, yg : 1D arrays of grid coordinates
        layers : layer index, sequence of indices, or None for all
        printrow : print a dot per computed row when True
        """
        nx, ny = len(xg), len(yg)
        if layers is None:
            nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
        else:
            nlayers = len(np.atleast_1d(layers))
        h = np.empty((nlayers, ny, nx))
        for j in range(ny):
            if printrow:
                print('.', end='', flush=True)
            for i in range(nx):
                h[:, j, i] = self.head(xg[i], yg[j], layers)
        if printrow:
            print('', flush=True)
        return h

    def headgrid2(self, x1, x2, nx, y1, y2, ny, layers=None, printrow=False):
        """Return heads on a grid defined by bounds and point counts."""
        xg, yg = np.linspace(x1, x2, nx), np.linspace(y1, y2, ny)
        return self.headgrid(xg, yg, layers=layers, printrow=printrow)

    def headalongline(self, x, y, layers=None):
        """Return heads along a line of points; shape (nlayers, npoints)."""
        xg, yg = np.atleast_1d(x), np.atleast_1d(y)
        if layers is None:
            nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
        else:
            nlayers = len(np.atleast_1d(layers))
        nx = len(xg)
        if len(yg) == 1:
            yg = yg * np.ones(nx)
        h = np.zeros((nlayers, nx))
        for i in range(nx):
            h[:, i] = self.head(xg[i], yg[i], layers)
        return h

    def disvecalongline(self, x, y, layers=None):
        """Return (Qx, Qy) along a line of points; each has shape
        (nlayers, npoints)."""
        xg, yg = np.atleast_1d(x), np.atleast_1d(y)
        if layers is None:
            nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
        else:
            nlayers = len(np.atleast_1d(layers))
        nx = len(xg)
        if len(yg) == 1:
            yg = yg * np.ones(nx)
        Qx = np.zeros((nlayers, nx))
        Qy = np.zeros((nlayers, nx))
        for i in range(nx):
            # bug fix: Qy was stored in column 1 for every point, and
            # layers was mistakenly passed as disvec's aq argument
            qx, qy = self.disvec(xg[i], yg[i])
            if layers is None:
                Qx[:, i], Qy[:, i] = qx, qy
            else:
                Qx[:, i] = qx[layers]
                Qy[:, i] = qy[layers]
        return Qx, Qy

    def velocity(self, x, y, z):
        """Return the velocity vector (vx, vy, vz) at point (x, y, z)."""
        return self.velocomp(x, y, z)

    def velocomp(self, x, y, z, aq=None, layer_ltype=None):
        """Return the velocity vector (vx, vy, vz) at (x, y, z).

        layer_ltype may be given as a (layer, ltype) tuple to skip the
        layer lookup.
        """
        if aq is None:
            aq = self.aq.find_aquifer_data(x, y)
        assert z <= aq.z[0] and z >= aq.z[-1], "z value not inside aquifer"
        if layer_ltype is None:
            layer, ltype, dummy = aq.findlayer(z)
        else:
            layer, ltype = layer_ltype
        h = self.head(x, y, aq=aq)
        # vertical flux at the top of each aquifer layer
        qzlayer = np.zeros(aq.naq + 1)
        qzlayer[1:-1] = (h[1:] - h[:-1]) / aq.c[1:]
        if aq.ltype[0] == 'l':
            qzlayer[0] = (h[0] - aq.hstar) / aq.c[0]
        if ltype == 'l':
            # inside a leaky layer the flow is vertical only
            vz = qzlayer[layer] / aq.nporll[layer]
            vx = 0
            vy = 0
        else:
            qzbot = qzlayer[layer + 1]
            qztop = qzlayer[layer]
            if layer == 0:
                qztop += self.qztop(x, y)
            # linear interpolation of qz over the layer thickness
            vz = (qzbot + (z - aq.zaqbot[layer]) / aq.Haq[layer] * \
                  (qztop - qzbot)) / aq.nporaq[layer]
            qx, qy = self.disvec(x, y, aq=aq)
            vx = qx[layer] / (aq.Haq[layer] * aq.nporaq[layer])
            vy = qy[layer] / (aq.Haq[layer] * aq.nporaq[layer])
        return np.array([vx, vy, vz])

    def solve(self, printmat=0, sendback=0, silent=False):
        """Assemble the system of equations from all elements and solve
        for the unknown parameters.

        printmat : return (mat, rhs) without solving when truthy
        sendback : return the solution vector when truthy
        silent : True suppresses output; 'dot' or '.' prints one dot
        """
        self.initialize()
        self.neq = np.sum([e.nunknowns for e in self.elementlist])
        # single early exit (the original had a second, unreachable check)
        if self.neq == 0:
            if silent is False:
                print('No unknowns. Solution complete')
            return
        if silent is False:
            print('Number of elements, Number of equations:', len(
                self.elementlist), ',', self.neq)
        mat = np.empty((self.neq, self.neq))
        rhs = np.empty(self.neq)
        ieq = 0
        for e in self.elementlist:
            if e.nunknowns > 0:
                mat[ieq:ieq + e.nunknowns, :], rhs[ieq:ieq + e.nunknowns] = \
                    e.equation()
                ieq += e.nunknowns
                if silent is False:
                    print('.', end='', flush=True)
        if printmat:
            return mat, rhs
        sol = np.linalg.solve(mat, rhs)
        icount = 0
        for e in self.elementlist:
            if e.nunknowns > 0:
                e.setparams(sol[icount:icount + e.nunknowns])
                icount += e.nunknowns
        if silent is False:
            print()
            print('solution complete')
        elif (silent == 'dot') or (silent == '.'):
            print('.', end='', flush=True)
        if sendback:
            return sol
        return

    def solve_mp(self, nproc=4, printmat=0, sendback=0, silent=False):
        """Like solve, but evaluates the element equations in parallel
        with a multiprocessing pool of nproc workers (None means the
        number of cores minus one)."""
        self.initialize()
        self.neq = np.sum([e.nunknowns for e in self.elementlist])
        if self.neq == 0:
            if silent is False:
                print('No unknowns. Solution complete')
            return
        if silent is False:
            print('Number of elements, Number of equations:', len(
                self.elementlist), ',', self.neq)
        if nproc is None:
            nproc = mp.cpu_count() - 1
        elif nproc > mp.cpu_count():
            print("Given 'nproc' larger than no. of cores on machine. Setting 'nproc' to {}.".format(mp.cpu_count()))
            nproc = mp.cpu_count()
        pool = mp.Pool(processes=nproc)
        results = []
        for e in self.elementlist:
            if e.nunknowns > 0:
                results.append(pool.apply_async(e.equation))
                if silent is False:
                    print('.', end='', flush=True)
        pool.close()
        pool.join()
        # collect results in submission order so the rows match the
        # serial solve (allocate once; the original allocated twice)
        mat = np.empty((self.neq, self.neq))
        rhs = np.zeros(self.neq)
        ieq = 0
        for p in results:
            imat, irhs = p.get()
            mat[ieq:ieq + imat.shape[0], :] = imat
            rhs[ieq:ieq + irhs.shape[0]] = irhs
            ieq += imat.shape[0]
        if printmat:
            return mat, rhs
        sol = np.linalg.solve(mat, rhs)
        icount = 0
        for e in self.elementlist:
            if e.nunknowns > 0:
                e.setparams(sol[icount:icount + e.nunknowns])
                icount += e.nunknowns
        if silent is False:
            print()
            print('solution complete')
        elif (silent == 'dot') or (silent == '.'):
            print('.', end='', flush=True)
        if sendback:
            return sol
        return

    def write(self):
        """Return a string with the Python statement that recreates this
        model from the stored input."""
        rv = self.modelname + ' = ' + self.name + '(\n'
        for key in self.inputargs[1:]:  # skip self
            if isinstance(self.inputvalues[key], np.ndarray):
                rv += key + ' = ' + np.array2string(self.inputvalues[key],
                                                    separator=',') + ',\n'
            elif isinstance(self.inputvalues[key], str):
                rv += key + " = '" + self.inputvalues[key] + "',\n"
            else:
                rv += key + ' = ' + str(self.inputvalues[key]) + ',\n'
        rv += ')\n'
        return rv

    def writemodel(self, fname):
        """Write the model and all its elements to fname as a runnable
        Python script."""
        self.initialize()  # so the elements have their input stored
        # use a context manager so the file is closed even on error
        with open(fname, 'w') as f:
            f.write('from timml import *\n')
            f.write(self.write())
            for e in self.elementlist:
                f.write(e.write())
class ModelMaq(Model):
    """Multi-aquifer model of alternating aquifer and leaky layers.

    Layer properties are derived with param_maq from the conductivities
    kaq, elevations z, resistances c and porosities npor.  topboundary
    is 'conf' for a confined top, or 'semi' for a semi-confined top
    with fixed head hstar above it.
    """

    def __init__(self, kaq=1, z=[1, 0], c=[], npor=0.3, topboundary='conf',
                 hstar=None, f2py=False):
        """Compute the layer parameters and initialize the base Model."""
        # remember the call so write() can reproduce it
        self.storeinput(inspect.currentframe())
        params = param_maq(kaq, z, c, npor, topboundary)
        kaq, c, npor, ltype = params
        Model.__init__(self, kaq, c, z, npor, ltype, f2py)
        self.name = 'ModelMaq'
        # a leaky top layer requires a reference head element
        if self.aq.ltype[0] == 'l':
            ConstantStar(self, hstar, aq=self.aq)
class Model3D(Model):
    """Model of stacked aquifer layers whose vertical resistance is
    derived from kzoverkh; the top may be confined ('conf') or
    semi-confined ('semi') with resistance topres, thickness topthick
    and head hstar above it."""

    def __init__(self, kaq=1, z=[1, 0], kzoverkh=1, npor=0.3,
                 topboundary='conf', topres=0, topthick=0, hstar=0,
                 f2py=False):
        """Compute the layer parameters and initialize the base Model."""
        self.storeinput(inspect.currentframe())
        params = param_3d(kaq, z, kzoverkh, npor, topboundary, topres)
        kaq, c, npor, ltype = params
        semi_top = topboundary == 'semi'
        if semi_top:
            # prepend the top of the semi-confining layer to z
            z = np.hstack((z[0] + topthick, z))
        Model.__init__(self, kaq, c, z, npor, ltype, f2py)
        self.name = 'Model3D'
        if self.aq.ltype[0] == 'l':
            ConstantStar(self, hstar, aq=self.aq)
| true | true |
f72e99326922bbcda505ef3370008898b5c3b5e9 | 19,599 | py | Python | Lib/ntpath.py | jimmyyu2004/jython | 5b4dc2d54d01a6fda8c55d07b2608167e7a40769 | [
"CNRI-Jython"
] | 332 | 2015-08-22T12:43:56.000Z | 2022-03-17T01:05:43.000Z | Lib/ntpath.py | Pandinosaurus/jython3 | def4f8ec47cb7a9c799ea4c745f12badf92c5769 | [
"CNRI-Jython"
] | 36 | 2015-05-30T08:39:19.000Z | 2022-03-04T20:42:33.000Z | Lib/ntpath.py | Pandinosaurus/jython3 | def4f8ec47cb7a9c799ea4c745f12badf92c5769 | [
"CNRI-Jython"
] | 74 | 2015-05-29T17:18:53.000Z | 2022-01-15T14:06:44.000Z | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
    """Normalize the case of a pathname: lower-case it and turn
    forward slashes into backslashes."""
    lowered = s.lower()
    return lowered.replace("/", "\\")
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Return True when *s* is absolute: after removing any drive
    specifier it starts with a slash or backslash."""
    tail = splitdrive(s)[1]
    if not tail:
        return False
    return tail[0] in '/\\'
# Join two (or more) paths.
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0  # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far.  However, it's more
            # complicated if path begins with a drive letter:
            # 1. join('c:', '/a') == 'c:/a'
            # 2. join('c:/', '/a') == 'c:/a'
            # But
            # 3. join('c:/a', '/b') == '/b'
            # 4. join('c:', 'd:/') = 'd:/'
            # 5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1
        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'
    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split *p* into a (drive, tail) pair; either part may be empty.
    The drive is a leading letter-plus-colon specifier such as 'c:'."""
    if p[1:2] == ':':
        drive, tail = p[:2], p[2:]
    else:
        drive, tail = '', p
    return drive, tail
# Parse UNC paths
def splitunc(p):
    """Split a pathname into (unc, rest); either part may be empty.
    When present, unc has the form '//host/mount' (or the backslash
    equivalent).  Paths with a drive letter never have a UNC part."""
    if p[1:2] == ':':
        return '', p  # a drive letter rules out a UNC prefix
    if p[:2] not in ('//', '\\\\'):
        return '', p
    # \\machine\mountpoint\rest : the first two components after the
    # double slash form the UNC "drive"
    normp = normcase(p)
    first = normp.find('\\', 2)
    if first == -1:
        return ("", p)
    second = normp.find('\\', first + 1)
    if second == -1:
        second = len(p)
    return p[:second], p[second:]
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname into (head, tail), where tail is everything
    after the final slash; either part may be empty."""
    d, p = splitdrive(p)
    # find the start of the final component
    cut = len(p)
    while cut and p[cut - 1] not in '/\\':
        cut -= 1
    head, tail = p[:cut], p[cut:]
    # drop trailing slashes from head unless it is nothing but slashes
    stripped = head
    while stripped and stripped[-1] in '/\\':
        stripped = stripped[:-1]
    if stripped:
        head = stripped
    return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared splitter in genericpath using the Windows
    # separator conventions; splits "root.ext" at the last dot of the
    # final pathname component.
    return genericpath._splitext(p, sep, altsep, extsep)
# splitext inherits its docstring from the generic implementation
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Return the final component of pathname *p*."""
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Return the directory component of pathname *p*."""
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for a symbolic link.  Always False here: WindowsNT/95 and
    OS/2 have no symlink support in this implementation."""
    return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Return True when *path* is a mount point: the root of a drive or
    a bare UNC mount."""
    unc, remainder = splitunc(path)
    if unc:
        return remainder in ('', '/', '\\')
    tail = splitdrive(path)[1]
    return len(tail) == 1 and tail in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Call func(arg, dirname, names) for every directory at or below
    top ('.' and '..' excluded).  func may prune or reorder names in
    place to control the recursion.  Deprecated in favor of os.walk."""
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        entries = os.listdir(top)
    except os.error:
        return
    func(arg, top, entries)
    for entry in entries:
        child = join(top, entry)
        if isdir(child):
            walk(child, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # find the end of the user name (up to the first path separator)
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # pick the home directory: HOME first, then USERPROFILE, then
    # HOMEDRIVE + HOMEPATH; give up if none of these is set
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: # ~user: swap the last component of the home directory
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged.  No expansion happens inside
    single-quoted sections; '$$' and '%%' are literal '$' and '%'."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # unmatched quote: copy the rest verbatim
                res = res + path
                index = pathlen - 1
        elif c == '%':  # variable or literal '%%'
            if path[index + 1:index + 2] == '%':
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # unterminated %var: keep the literal text
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$':  # variable or literal '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # unterminated ${var: keep the literal text
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # bare $var: collect name characters
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes and '.'/'..'
    components where possible."""
    # Device and literal path prefixes must pass through untouched:
    #   \\.\ -> device names, \\?\ -> literal (long) paths
    if path.startswith(('\\\\.\\', '\\\\?\\')):
        return path
    path = path.replace("/", "\\")
    # Split off a drive specifier ("c:") if one is present.  The dead
    # isinstance() conditional that chose between two identical
    # (backslash, dot) tuples (a Python 2 unicode leftover) is removed.
    if path[1:2] == ':':
        prefix, path = path[:2], path[2:]
    else:
        prefix = ''
    # Collapsing leading backslashes must be drive-aware: without a
    # drive letter, \\server\mount is a UNC name whose double slash is
    # significant, so leading backslashes are preserved one by one.
    if prefix == '':
        while path[:1] == "\\":
            prefix = prefix + "\\"
            path = path[1:]
    else:
        if path.startswith("\\"):
            prefix = prefix + "\\"
        path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # ".." cancels the preceding real component
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # ".." at an absolute root has nowhere to go
                del comps[i]
            else:
                i += 1
        else:
            i += 1
    # An empty relative path normalizes to '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
# Return an absolute path.  Three implementations are selected at
# import time: native Windows (nt._getfullpathname), Jython on Windows
# (Java-backed sys.getPath), or a generic join-with-cwd fallback.
try:
    from nt import _getfullpathname
except ImportError:  # no built-in nt module - maybe it's Jython ;)
    if os._name == 'nt' :
        # Running on Windows under Jython: the Java-backed sys.getPath
        # resolves names against the NT current directory.
        def abspath(path):
            """Return the absolute version of a path."""
            try:
                if isinstance(path, str):
                    # Result must be unicode
                    if path:
                        path = sys.getPath(path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
                else:
                    # Result must be bytes
                    if path:
                        path = sys.getPath(path).encode('latin-1')
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
            except EnvironmentError:
                pass  # Bad path - return unchanged.
            return normpath(path)
    else:
        # Not running on Windows - mock up something sensible by
        # joining with the current working directory.
        def abspath(path):
            """Return the absolute version of a path."""
            try:
                if isinstance(path, str):
                    # Result must be unicode
                    if path:
                        path = join(os.getcwd(), path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
                else:
                    # Result must be bytes
                    if path:
                        path = join(os.getcwd(), path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
            except EnvironmentError:
                pass  # Bad path - return unchanged.
            return normpath(path)
else:  # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path:  # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass  # Bad path - return unchanged.
        elif isinstance(path, str):
            path = os.getcwd()
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
    """Normalize *path* to absolute form and return a triple
    (is_unc, prefix, component_list) used by relpath()."""
    normalized = abspath(normpath(path))
    prefix, rest = splitunc(normalized)
    if prefix:
        is_unc = True
    else:
        is_unc = False
        prefix, rest = splitdrive(normalized)
    parts = [comp for comp in rest.split(sep) if comp]
    return is_unc, prefix, parts
def relpath(path, start=curdir):
    """Return a version of *path* relative to *start* (default: the
    current directory).  Raises ValueError when the two are on
    different drives or mix UNC and non-UNC roots."""
    if not path:
        raise ValueError("no path specified")
    start_is_unc, start_prefix, start_list = _abspath_split(start)
    path_is_unc, path_prefix, path_list = _abspath_split(path)
    if path_is_unc ^ start_is_unc:
        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                         % (path, start))
    if path_prefix.lower() != start_prefix.lower():
        if path_is_unc:
            raise ValueError("path is on UNC root %s, start on UNC root %s"
                             % (path_prefix, start_prefix))
        raise ValueError("path is on drive %s, start on drive %s"
                         % (path_prefix, start_prefix))
    # Count the leading components shared by both paths
    # (case-insensitive, as Windows paths are).
    common = 0
    for comp_start, comp_path in zip(start_list, path_list):
        if comp_start.lower() != comp_path.lower():
            break
        common += 1
    climb = [pardir] * (len(start_list) - common)
    rel_list = climb + path_list[common:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| 34.935829 | 80 | 0.542017 |
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
altsep = '/'
devnull = 'nul'
def normcase(s):
    """Normalize the case of a pathname: lower-case it and turn
    forward slashes into backslashes."""
    lowered = s.lower()
    return lowered.replace("/", "\\")
def isabs(s):
    """Return True when *s* is absolute: after removing any drive
    specifier it starts with a slash or backslash."""
    tail = splitdrive(s)[1]
    if not tail:
        return False
    return tail[0] in '/\\'
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0  # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far.  However, it's more
            # complicated if path begins with a drive letter:
            # 1. join('c:', '/a') == 'c:/a'
            # 2. join('c:/', '/a') == 'c:/a'
            # But
            # 3. join('c:/a', '/b') == '/b'
            # 4. join('c:', 'd:/') = 'd:/'
            # 5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1
        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'
    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split *p* into a (drive, tail) pair; either part may be empty.
    The drive is a leading letter-plus-colon specifier such as 'c:'."""
    if p[1:2] == ':':
        drive, tail = p[:2], p[2:]
    else:
        drive, tail = '', p
    return drive, tail
# Parse UNC paths
def splitunc(p):
    """Split a pathname into (unc, rest); either part may be empty.
    When present, unc has the form '//host/mount' (or the backslash
    equivalent).  Paths with a drive letter never have a UNC part."""
    if p[1:2] == ':':
        return '', p  # a drive letter rules out a UNC prefix
    if p[:2] not in ('//', '\\\\'):
        return '', p
    # \\machine\mountpoint\rest : the first two components after the
    # double slash form the UNC "drive"
    normp = normcase(p)
    first = normp.find('\\', 2)
    if first == -1:
        return ("", p)
    second = normp.find('\\', first + 1)
    if second == -1:
        second = len(p)
    return p[:second], p[second:]
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname into (head, tail), where tail is everything
    after the final slash; either part may be empty."""
    d, p = splitdrive(p)
    # find the start of the final component
    cut = len(p)
    while cut and p[cut - 1] not in '/\\':
        cut -= 1
    head, tail = p[:cut], p[cut:]
    # drop trailing slashes from head unless it is nothing but slashes
    stripped = head
    while stripped and stripped[-1] in '/\\':
        stripped = stripped[:-1]
    if stripped:
        head = stripped
    return d + head, tail
def splitext(p):
    # Delegate to the shared splitter in genericpath using the Windows
    # separator conventions; splits "root.ext" at the last dot of the
    # final pathname component.
    return genericpath._splitext(p, sep, altsep, extsep)
# splitext inherits its docstring from the generic implementation
splitext.__doc__ = genericpath._splitext.__doc__
def basename(p):
    """Return the final component of pathname *p*."""
    head, tail = split(p)
    return tail
def dirname(p):
    """Return the directory component of pathname *p*."""
    head, tail = split(p)
    return head
def islink(path):
    """Test for a symbolic link.  Always False here: WindowsNT/95 and
    OS/2 have no symlink support in this implementation."""
    return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Return True when *path* is a mount point: the root of a drive or
    a bare UNC mount."""
    unc, remainder = splitunc(path)
    if unc:
        return remainder in ('', '/', '\\')
    tail = splitdrive(path)[1]
    return len(tail) == 1 and tail in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Call func(arg, dirname, names) for every directory at or below
    top ('.' and '..' excluded).  func may prune or reorder names in
    place to control the recursion.  Deprecated in favor of os.walk."""
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        entries = os.listdir(top)
    except os.error:
        return
    func(arg, top, entries)
    for entry in entries:
        child = join(top, entry)
        if isdir(child):
            walk(child, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # find the end of the user name (up to the first path separator)
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # pick the home directory: HOME first, then USERPROFILE, then
    # HOMEDRIVE + HOMEPATH; give up if none of these is set
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: # ~user: swap the last component of the home directory
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged.  No expansion happens inside
    single-quoted sections; '$$' and '%%' are literal '$' and '%'."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # unmatched quote: copy the rest verbatim
                res = res + path
                index = pathlen - 1
        elif c == '%':  # variable or literal '%%'
            if path[index + 1:index + 2] == '%':
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # unterminated %var: keep the literal text
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$':  # variable or literal '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # unterminated ${var: keep the literal text
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # bare $var: collect name characters
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
def normpath(path):
    """Normalize path, eliminating double slashes and '.'/'..'
    components where possible."""
    # Device and literal path prefixes must pass through untouched:
    #   \\.\ -> device names, \\?\ -> literal (long) paths
    if path.startswith(('\\\\.\\', '\\\\?\\')):
        return path
    path = path.replace("/", "\\")
    # Split off a drive specifier ("c:") if one is present.  The dead
    # isinstance() conditional that chose between two identical
    # (backslash, dot) tuples (a Python 2 unicode leftover) is removed.
    if path[1:2] == ':':
        prefix, path = path[:2], path[2:]
    else:
        prefix = ''
    # Collapsing leading backslashes must be drive-aware: without a
    # drive letter, \\server\mount is a UNC name whose double slash is
    # significant, so leading backslashes are preserved one by one.
    if prefix == '':
        while path[:1] == "\\":
            prefix = prefix + "\\"
            path = path[1:]
    else:
        if path.startswith("\\"):
            prefix = prefix + "\\"
        path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # ".." cancels the preceding real component
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # ".." at an absolute root has nowhere to go
                del comps[i]
            else:
                i += 1
        else:
            i += 1
    # An empty relative path normalizes to '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
# Return an absolute path.  Three implementations are selected at
# import time: native Windows (nt._getfullpathname), Jython on Windows
# (Java-backed sys.getPath), or a generic join-with-cwd fallback.
try:
    from nt import _getfullpathname
except ImportError:  # no built-in nt module - maybe it's Jython ;)
    if os._name == 'nt' :
        # Running on Windows under Jython: the Java-backed sys.getPath
        # resolves names against the NT current directory.
        def abspath(path):
            """Return the absolute version of a path."""
            try:
                if isinstance(path, str):
                    # Result must be unicode
                    if path:
                        path = sys.getPath(path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
                else:
                    # Result must be bytes
                    if path:
                        path = sys.getPath(path).encode('latin-1')
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
            except EnvironmentError:
                pass  # Bad path - return unchanged.
            return normpath(path)
    else:
        # Not running on Windows - mock up something sensible by
        # joining with the current working directory.
        def abspath(path):
            """Return the absolute version of a path."""
            try:
                if isinstance(path, str):
                    # Result must be unicode
                    if path:
                        path = join(os.getcwd(), path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
                else:
                    # Result must be bytes
                    if path:
                        path = join(os.getcwd(), path)
                    else:
                        # Empty path must return current working directory
                        path = os.getcwd()
            except EnvironmentError:
                pass  # Bad path - return unchanged.
            return normpath(path)
else:  # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path:  # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass  # Bad path - return unchanged.
        elif isinstance(path, str):
            path = os.getcwd()
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
    """Normalize *path* to an absolute path and split it for relpath().

    Returns ``(is_unc, prefix, components)`` where *prefix* is the UNC
    share (``\\\\server\\mount``) or the drive letter, and *components*
    is the list of non-empty path pieces after it.
    """
    abs = abspath(normpath(path))
    prefix, rest = splitunc(abs)
    is_unc = bool(prefix)
    if not is_unc:
        # Not a UNC path: fall back to splitting off the drive letter.
        prefix, rest = splitdrive(abs)
    return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
    """Return a relative filepath to *path* from the *start* directory."""
    if not path:
        raise ValueError("no path specified")
    s_unc, s_prefix, s_parts = _abspath_split(start)
    p_unc, p_prefix, p_parts = _abspath_split(path)
    # UNC and drive-letter paths have no common ancestor.
    if p_unc ^ s_unc:
        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                            % (path, start))
    if p_prefix.lower() != s_prefix.lower():
        if p_unc:
            raise ValueError("path is on UNC root %s, start on UNC root %s"
                                                % (p_prefix, s_prefix))
        raise ValueError("path is on drive %s, start on drive %s"
                                            % (p_prefix, s_prefix))
    # Count the leading components shared by start and path
    # (case-insensitive, as Windows filenames are).
    common = 0
    for s_comp, p_comp in zip(s_parts, p_parts):
        if s_comp.lower() != p_comp.lower():
            break
        common += 1
    # Climb out of the unshared part of start, then descend into path.
    climb = [pardir] * (len(s_parts) - common)
    remainder = p_parts[common:]
    if not climb and not remainder:
        return curdir
    return join(*(climb + remainder))
| true | true |
f72e99b87018df0efd9d0456b5f140f1ca7e69aa | 1,994 | py | Python | apprentice/learners/when_learners/actor_critic.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 10 | 2019-11-01T01:09:57.000Z | 2022-02-17T09:15:12.000Z | apprentice/learners/when_learners/actor_critic.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 40 | 2019-08-06T18:01:31.000Z | 2021-07-15T12:38:56.000Z | apprentice/learners/when_learners/actor_critic.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 6 | 2019-08-15T01:45:19.000Z | 2021-06-01T19:54:29.000Z | import torch
import torch.nn as nn
class ValueNet(nn.Module):
    """
    Critic half of the actor-critic network.

    Produces a scalar state value and also exposes the hidden activations
    that feed the valuation, so the action network can reuse them.
    """

    def __init__(self, n_inputs: int, n_hidden: int = None):
        """
        Build the value network for *n_inputs* features.

        When *n_hidden* is omitted it defaults to roughly half the input
        width (specifically ``(n_inputs + 2) // 2``).
        """
        super(ValueNet, self).__init__()
        if n_hidden is None:
            n_hidden = (n_inputs + 2) // 2
        self.n_hidden = n_hidden
        self.hidden = nn.Sequential(nn.Linear(n_inputs, n_hidden), nn.ReLU())
        self.value = nn.Linear(n_hidden, 1)

    def forward(self, x):
        """
        Return ``(state_value, hidden_activations)`` for input *x*.
        """
        hidden_out = self.hidden(x)
        return self.value(hidden_out), hidden_out
class ActionNet(nn.Module):
    """
    Actor half of the actor-critic network: scores a single action.

    Consumes the action features concatenated with the value network's
    hidden activations and yields one scalar action value.
    """

    def __init__(self, n_action_inputs: int, n_value_hidden: int,
                 n_action_hidden: int = None):
        """
        Build the action network.

        *n_action_hidden* defaults to roughly half of the combined input
        width (``(n_action_inputs + n_value_hidden + 2) // 2``).
        """
        super(ActionNet, self).__init__()
        if n_action_hidden is None:
            n_action_hidden = (n_action_inputs + n_value_hidden + 2) // 2
        combined_width = n_action_inputs + n_value_hidden
        self.hidden = nn.Sequential(
            nn.Linear(combined_width, n_action_hidden),
            nn.ReLU())
        self.action_value = nn.Linear(n_action_hidden, 1)

    def forward(self, action_x, value_hidden):
        """
        Return the scalar action value for *action_x* given the value
        network's hidden activations *value_hidden*.
        """
        joined = torch.cat((action_x, value_hidden), 1)
        return self.action_value(self.hidden(joined))
| 29.323529 | 79 | 0.600802 | import torch
import torch.nn as nn
class ValueNet(nn.Module):
    """Critic network: returns the state value plus the hidden activations
    feeding it (comment-stripped duplicate of the class above)."""
    def __init__(self, n_inputs: int, n_hidden: int = None):
        # Default hidden width is (n_inputs + 2) // 2 when not supplied.
        super(ValueNet, self).__init__()
        if n_hidden is None:
            n_hidden = (n_inputs + 2) // 2
        self.n_hidden = n_hidden
        self.hidden = nn.Sequential(
            nn.Linear(n_inputs, n_hidden),
            nn.ReLU()
        )
        self.value = nn.Linear(n_hidden, 1)
    def forward(self, x):
        # Returns (state_value, hidden_layer_output).
        x = self.hidden(x)
        return self.value(x), x
class ActionNet(nn.Module):
    """Actor network: scores one action from its features plus the value
    network's hidden activations (comment-stripped duplicate)."""
    def __init__(self, n_action_inputs: int, n_value_hidden: int,
                 n_action_hidden: int = None):
        # Default hidden width is half the combined input width (plus one).
        super(ActionNet, self).__init__()
        if n_action_hidden is None:
            n_action_hidden = (n_action_inputs + n_value_hidden + 2) // 2
        self.hidden = nn.Sequential(
            nn.Linear(n_action_inputs + n_value_hidden, n_action_hidden),
            nn.ReLU()
        )
        self.action_value = nn.Linear(n_action_hidden, 1)
    def forward(self, action_x, value_hidden):
        # Concatenate action features with value-net hiddens, then score.
        x = self.hidden(torch.cat((action_x, value_hidden), 1))
        return self.action_value(x)
| true | true |
f72e99c211ca13bdd9ce866e317332537579f66b | 413 | py | Python | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Python If-Else.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Python If-Else.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Python If-Else.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | #!/bin/python
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
if n % 2: # odd.
print("Weird")
elif n < 5: # All the elif never come if number is odd.
print("Not Weird")
elif n < 21: # If number in 2 to 5 than above will execute and this will not be.
print("Weird")
else:
print("Not Weird")
| 18.772727 | 84 | 0.581114 |
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # HackerRank "Python If-Else": classify the integer read from stdin.
    n = int(input().strip())
    if n % 2:
        # Odd numbers are always "Weird".
        print("Weird")
    elif n < 5:
        # Even and in the range 2..4.
        print("Not Weird")
    elif n < 21:
        # Even and in the range 6..20.
        print("Weird")
    else:
        # Even and greater than 20.
        print("Not Weird")
| true | true |
f72e9a5c6673bca078109fe2cc00c921bf182f20 | 6,244 | py | Python | Surround.py | jcartledge/sublime-surround | 204831d3b9f5219c155b43363c17d6eb606bbe7b | [
"MIT"
] | 22 | 2015-03-19T09:27:26.000Z | 2021-12-04T14:04:39.000Z | Surround.py | jcartledge/sublime-surround | 204831d3b9f5219c155b43363c17d6eb606bbe7b | [
"MIT"
] | 2 | 2016-02-05T11:19:22.000Z | 2017-04-09T05:44:56.000Z | Surround.py | jcartledge/sublime-surround | 204831d3b9f5219c155b43363c17d6eb606bbe7b | [
"MIT"
] | 5 | 2015-01-09T08:37:31.000Z | 2020-07-11T12:33:33.000Z | import sublime
import sublime_plugin
import re
class SurroundWindowCommand(sublime_plugin.WindowCommand):
    """Base class for the surround window commands.

    Subclasses provide caption() and callback(); run() stores the optional
    selection and opens an input panel that feeds the callback.
    """

    def run(self, sel=None):
        self.sel = sel
        panel_caption = self.caption()
        self.window.show_input_panel(panel_caption, "",
                                     self.callback, None, None)
class SurroundSelectionWindowCommand(SurroundWindowCommand):
    """Prompt for text and surround the current selection(s) with it."""

    def caption(self):
        return "Surround with:"

    def callback(self, surround):
        # Hand off to the text command with the remembered selection.
        view = self.window.active_view()
        view.run_command("surround_selection",
                         {"surround": surround, "sel": self.sel})
class SurroundChangeCommand(SurroundWindowCommand):
    """Prompt for a surround to match and a replacement, then rewrite it."""

    def caption(self):
        return "Match"

    def callback(self, match):
        # Remember what to match, then ask for the replacement text.
        self.match = match
        self.window.show_input_panel("Replace with:", "",
                                     self.replace_callback, None, None)

    def replace_callback(self, replacement):
        view = self.window.active_view()
        view.run_command("surround_change_text",
                         {"match": self.match, "replacement": replacement})
class SurroundDeleteCommand(SurroundWindowCommand):
    """Prompt for a surround to delete from around the selection."""

    def caption(self):
        return "Delete:"

    def callback(self, match):
        # Deleting is just a change with an empty replacement.
        view = self.window.active_view()
        view.run_command("surround_change_text",
                         {"match": match, "replacement": ""})
class SurroundTextCommand(sublime_plugin.TextCommand):
    """Base class for the surround text commands.

    Loads the plugin settings and provides helpers that turn the text the
    user typed into an [open, close] replacement pair.
    """

    def __init__(self, _):
        self.settings = sublime.load_settings("surround.sublime-settings")
        super(SurroundTextCommand, self).__init__(_)

    def pairs_for_replacement(self, surround):
        """Map *surround* through the configured replacement pairs."""
        return self.pair(surround,
                         self.settings.get("surround_pairs_for_replacement"))

    def pair(self, surround, pairs):
        """Return the configured pair for surround[0], or surround as-is."""
        key = surround[0]
        return pairs[key] if key in pairs else surround

    def tags_for_replacement(self, surround):
        """If surround[0] is an opening tag, build the matching close tag."""
        matches = re.search(r"<([\S]+)[^>]*>", surround[0])
        if not matches:
            return surround
        return [surround[0], "</" + matches.group(1) + ">"]

    def preprocess_replacement(self, surround):
        """Expand the raw user input into an [open, close] pair."""
        expanded = self.pairs_for_replacement([surround, surround])
        return self.tags_for_replacement(expanded)
class SurroundSelectionCommand(SurroundTextCommand):
    """ Surround the current selection(s) with something """

    def run(self, edit, surround=None, sel=None):
        view = self.view
        # Vintage needs a text command for `ys<motion>`: with no surround
        # text yet, capture the selections and defer to the window command,
        # which prompts the user and re-invokes us with `surround` set.
        if(surround is None):
            sel = [[region.begin(), region.end()] for region in view.sel()]
            args = {"sel": sel}
            return view.window().run_command("surround_selection_window", args)
        if(sel is None):
            sel = view.sel()
        else:
            # Selections arrive serialized as [begin, end] pairs; rebuild
            # sublime.Region objects from them.
            sel = [
                sublime.Region(int(region[0]), int(region[1]))
                for region in sel
            ]
        surround = self.preprocess_replacement(surround)
        # Insert back-to-front so earlier offsets remain valid, and insert
        # the closing text before the opening text for the same reason.
        for region in reversed(sel):
            view.insert(edit, region.end(), surround[1])
            view.insert(edit, region.begin(), surround[0])
class SurroundChangeTextCommand(SurroundTextCommand):
    """Change (or delete) something surrounding the insertion point(s).

    Finds the balanced open/close pair around each selection and replaces
    both ends with the requested replacement pair.
    """

    def run(self, edit, match, replacement):
        search = self.search_patterns_for_surround(match)
        replacement = self.preprocess_replacement(replacement)
        view = self.view
        try:
            # Work back-to-front so earlier regions keep valid offsets;
            # within one region, replace the end before the start.
            for region in reversed(view.sel()):
                end = self.find_end(region.end(), search)
                if end:
                    start = self.find_start(region.begin(), search)
                    if start:
                        self.view.replace(edit, end, replacement[1])
                        self.view.replace(edit, start, replacement[0])
        except RuntimeError as err:
            sublime.error_message(str(err))

    def find_start(self, to_pos, search):
        """Find the opening pair before to_pos, skipping balanced pairs."""
        matches = self.find_between(0, to_pos, search)
        # BUG FIX: compare integers with == rather than the identity
        # operator `is` (implementation-dependent; SyntaxWarning on 3.8+).
        if len(matches) == 0:
            raise RuntimeError("Starting pair not found: " + search[0])
        previous = matches.pop()
        # balance pairs: an odd number of closers between the candidate and
        # to_pos means the candidate closes an inner pair - keep searching.
        close_search = [search[1], search[0], search[2]]
        count_pairs = len(self.find_between(previous.end(), to_pos, close_search))
        if count_pairs % 2 == 0:
            return previous
        else:
            return self.find_start(previous.begin(), search)

    def find_end(self, from_pos, search):
        """Find the closing pair after from_pos, skipping balanced pairs."""
        next = self.view.find(search[1], from_pos, search[2])
        if next is None:
            raise RuntimeError("Ending pair not found: " + search[1])
        # balance pairs (same reasoning as find_start, mirrored)
        count_pairs = len(self.find_between(from_pos, next.begin(), search))
        if count_pairs % 2 == 0:
            return next
        else:
            return self.find_end(next.end(), search)

    def find_between(self, from_pos, to_pos, search):
        """All matches of the open pattern lying within [from_pos, to_pos]."""
        return [
            find for find in self.view.find_all(search[0], search[2])
            if find.end() <= to_pos
            and find.begin() >= from_pos
        ]

    def search_patterns_for_surround(self, surround):
        """Build [open_pattern, close_pattern, flags] from the user input."""
        surround = [surround, surround]
        surround = self.pairs_for_search(surround)
        surround = self.tags_for_search(surround)
        # Single characters are matched literally; longer inputs as regex.
        if len(surround[0]) <= 1:
            flag = sublime.LITERAL
        else:
            flag = 0
        surround.append(flag)
        return surround

    def pairs_for_search(self, surround):
        """Map through the configured search pairs from settings."""
        pairs = self.settings.get("surround_pairs_for_search")
        return self.pair(surround, pairs)

    def tags_for_search(self, surround):
        """Turn an opening tag into open/close search patterns."""
        matches = re.search(r"<([\S]+)([^>]*)>", surround[0])
        if matches:
            attrs = matches.group(2)
            if len(attrs) == 0:
                # No attributes given: match a tag with any attributes.
                attrs = "([\s]+[^>]*)?"
            open_tag = str("<" + matches.group(1) + attrs + ">")
            close_tag = str("</" + matches.group(1) + ">")
            return [open_tag, close_tag]
        else:
            return surround
| 34.120219 | 89 | 0.604901 | import sublime
import sublime_plugin
import re
class SurroundWindowCommand(sublime_plugin.WindowCommand):
    """Base class for surround window commands (comment-stripped duplicate)."""
    def run(self, sel=None):
        # Store the selection and prompt via an input panel.
        self.sel = sel
        self.window.show_input_panel(
            self.caption(), "", self.callback, None, None)
class SurroundSelectionWindowCommand(SurroundWindowCommand):
    """Surround the selection(s) with input text (comment-stripped duplicate)."""
    def caption(self):
        return "Surround with:"
    def callback(self, surround):
        args = {"surround": surround, "sel": self.sel}
        self.window.active_view().run_command("surround_selection", args)
class SurroundChangeCommand(SurroundWindowCommand):
    """Change a surround to a replacement (comment-stripped duplicate)."""
    def caption(self):
        return "Match"
    def callback(self, match):
        # Remember the match, then prompt for the replacement text.
        self.match = match
        self.window.show_input_panel(
            "Replace with:", "", self.replace_callback, None, None)
    def replace_callback(self, replacement):
        args = {"match": self.match, "replacement": replacement}
        self.window.active_view().run_command("surround_change_text", args)
class SurroundDeleteCommand(SurroundWindowCommand):
    """Delete a surrounding pair (comment-stripped duplicate): change with
    an empty replacement."""
    def caption(self):
        return "Delete:"
    def callback(self, match):
        args = {"match": match, "replacement": ""}
        self.window.active_view().run_command("surround_change_text", args)
class SurroundTextCommand(sublime_plugin.TextCommand):
    """Base class for surround text commands (comment-stripped duplicate)."""
    def __init__(self, _):
        self.settings = sublime.load_settings("surround.sublime-settings")
        super(SurroundTextCommand, self).__init__(_)
    def pairs_for_replacement(self, surround):
        # Map through the configured replacement pairs.
        pairs = self.settings.get("surround_pairs_for_replacement")
        return self.pair(surround, pairs)
    def pair(self, surround, pairs):
        # Return the configured pair for surround[0], else surround itself.
        if surround[0] in pairs:
            return pairs[surround[0]]
        else:
            return surround
    def tags_for_replacement(self, surround):
        # An opening tag gets a matching close tag built for it.
        matches = re.search(r"<([\S]+)[^>]*>", surround[0])
        if matches:
            return [surround[0], "</" + matches.group(1) + ">"]
        else:
            return surround
    def preprocess_replacement(self, surround):
        # Expand raw user input into an [open, close] pair.
        return self.tags_for_replacement(
            self.pairs_for_replacement([surround, surround]))
class SurroundSelectionCommand(SurroundTextCommand):
    """Surround the current selection(s) (comment-stripped duplicate)."""
    def run(self, edit, surround=None, sel=None):
        view = self.view
        # No surround text yet: capture selections and defer to the window
        # command, which prompts and re-invokes us with `surround` set.
        if(surround is None):
            sel = [[region.begin(), region.end()] for region in view.sel()]
            args = {"sel": sel}
            return view.window().run_command("surround_selection_window", args)
        if(sel is None):
            sel = view.sel()
        else:
            # Selections arrive as [begin, end] pairs; rebuild Regions.
            sel = [
                sublime.Region(int(region[0]), int(region[1]))
                for region in sel
            ]
        surround = self.preprocess_replacement(surround)
        # Insert back-to-front so earlier offsets stay valid.
        for region in reversed(sel):
            view.insert(edit, region.end(), surround[1])
            view.insert(edit, region.begin(), surround[0])
class SurroundChangeTextCommand(SurroundTextCommand):
    """Change/delete a balanced surrounding pair (comment-stripped duplicate)."""
    def run(self, edit, match, replacement):
        search = self.search_patterns_for_surround(match)
        replacement = self.preprocess_replacement(replacement)
        view = self.view
        try:
            # Back-to-front so earlier regions keep valid offsets.
            for region in reversed(view.sel()):
                end = self.find_end(region.end(), search)
                if end:
                    start = self.find_start(region.begin(), search)
                    if start:
                        self.view.replace(edit, end, replacement[1])
                        self.view.replace(edit, start, replacement[0])
        except RuntimeError as err:
            sublime.error_message(str(err))
    def find_start(self, to_pos, search):
        # Find the opening pair before to_pos, skipping balanced pairs.
        matches = self.find_between(0, to_pos, search)
        # NOTE(review): `is 0` compares integer identity and is
        # implementation-dependent - should be `== 0`.
        if len(matches) is 0:
            raise RuntimeError("Starting pair not found: " + search[0])
        previous = matches.pop()
        close_search = [search[1], search[0], search[2]]
        count_pairs = len(self.find_between(previous.end(), to_pos, close_search))
        if count_pairs % 2 is 0:
            return previous
        else:
            return self.find_start(previous.begin(), search)
    def find_end(self, from_pos, search):
        # Find the closing pair after from_pos, skipping balanced pairs.
        next = self.view.find(search[1], from_pos, search[2])
        if next is None:
            raise RuntimeError("Ending pair not found: " + search[1])
        count_pairs = len(self.find_between(from_pos, next.begin(), search))
        if count_pairs % 2 is 0:
            return next
        else:
            return self.find_end(next.end(), search)
    def find_between(self, from_pos, to_pos, search):
        # All matches of the open pattern inside [from_pos, to_pos].
        return [
            find for find in self.view.find_all(search[0], search[2])
            if find.end() <= to_pos
            and find.begin() >= from_pos
        ]
    def search_patterns_for_surround(self, surround):
        # Build [open_pattern, close_pattern, flags] from the user input;
        # single characters are matched literally, longer input as regex.
        surround = [surround, surround]
        surround = self.pairs_for_search(surround)
        surround = self.tags_for_search(surround)
        if len(surround[0]) <= 1:
            flag = sublime.LITERAL
        else:
            flag = 0
        surround.append(flag)
        return surround
    def pairs_for_search(self, surround):
        pairs = self.settings.get("surround_pairs_for_search")
        return self.pair(surround, pairs)
    def tags_for_search(self, surround):
        # Turn an opening tag into open/close search patterns.
        matches = re.search(r"<([\S]+)([^>]*)>", surround[0])
        if matches:
            attrs = matches.group(2)
            if len(attrs) == 0:
                # No attributes given: match a tag with any attributes.
                attrs = "([\s]+[^>]*)?"
            open_tag = str("<" + matches.group(1) + attrs + ">")
            close_tag = str("</" + matches.group(1) + ">")
            return [open_tag, close_tag]
        else:
            return surround
| true | true |
f72e9b984ac71cf8b83b845a772ada6289eeca8c | 1,947 | py | Python | src/pysilk/__main__.py | wyapx/Python-Silk-Module | 65619c79f78ac930073a8424ddb981b66d97ecde | [
"BSD-2-Clause"
] | null | null | null | src/pysilk/__main__.py | wyapx/Python-Silk-Module | 65619c79f78ac930073a8424ddb981b66d97ecde | [
"BSD-2-Clause"
] | null | null | null | src/pysilk/__main__.py | wyapx/Python-Silk-Module | 65619c79f78ac930073a8424ddb981b66d97ecde | [
"BSD-2-Clause"
] | null | null | null | import argparse
import time
from . import encode_file, decode_file
from .utils import get_file
from .wav import Wave
# Command-line interface for the pysilk encoder/decoder.
# NOTE(review): --quiet uses store_const with const=bool, which stores the
# (truthy) bool type object when the flag is given; it works as a flag but
# action="store_true" would be the conventional spelling - confirm intent.
parser = argparse.ArgumentParser("pysilk", description="encode/decode your silk file")
parser.add_argument("-sr", "--sample-rate", default=24000, help="set pcm samplerate")
parser.add_argument("-q", "--quiet", action="store_const", const=bool, default=False, help="reduce console output")
parser.add_argument("input", action="store", help="input file path")
parser.add_argument("output", action="store", help="output file path")
def get_suffix(path: str):
    """Extract and validate the file extension of *path*.

    Returns the extension (without the dot) when it is one of the
    supported formats. Raises ValueError when no extension is present and
    TypeError when the format is unsupported.
    """
    pieces = path.rsplit(".", 1)
    if len(pieces) == 1:
        raise ValueError("cannot parse suffix")
    suffix = pieces[1]
    if suffix not in ("wav", "pcm", "silk"):
        raise TypeError("%s format not supported" % suffix)
    return suffix
def log(*content_args):
    """Print *content_args* unless --quiet was passed (reads the
    module-global `args` namespace parsed in the main block)."""
    if args.quiet:
        return
    print(*content_args)
if __name__ == '__main__':
    st = time.time()
    args = parser.parse_args()
    # Work out the conversion direction from the two file extensions.
    i_suffix, o_suffix = get_suffix(args.input), get_suffix(args.output)
    if i_suffix == o_suffix:
        # Same format in and out - nothing to convert.
        print("nothing can do.")
    elif i_suffix == "pcm" and not args.sample_rate:
        # Raw PCM carries no header, so a sample rate is mandatory.
        raise ValueError("--sample-rate must be set")
    else:
        with open(args.output, "wb") as f:
            source = args.input
            if i_suffix == "wav" and o_suffix == "pcm":
                f.write(Wave.wav2pcm(get_file(source)))
            elif i_suffix == "pcm" and o_suffix == "wav":
                f.write(Wave.pcm2wav(get_file(source), args.sample_rate))
            elif i_suffix in ("pcm", "wav") and o_suffix == "silk":
                f.write(encode_file(source))
            elif i_suffix == "silk" and o_suffix in ("pcm", "wav"):
                # BUG FIX: decide WAV output from the output *suffix*; the
                # old comparison `args.output == "wav"` tested the whole
                # path and was effectively always False.
                f.write(decode_file(source, to_wav=o_suffix == "wav"))
            else:
                print("Unknown operation:", i_suffix, "to", o_suffix)
    log(f"done, {round((time.time() - st) * 1000, 2)}ms used")
| 37.442308 | 115 | 0.618387 | import argparse
import time
from . import encode_file, decode_file
from .utils import get_file
from .wav import Wave
# CLI definition (comment-stripped duplicate of the block above).
parser = argparse.ArgumentParser("pysilk", description="encode/decode your silk file")
parser.add_argument("-sr", "--sample-rate", default=24000, help="set pcm samplerate")
parser.add_argument("-q", "--quiet", action="store_const", const=bool, default=False, help="reduce console output")
parser.add_argument("input", action="store", help="input file path")
parser.add_argument("output", action="store", help="output file path")
def get_suffix(path: str):
    """Return the validated extension (wav/pcm/silk) of *path* without the
    dot; raise ValueError when none, TypeError when unsupported."""
    sp = path.rsplit(".", 1)
    if len(sp) == 1:
        raise ValueError("cannot parse suffix")
    elif sp[1] not in ("wav", "pcm", "silk"):
        raise TypeError("%s format not supported" % sp[1])
    else:
        return sp[1]
def log(*content_args):
    """Print unless --quiet was passed (reads the module-global `args`)."""
    if not args.quiet:
        print(*content_args)
if __name__ == '__main__':
    st = time.time()
    args = parser.parse_args()
    # Conversion direction is derived from the two file extensions.
    i_suffix, o_suffix = get_suffix(args.input), get_suffix(args.output)
    if i_suffix == o_suffix:
        # Same format in and out - nothing to convert.
        print("nothing can do.")
    elif i_suffix == "pcm" and not args.sample_rate:
        # Raw PCM carries no header, so a sample rate is mandatory.
        raise ValueError("--sample-rate must be set")
    else:
        with open(args.output, "wb") as f:
            source = args.input
            if i_suffix == "wav" and o_suffix == "pcm":
                f.write(Wave.wav2pcm(get_file(source)))
            elif i_suffix == "pcm" and o_suffix == "wav":
                f.write(Wave.pcm2wav(get_file(source), args.sample_rate))
            elif i_suffix in ("pcm", "wav") and o_suffix == "silk":
                f.write(encode_file(source))
            elif i_suffix == "silk" and o_suffix in ("pcm", "wav"):
                # NOTE(review): `args.output == "wav"` compares the whole
                # output *path* to "wav" and so is almost always False;
                # this likely should test o_suffix instead - confirm.
                f.write(decode_file(source, to_wav=args.output == "wav"))
            else:
                print("Unknown operation:", i_suffix, "to", o_suffix)
    log(f"done, {round((time.time() - st) * 1000, 2)}ms used")
f72e9bc5fdf1a9d36616a60693fa75d25f25a068 | 42,688 | py | Python | src/framework.py | peter-clemenko/ptf | b250de6b68159a98936df17c6c0a179e6e0010e4 | [
"FTL"
] | null | null | null | src/framework.py | peter-clemenko/ptf | b250de6b68159a98936df17c6c0a179e6e0010e4 | [
"FTL"
] | null | null | null | src/framework.py | peter-clemenko/ptf | b250de6b68159a98936df17c6c0a179e6e0010e4 | [
"FTL"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PTF Main framework look and feel
#
# main module imports
from src.core import *
import sys
import readline
import os
import time
import getpass
# Optional dependency: pexpect is only needed for password-protected
# gitlab module keys; pexpect_check records its availability.
try:
    import pexpect
    pexpect_check = 1
except:
    # NOTE(review): bare except also swallows errors other than
    # ImportError - consider narrowing.
    print("[!] python-pexpect not installed, gitlab will not work")
    print("[!] Run pip install pexpect to install pexpect for gitlab support.")
    pexpect_check = 0
# python 2 compatibility
try: input = raw_input
except NameError: pass
# If user does not want the awesome banner, do not print it out
if '-nb' in sys.argv or '--no-banner' in sys.argv:
pass
else:
# print the main welcome banner
print (banner)
# funny random banner
import random
funny = random.sample(["Aliens", "Clowns", "Mr. Robot","Zero Cool", "Goats", "Hackers", "Unicorns"], 1)[0]
# blank variables used later
deb_modules = ""
arch_modules = ""
fedora_modules = ""
openbsd_modules = ""
if check_kali() == "Kali": os_profile = "Kali"
else: os_profile = profile_os()
print_status("Operating system detected as: " + bcolors.BOLD + os_profile + bcolors.ENDC)
# main intro here
if profile_os() == "DEBIAN":
subprocess.Popen("sudo dpkg --add-architecture i386", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
print_status("Welcome to PTF - where everything just works...Because.." +
bcolors.BOLD + funny + bcolors.ENDC)
print ("""
For a list of available commands type ? or help
""")
"""
This will ignore specific modules upon install_update_all - noisy installation ones. Can still be installed manually and updated that way.
"""
ignore_update_these = []
if check_config("IGNORE_UPDATE_ALL_MODULES") is not None:
ignore_update_these = check_config("IGNORE_UPDATE_ALL_MODULES").split(",")
def ignore_update_all_module(module):
    """Return True when *module* matches an IGNORE_UPDATE_ALL_MODULES entry.

    Entries ending in "/*" act as directory wildcards; other entries must
    match the module's full path exactly.
    """
    for entry in ignore_update_these:
        if "/*" in entry:
            # Wildcard entry: match on the directory prefix.
            if entry[:-1] in module:
                return True
        elif os.getcwd() + "/" + entry + ".py" == module:
            return True
    return False
# Modules ignored entirely (comma-separated IGNORE_THESE_MODULES in
# ptf.config); announce them unless the value is empty or a stray quote.
ignore_these = []
if check_config("IGNORE_THESE_MODULES") is not None:
    ignore_these = check_config("IGNORE_THESE_MODULES").split(",")
    if ignore_these[0] != "":
        if ignore_these[0] != '"':
            print_info("Ignoring the following modules: " +
                       (", ").join(ignore_these))
# ignore modules if they are specified in the ptf.config
def ignore_module(module):
    """Return True (and print a warning) when *module* matches an
    IGNORE_THESE_MODULES entry; "/*" entries are directory wildcards."""
    ignored = any(
        (entry[:-1] in module) if "/*" in entry
        else (os.getcwd() + "/" + entry + ".py" == module)
        for entry in ignore_these
    )
    if ignored:
        print_warning("Ignoring module: " + module)
    return ignored
# Optional whitelist (INCLUDE_ONLY_THESE_MODULES in ptf.config); an empty
# or quote-only value resets the list so everything is included.
include_these = []
if check_config("INCLUDE_ONLY_THESE_MODULES") is not None:
    include_these = check_config("INCLUDE_ONLY_THESE_MODULES").split(",")
    if include_these[0] != "":
        if include_these[0] != '"':
            print_info("Including only the following modules: " +
                       (", ").join(include_these))
        else:
            include_these = []
    else:
        include_these = []
# include only particular modules if they are specified in the ptf.config
def include_module(module):
    """Return True when *module* passes the include whitelist.

    With no whitelist configured every module is included; otherwise the
    module must match an entry ("/*" entries are directory wildcards).
    """
    if not include_these:
        return True
    included = any(
        (entry[:-1] in module) if "/*" in entry
        else (os.getcwd() + "/" + entry + ".py" == module)
        for entry in include_these
    )
    if included:
        print_status("Including module: " + module)
    return included
# check the folder structure
def show_module():
    """Print every available PTF module with its description, padded into
    a two-column name/description listing."""
    modules_path = os.getcwd() + "/modules/"
    print ("\n")
    print((bcolors.BOLD + "The PenTesters Framework Modules" + bcolors.ENDC))
    print(("""=================================
    """) + (bcolors.BOLD) + ("""Name Description """) + (bcolors.ENDC) + ("""
    ---- ---------------
    """))
    print (
        " modules/install_update_all This will install or update all tools with modules within PTF")
    print (
        " modules/update_installed This will update all installed tools within PTF")
    for path, subdirs, files in os.walk(modules_path):
        for name in sorted(files):
            # join the structure
            filename = os.path.join(path, name)
            # strip un-needed files (idiom fix: `not in` over `not x in`)
            if name not in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                # shorten it up a little bit
                filename_short = filename.replace(os.getcwd() + "/", "")
                filename_short = filename_short.replace(".py", "")
                filename_short = filename_short.replace(".txt", "")
                description = module_parser(filename, "DESCRIPTION")
                # print the module name, left-padded into a 53-column field
                if description is not None:
                    temp_number = 53 - len(filename_short)
                    print(" " + filename_short + " " * temp_number + description)
    print("\n")
def show_new_modules():
    """List modules whose tool is not yet installed on disk.

    A module counts as "new" when it declares an INSTALL_LOCATION that
    does not already exist relative to PTF's parent directory.
    """
    modules_path = os.getcwd() + "/modules/"
    for path, subdirs, files in os.walk(modules_path):
        for name in sorted(files):
            filename = os.path.join(path, name)
            if not name in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                module = filename_to_module(filename)
                description = module_parser(filename, "DESCRIPTION")
                location = module_parser(filename,"INSTALL_LOCATION")
                # NOTE(review): stripping "ptf/modules/" assumes PTF lives
                # in a directory literally named "ptf" - confirm.
                if not ((location is None) or (os.path.exists(os.path.join(path.replace("ptf/modules/",""), location)))):
                    # Only show modules that carry a description.
                    if description != None:
                        temp_number = 53 - len(module)
                        print(" " + module + " " * temp_number + description)
    print("\n")
# this is here if you need to access to a gitlab with a password for your keyphrase
def get_password_gitlab():
    """Prompt once for the Gitlab module key passphrase and cache it in the
    module-global `password_gitlab` (empty string means "no passphrase")."""
    # The `global` declaration is function-wide in Python; hoisting it to
    # the top makes that explicit (idiom fix for `not 'x' in globals()`).
    global password_gitlab
    if 'password_gitlab' not in globals():
        password_gitlab = ""
    if password_gitlab == "":
        password_gitlab = getpass.getpass('Enter passphrase for Gitlab modules key (let blank if no passphrase) : ')
def discover_module_filename(module):
    """Resolve a module name (or module path) to its file on disk.

    Path-like names and the special modules are resolved relative to the
    PTF root; plain names are searched for under modules/. Raises a
    generic Exception when no matching file is found.
    """
    special_names = ("install_update_all", "update_installed", "custom_list", "__init__",)
    # custom_list modules live in .txt files; everything else is .py.
    suffix = ".txt" if "custom_list" in module else ".py"
    # is module already a path (or one of the special names)?
    if '/' in module or any(special in module for special in special_names):
        return definepath() + "/" + module + suffix
    # Otherwise walk the modules tree looking for a matching file name.
    for path, subdirs, files in os.walk(os.getcwd() + "/modules/"):
        for name in sorted(files):
            if name in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                continue
            if name.replace(".py", "") == module:
                return os.path.join(path, name)
    raise Exception("module not found")
def filename_to_module(filename):
    """Convert an absolute module filename into its short module name by
    stripping the working-directory prefix and the .py/.txt suffix."""
    shortened = filename.replace(os.getcwd() + "/", "")
    shortened = shortened.replace(".py", "").replace(".txt", "")
    return str(shortened)
# this is when a use <module> command is initiated
def use_module(module, all_trigger):
    """Drive the interactive install/update lifecycle for one PTF module.

    module      -- module name or relative path (e.g. modules/pivoting/tool)
    all_trigger -- "0": interactive sub-prompt; "1": force install/update
                   (run); "2": force update only

    Returns a "use <module>" string when the user chains into another
    module, "None" when backing out, or falls through after a forced run.
    """
    prompt = ("")
    # if we aren't using all
    if not "install_update_all" in module and not "update_installed" in module and not "__init__" in module and not "custom_list" in module:
        # set terminal title
        set_title("ptf - %s" % module)
        # if we are using a normal module
        if int(all_trigger) == 0 or int(all_trigger) == 1 or int(all_trigger) == 2:
            filename = discover_module_filename(module)
            module = filename_to_module(filename)
            # grab the author
            try:
                author = module_parser(filename, "AUTHOR")
            except TypeError:
                author = "Invalid"
            # grab the description
            description = module_parser(filename, "DESCRIPTION")
            # grab install type
            install_type = module_parser(filename, "INSTALL_TYPE")
            # if were are tool depends for other modules prior to install
            tool_depend = module_parser(filename, "TOOL_DEPEND")
            # BUGFIX: normalize a missing TOOL_DEPEND to "" so the membership
            # and len() checks below cannot raise TypeError on None
            if tool_depend is None:
                tool_depend = ""
            # Since unicorn requires metasploit to be installed in order to generate the payloads,
            # by default PTF will install or update metasploit.
            # Here it will ask what the user wants to do for if they already have msf installed
            # If they do, it will skip, else it will install
            if 'metasploit' in tool_depend and 'unicorn' in module:
                print_warning("Unicorn requires Metasploit Framework to be installed.")
                # Check if metasploit is installed
                if os.path.isdir("/opt/metasploit-framework/"):
                    print_info("Seems like you have Metasploit Framework already installed")
                    install_unicorn = input("Do you want to update metasploit? (y/n) (default is yes) ").lower()
                    # Do we want to update metasploit now or later
                    # If yes, then this will run as this part never existed
                    if install_unicorn == 'y':
                        print_info("Once you enter run, update, install or upgrade I will install metasploit for you")
                        pass
                    # If we do not want to update, then it will skip metasploit update
                    elif install_unicorn == 'n':
                        print_info("Skipping metasploit installation/update")
                        tool_depend = ""
                    else:
                        # If we enter anything but 'y' or 'n', it will continue like normal
                        print_info("No input detected. I will continue as normal and update metasploit")
                        pass
                else:
                    # If metasploit is not installed, then we will run as this part never existed
                    print_warning("Metasploit Framework is NOT installed. Therefore, I will install it for you")
                    pass
            else:
                pass
            # if the module path is wrong, throw a warning
            try:
                if not os.path.isfile(tool_depend + ".py"):
                    if len(tool_depend) > 1: print_warning("Tool depend: " + tool_depend + " not found. Ensure the module is pointing to a module location.")
            except TypeError: pass
            # grab repository location
            repository_location = module_parser(filename, "REPOSITORY_LOCATION")
            # custom work for zaproxy
            if "zaproxy" in repository_location: repository_location = zaproxy()
            # here we check if we need to do x86 or x64
            if module_parser(filename, "X64_LOCATION") != "":
                # grab architecture
                arch_detect = arch()
                if "64bit" in arch_detect:
                    repository_location = module_parser(filename, "X64_LOCATION")
            # grab install path
            base_install = check_config("BASE_INSTALL_PATH=")
            strorganize_dirs = check_config("USE_DIRECTORY_ORGANIZATION=")
            install_base_location = module_parser(filename, "INSTALL_LOCATION")
            module_split = module.split("/")
            module_split = module_split[1]
            if strorganize_dirs == "False":
                organize_dirs = False
            else:
                # Default to True
                organize_dirs = True
            if bool(organize_dirs) == True: install_location = os.path.expanduser(base_install + "/" + module_split + "/" + install_base_location + "/")
            else:
                install_location = base_install + "/" + install_base_location + "/"
            while 1:
                # if we aren't doing update/install all
                if int(all_trigger) == 0:
                    try:
                        prompt = input(bcolors.BOLD + "ptf:" + bcolors.ENDC + "(" + bcolors.RED + "%s" % module + bcolors.ENDC + ")>")
                    except EOFError:
                        prompt = "back"
                        print("")
                # exit if we need to
                if prompt == "back" or prompt == "quit" or prompt == "exit":
                    return "None"
                # show the help menu
                if prompt == "?" or prompt == "help":
                    show_help_menu()
                # show modules
                if prompt == "show modules": print_warning("In order to show modules, you must type 'back' first")
                # if we are using a module within a module we return our prompt
                if "use " in prompt:
                    return prompt
                # if we are searching for something
                if "search " in prompt:
                    search(prompt)
                # BUGFIX: the original line called the undefined name
                # split("/","") and raised NameError; treat "show <term>" as a
                # search, but leave "show options"/"show modules" to their own
                # dedicated handlers
                if "show " in prompt and prompt.lower() not in ("show options", "show modules"):
                    search(prompt.replace("show ", "search ", 1))
                # options menu - was a choice here to load upon initial load of dynamically pull each time
                # if changes are made, it makes sense to keep it loading each time
                if prompt.lower() == "show options":
                    print("Module options (%s):" % module)
                    # if we are using a normal module
                    if module != "modules/install_update_all":
                        print("\n\n")
                        print(bcolors.BOLD + "Module Author: " + bcolors.ENDC + author)
                        print(bcolors.BOLD + "Module Description: " + bcolors.ENDC + description)
                        print("-------------------------------------------------------------------------------------")
                        print(bcolors.BOLD + "INSTALL_TYPE: " + bcolors.ENDC + install_type)
                        print(bcolors.BOLD + "REPOSITORY_LOCATION: " + bcolors.ENDC + repository_location)
                        print(bcolors.BOLD + "INSTALL_LOCATION: " + bcolors.ENDC + install_location)
                        print("-------------------------------------------------------------------------------------")
                # if we are setting the command now
                if prompt.lower().startswith("set"):
                    # need to grab the options
                    set_breakout = prompt.split(" ")
                    # here we rewrite the options for the menu
                    if set_breakout[1].upper() == "INSTALL_TYPE":
                        install_type = set_breakout[2]
                    if set_breakout[1].upper() == "REPOSITORY_LOCATION":
                        repository_location = set_breakout[2]
                    if set_breakout[1].upper() == "INSTALL_LOCATION":
                        install_location = set_breakout[2]
                # tool depend is if there is a tool for example like veil that has a depend of Metasploit - can put TOOL_DEPEND = the tool or tools here
                if not "show options" in prompt.lower():
                    if len(tool_depend) > 1:
                        try:
                            if " " in tool_depend:
                                tool_depend = tool_depend.split(" ")
                                for tool in tool_depend: use_module(tool, "1")
                            elif "," in tool_depend:
                                tool_depend = tool_depend.split(",")
                                for tool in tool_depend: use_module(tool, "1")
                            else:
                                use_module(tool_depend, "1")
                        except: pass
                if len(tool_depend) < 1:
                    if int(all_trigger) == 1:
                        prompt = "run"
                    if int(all_trigger) == 2:
                        prompt = "update"
                # if we are using run, check first to see if its there, if so, do
                # an upgrade
                if prompt.lower() == "run":
                    # check if empty directory - if so purge it before anything
                    # else
                    check_blank_dir(install_location)
                    if os.path.isdir(install_location):
                        print_status(
                            "Detected installation already. Going to upgrade for you.")
                        prompt = "update"
                    else:
                        print_status(
                            "Tool not installed yet, will run through install routine")
                        prompt = "install"
                # check to see if we need to bypass after commands for certain
                # files - this is needed when using FILE and others where after
                # commands need to be run
                if module_parser(filename, "BYPASS_UPDATE") == "YES":
                    if prompt.lower() == "update":
                        prompt = "install"
                # if we are updating the tools
                if prompt.lower() == "update" or prompt.lower() == "upgrade":
                    # if we are using ignore modules then don't process
                    if not "__init__.py" in filename and not ignore_module(filename):
                        # move to the location
                        if os.path.isdir(install_location):
                            if install_type.lower() == "git":
                                print_status("Updating the tool, be patient while git pull is initiated.")
                                proc = subprocess.Popen("cd %s;git pull" % (install_location), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                                # check launcher
                                launcher(filename, install_location)
                                # here we check to see if we need anything we need to
                                # run after things are updated
                                update_counter = 0
                                # BUGFIX: communicate() returns a (stdout, stderr)
                                # bytes tuple - the original substring test against
                                # the tuple itself was always true; inspect stdout
                                git_output = proc.communicate()[0]
                                if b"Already up" not in git_output:
                                    after_commands(filename, install_location)
                                    update_counter = 1
                                else: print_status("Tool already up-to-date!")
                                print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                # run after commands
                                if update_counter == 0:
                                    after_commands(filename, install_location)
                            if install_type.lower() == "gitlab":
                                if pexpect_check == 0:
                                    print("[!] You can't use gitlab features unless you install pexpect. Please install pexpect in order to use these features. Install option: pip install python-pexpect.")
                                else:
                                    print_status("Updating the tool, be patient while git pull is initiated.")
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git -C %s pull' % (install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                    # check launcher
                                    launcher(filename, install_location)
                                    # here we check to see if we need anything we need to
                                    # run after things are updated
                                    update_counter = 0
                                    i = proc.expect(['Already up-to-date!', pexpect.EOF])
                                    if i == 1:
                                        after_commands(filename, install_location)
                                        update_counter = 1
                                    elif i == 0:
                                        print_status("Tool already up-to-date!")
                                    print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                    # run after commands
                                    if update_counter == 0:
                                        after_commands(filename, install_location)
                            if install_type.lower() == "svn":
                                print_status("Updating the tool, be patient while svn pull is initiated.")
                                proc = subprocess.Popen("cd %s;svn update" % (install_location), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                                # BUGFIX: read communicate() exactly once (a second
                                # call yields no data) and decode the bytes so they
                                # can be written to / compared with the text file
                                svn_output = proc.communicate()[0].decode(errors="replace")
                                # here we do some funky stuff to store old
                                # revisions
                                try:
                                    if not os.path.isfile(install_location + "/.goatsvn_storage"):
                                        filewrite = open(install_location + "/.goatsvn_storage", "w")
                                        filewrite.write(svn_output)
                                        filewrite.close()
                                    if os.path.isfile(install_location + "/.goatsvn_storage"):
                                        cmp = open(install_location +
                                                   "/.goatsvn_storage", "r").read()
                                        # if we are at a new revision
                                        if cmp != svn_output:
                                            # change prompt to something other than
                                            # update
                                            prompt = "goat"
                                except:
                                    pass
                                finally:
                                    proc.wait()
                                print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                # check launcher
                                launcher(filename, install_location)
                                # run after commands
                                if prompt != "update": after_commands(filename, install_location)
                            print_status("Running updatedb to tidy everything up.")
                            subprocess.Popen("updatedb", shell=True).wait()
                        if not os.path.isdir(install_location):
                            print_error("The tool was not found in the install location. Try running install first!")
                # if we want to install it
                if prompt.lower() == "install":
                    # if we are using ignore modules then don't process
                    if not "__init__.py" in filename and not ignore_module(filename):
                        # grab the OS type, DEBIAN, FEDORA, CUSTOM, BSD!!!! WOW!!,
                        ostype = profile_os()
                        # if OSTYPE is DEBIAN
                        if ostype == "DEBIAN":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.debian import base_install_modules
                            # grab all the modules we need
                            deb_modules = module_parser(filename, "DEBIAN")
                            base_install_modules(deb_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is ARCHLINUX
                        if ostype == "ARCHLINUX":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.archlinux import base_install_modules
                            # grab all the modules we need
                            arch_modules = module_parser(filename, "ARCHLINUX")
                            base_install_modules(arch_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is FEDORA
                        if ostype == "FEDORA":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.fedora import base_install_modules
                            # grab all the modules we need
                            fedora_modules = module_parser(filename, "FEDORA")
                            base_install_modules(fedora_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is OPENBSD
                        if ostype == "OPENBSD":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.openbsd import base_install_modules
                            # grab all the modules we need
                            openbsd_modules = module_parser(filename, "OPENBSD")
                            base_install_modules(openbsd_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        print_status("Making the appropriate directory structure first")
                        subprocess.Popen("mkdir -p %s" % install_location, shell=True).wait()
                        # if we are using git
                        if install_type.lower() in ["git","gitlab"]:
                            # if there are files in the install_location, we'll update.
                            if os.listdir(install_location):
                                print_status("Installation already exists, going to git pull then run after commands..")
                                if install_type.lower() == "gitlab":
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git -C %s pull' % (install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                    proc.wait()
                                else:
                                    subprocess.Popen("cd %s;git pull" % (install_location), stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                                print_status("Finished updating the tool located in:" + install_location)
                            else:
                                print_status("%s was the selected method for installation... Using %s to install." % (install_type.upper(), install_type.upper()))
                                print_status("Installing now.. be patient...")
                                if install_type.lower() == "gitlab":
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git clone --depth=1 %s %s' % (repository_location, install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                else:
                                    subprocess.Popen("git clone --depth=1 %s %s" % (repository_location, install_location), stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                                print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                                after_commands(filename, install_location)
                                launcher(filename, install_location)
                        # if we are using svn
                        if install_type.lower() == "svn":
                            print_status("SVN was the selected method for installation... Using SVN to install.")
                            subprocess.Popen("svn co %s %s" % (
                                repository_location, install_location), stderr=subprocess.PIPE, shell=True).wait()
                            print_status(
                                "Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        # if we are using file
                        if install_type.lower() == "file":
                            print_status("FILE was the selected method for installation... Using curl -o to install.")
                            repository_file = repository_location.split("/")[-1]
                            subprocess.Popen('curl -k -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30" -o %s%s %s' % (
                                install_location, repository_file, repository_location), stderr=subprocess.PIPE, shell=True).wait()
                            print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        # if we are using wget
                        if install_type.lower() == "wget":
                            print_status("WGET was the selected method for installation because it plays better than curl -l with recursive URLs.")
                            subprocess.Popen("cd %s && wget -q %s" % (install_location, repository_location), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                            print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        print_status("Running updatedb to tidy everything up.")
                        subprocess.Popen("updatedb", shell=True).wait()
                # if we update all we need to break out until finished
                if int(all_trigger) == 1 or int(all_trigger) == 2:
                    break
# searches in the directory to find a file that references the location
def find_containing_file(directory, location):
    """Find the module file in *directory* whose INSTALL_LOCATION is *location*.

    Scans every regular file for an INSTALL_LOCATION="<location>" line and
    returns the matching filename without its extension.  Returns None when
    no file references the location; warns when the directory is unreadable.
    """
    needle = 'INSTALL_LOCATION="%s"' % location
    try:
        print("Finding %s in %s"%(location, directory))
        for entry in os.listdir(directory):
            candidate = os.path.join(directory, entry)
            if not os.path.isfile(candidate):
                continue
            with open(candidate) as handle:
                for line in handle:
                    if needle in line:
                        return os.path.splitext(entry)[0]
    except OSError:
        print_warning("%s is not managed by PTF"%(location))
    # Didn't find anything, returning None
    return None
def handle_prompt(prompt, force=False):
    """Dispatch one command line entered at the main ptf> prompt.

    prompt -- the raw command string (e.g. "show modules", "use modules/x")
    force  -- when True, skip the install/update-all confirmation question
    """
    # specify no commands, if counter increments then a command was found
    base_counter = 0
    # main help menu
    if prompt == "?" or prompt == "help":
        show_help_menu()
        base_counter = 1
    # if we want to exit out
    if prompt == "quit" or prompt == "exit" or prompt == "back":
        base_counter = 1
        exit_ptf()
        sys.exit()
    # if we want to see the modules
    if prompt == "show modules":
        base_counter = 1
        show_module()
    # list new modules
    if prompt == "show new modules":
        base_counter = 1
        show_new_modules()
    # inside joke
    if prompt == "install sleeves":
        print_error("Scott White? Sleeves? F Sleeves. Scott Rules.")
        base_counter = 1
    # search functionality here
    if prompt.startswith("search"):
        base_counter = 1
        search(prompt)
    # if we want to use a module
    if prompt.startswith("use"):
        base_counter = 1
        counter = 0
        prompt = prompt.split(" ")
        # BUGFIX: a bare "use" with no argument used to raise IndexError on
        # prompt[1]; reject it with a friendly message instead
        if len(prompt) < 2:
            print_error("You must specify a module after 'use', e.g. use modules/exploitation/metasploit")
            return
        # do a quick sanity check to see if the module is there first
        if "install_update_all" in prompt[1] or "custom_list" in prompt[1]:
            counter = 3
            try:
                if not force: install_query = input("[*] You are about to install/update everything. Proceed? [yes/no]:")
                else:
                    print("[*] You are about to install/update everything. Proceed? [yes/no]:yes")
                    install_query = "yes"
            except EOFError:
                install_query = "no"
                print("")
            if install_query.lower() == "yes" or install_query.lower() == "y":
                # do auto update check first
                auto_update()
                if not "custom_list" in prompt[1]: modules_path = definepath() + "/" + (prompt[1])[:-18]
                # else: modules_path = definepath() + "/modules/"
                else: modules_path = prompt[1] + ".txt"
                # base holder for all debian packages
                deb_modules = ""
                # base holder for all arch packages
                arch_modules = ""
                # base holder for all fedora packages
                fedora_modules = ""
                # base holder for all openbsd packages
                openbsd_modules = ""
                # first we install all depends for all applications
                print_status("We are going to first install all prereqs using apt before installing..")
                print_status("Cycling through modules and grabbing requirements...")
                # if we install the custom_list tools
                if "custom_list" in modules_path:
                    if os.path.isfile(modules_path):
                        fileopen = open(modules_path).readlines()
                        for tools in fileopen:
                            print_status("Installing and/or updating: " + tools.rstrip())
                            # run the module for install
                            use_module(tools.rstrip().replace(".py", ""), "1")
                            time.sleep(0.2)
                for path, subdirs, files in os.walk(modules_path):
                    for name in files:
                        if "custom_list" in prompt[1] and name[:-4] not in open(definepath() + "/" + prompt[1] + ".txt").read(): break
                        # join the structure
                        filename = os.path.join(path, name)
                        # strip un-needed files
                        if not "__init__.py" in filename and not ignore_module(filename) and include_module(filename) and ".py" in filename and not ".pyc" in filename and not ignore_update_all_module(filename):
                            print("!!!***!!!installing deps for module: " + filename)
                            # shorten it up a little bit
                            filename_short = filename.replace(os.getcwd() + "/", "")
                            # update depend modules
                            filename_short = str(filename_short)
                            ostype = profile_os()
                            # DEBIAN
                            if ostype == "DEBIAN":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.debian import base_install_modules
                                    # grab all the modules we need
                                    deb_modules = deb_modules + "," + module_parser(filename_short, "DEBIAN")
                            # archlinux
                            if ostype == "ARCHLINUX":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.archlinux import base_install_modules
                                    # grab all the modules we need
                                    arch_modules = ""
                                    arch_modules = arch_modules + "," + \
                                        module_parser(filename_short, "ARCHLINUX")
                            # fedora
                            if ostype == "FEDORA":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.fedora import base_install_modules
                                    # grab all the modules we need
                                    fedora_modules = fedora_modules + "," + \
                                        module_parser(filename_short, "FEDORA")
                            # openbsd
                            # BUGFIX: was the misspelled "OPENSBD", so OpenBSD
                            # dependencies were never collected (profile_os()
                            # returns "OPENBSD" everywhere else in this file)
                            if ostype == "OPENBSD":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.openbsd import base_install_modules
                                    # grab all the modules we need
                                    openbsd_modules = openbsd_modules + "," + \
                                        module_parser(filename_short, "OPENBSD")
                # install all of the packages at once
                ostype = profile_os()
                if ostype == "DEBIAN":
                    deb_modules = deb_modules.replace(",", " ")
                    if deb_modules != "":
                        base_install_modules(deb_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "ARCHLINUX":
                    arch_modules = arch_modules.replace(",", " ")
                    if arch_modules != "":
                        base_install_modules(arch_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "FEDORA":
                    fedora_modules = fedora_modules.replace(",", " ")
                    if fedora_modules != "":
                        base_install_modules(fedora_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "OPENBSD":
                    openbsd_modules = openbsd_modules.replace(",", " ")
                    if openbsd_modules != "":
                        base_install_modules(openbsd_modules)
                        print_status("Finished updating depends for modules.")
                for path, subdirs, files in os.walk(modules_path):
                    for name in files:
                        if "custom_list" in prompt[1] and name[:-4] not in open(definepath() + "/" + prompt[1] + ".txt").read(): break
                        # join the structure
                        filename = os.path.join(path, name)
                        if not "__init__.py" in filename and not ignore_module(filename) and include_module(filename) and ".py" in filename and not ".pyc" in filename and not "install_update_all" in filename and not "__init__" in filename and not "custom_list" in filename:
                            # shorten it up a little bit
                            filename_short = filename.replace(os.getcwd() + "/", "")
                            filename_short = filename_short.replace(".py", "")
                            # check if empty directory - if so purge it before
                            # anything else
                            check_blank_dir(path)
                            print_status("Installing and/or updating: " + filename_short)
                            # run the module for install
                            use_module(filename_short, "1")
                            # sleep a sec
                            time.sleep(0.2)
                # clear the screen
                os.system("clear")
                print ("\n")
                print (
                    """ _ _ _ _ _ ____ _ _""")
                print (
                    """| | | | __ _ ___| | __ | |_| |__ ___ | _ \| | __ _ _ __ ___| |_""")
                print (
                    """| |_| |/ _` |/ __| |/ / | __| '_ \ / _ \ | |_) | |/ _` | '_ \ / _ \ __|""")
                print (
                    """| _ | (_| | (__| < | |_| | | | __/ | __/| | (_| | | | | __/ |_ """)
                print (
                    """|_| |_|\__,_|\___|_|\_\ \__|_| |_|\___| |_| |_|\__,_|_| |_|\___|\__|\n\n""")
                print_status("All finished installing/and or updating.. All shiny again.\n")
            else:
                print_status("Alright boss. Not installing right now. Tell me when. I want that shiny. I want it now.")
        if "update_installed" in prompt[1]:
            counter = 3
            base_install = check_config("BASE_INSTALL_PATH=")
            for dir in os.listdir(base_install): # ptes dir
                # ignore PTF directory
                if not 'ptf' == dir and not os.path.isfile(dir):
                    for subdir in os.listdir(os.path.join(base_install, dir)): # module
                        # Ignore normal files
                        if not os.path.isfile(subdir):
                            module = "modules/%s/%s"%(dir,subdir)
                            # If the install file and install directory differ, search the correct file
                            if(not os.path.isfile(module + '.py')):
                                install_file = find_containing_file("modules/%s"%dir,subdir)
                                module = "modules/%s/%s"%(dir,install_file)
                            # Only update if we have an install file
                            if not 'None' in module:
                                print(("Updating %s") % module)
                                use_module(module, 2)
        # BUGFIX: discover_module_filename() raises for unknown modules; catch
        # it so the "not found" message below is reachable instead of crashing
        try:
            if os.path.isfile(discover_module_filename(prompt[1])):
                counter = 1
        except Exception:
            pass
        if counter == 1:
            while 1:
                try:
                    module = use_module(prompt[1], "0")
                    if "use " in module:
                        prompt = module.split(" ")
                    else: break
                except Exception: break
        if counter == 0:
            print_error("Module name was not found, try retyping it again.")
    # if blanks are used
    if prompt == "":
        base_counter = 1
    if base_counter == 0:
        print_warning("Command was not found, try help or ? for more information.")
# start the main loop
def mainloop():
    """Top-level REPL: read one command at a time and dispatch it until quit."""
    while True:
        # refresh the terminal title on every pass
        set_title("The PenTesters Framework (PTF) v%s" % grab_version)
        try:
            command = input(bcolors.BOLD + "ptf" + bcolors.ENDC + "> ")
        except EOFError:
            # Ctrl-D behaves exactly like typing "quit"
            command = "quit"
            print("")
        handle_prompt(command)
| 52.377914 | 273 | 0.513517 |
from src.core import *
import sys
import readline
import os
import time
import getpass
# pexpect is only needed for gitlab-hosted modules; degrade gracefully when
# it is missing instead of refusing to start
try:
    import pexpect
    pexpect_check = 1
except:
    print("[!] python-pexpect not installed, gitlab will not work")
    print("[!] Run pip install pexpect to install pexpect for gitlab support.")
    pexpect_check = 0
# Python 2 compatibility: make input() behave like raw_input()
try: input = raw_input
except NameError: pass
# honor the -nb / --no-banner command line flags
if '-nb' in sys.argv or '--no-banner' in sys.argv:
    pass
else:
    print (banner)
import random
# random flavor word for the welcome message
funny = random.sample(["Aliens", "Clowns", "Mr. Robot","Zero Cool", "Goats", "Hackers", "Unicorns"], 1)[0]
# per-platform dependency accumulators used by install_update_all
deb_modules = ""
arch_modules = ""
fedora_modules = ""
openbsd_modules = ""
# detect the operating system profile (Kali gets special-cased)
if check_kali() == "Kali": os_profile = "Kali"
else: os_profile = profile_os()
print_status("Operating system detected as: " + bcolors.BOLD + os_profile + bcolors.ENDC)
# Debian-based systems need the i386 architecture enabled for 32-bit tools
if profile_os() == "DEBIAN":
    subprocess.Popen("sudo dpkg --add-architecture i386", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
print_status("Welcome to PTF - where everything just works...Because.." +
             bcolors.BOLD + funny + bcolors.ENDC)
print ("""
   For a list of available commands type ? or help
""")
# modules excluded from install_update_all runs (comma separated in config)
ignore_update_these = []
if check_config("IGNORE_UPDATE_ALL_MODULES") is not None:
    ignore_update_these = check_config("IGNORE_UPDATE_ALL_MODULES").split(",")
def ignore_update_all_module(module):
    """Return True when *module* is excluded from install_update_all runs.

    Entries in ignore_update_these may be exact module names or a directory
    wildcard ending in "/*".
    """
    for pattern in ignore_update_these:
        if "/*" in pattern:
            # directory wildcard: match anything under that prefix
            if pattern[:-1] in module:
                return True
        elif (os.getcwd() + "/" + pattern + ".py") == module:
            # exact module match against the absolute file path
            return True
    return False
# modules excluded from all processing (comma separated IGNORE_THESE_MODULES)
ignore_these = []
if check_config("IGNORE_THESE_MODULES") is not None:
    ignore_these = check_config("IGNORE_THESE_MODULES").split(",")
    # an empty or quote-only config value means "ignore nothing"
    if ignore_these[0] != "":
        if ignore_these[0] != '"':
            print_info("Ignoring the following modules: " +
                       (", ").join(ignore_these))
# ignore modules if they are specified in the ptf.config
def ignore_module(module):
    """Return True (and print a warning) when *module* is ignored by config.

    Entries in ignore_these may be exact module names or a directory
    wildcard ending in "/*".
    """
    ignored = False
    for pattern in ignore_these:
        if "/*" in pattern:
            # directory wildcard: match anything under that prefix
            if pattern[:-1] in module:
                ignored = True
        elif (os.getcwd() + "/" + pattern + ".py") == module:
            # exact module match against the absolute file path
            ignored = True
    if ignored:
        print_warning("Ignoring module: " + module)
    return ignored
# optional allow-list: when set, only these modules are processed
include_these = []
if check_config("INCLUDE_ONLY_THESE_MODULES") is not None:
    include_these = check_config("INCLUDE_ONLY_THESE_MODULES").split(",")
    # an empty or quote-only config value means "include everything"
    if include_these[0] != "":
        if include_these[0] != '"':
            print_info("Including only the following modules: " +
                       (", ").join(include_these))
        else:
            include_these = []
    else:
        include_these = []
def include_module(module):
    """Return True when *module* passes the INCLUDE_ONLY_THESE_MODULES filter.

    An empty include list means every module is allowed.  Entries may be
    exact module names or a directory wildcard ending in "/*".
    """
    if not include_these:
        return True
    included = False
    for pattern in include_these:
        if "/*" in pattern:
            # directory wildcard: match anything under that prefix
            if pattern[:-1] in module:
                included = True
        elif (os.getcwd() + "/" + pattern + ".py") == module:
            # exact module match against the absolute file path
            included = True
    if included:
        print_status("Including module: " + module)
    return included
def show_module():
    """Print the table of every available PTF module with its description."""
    modules_path = os.getcwd() + "/modules/"
    print ("\n")
    print((bcolors.BOLD + "The PenTesters Framework Modules" + bcolors.ENDC))
    print(("""=================================
    """) + (bcolors.BOLD) + ("""Name Description """) + (bcolors.ENDC) + ("""
    ---- ---------------
    """))
    # the two meta-modules are listed by hand since they carry no DESCRIPTION
    print (
        " modules/install_update_all This will install or update all tools with modules within PTF")
    print (
        " modules/update_installed This will update all installed tools within PTF")
    for path, subdirs, files in os.walk(modules_path):
        for name in sorted(files):
            filename = os.path.join(path, name)
            if not name in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                # turn the absolute path into a display-friendly module name
                filename_short = filename.replace(os.getcwd() + "/", "")
                filename_short = filename_short.replace(".py", "")
                filename_short = filename_short.replace(".txt", "")
                filename_short = str(filename_short)
                description = module_parser(filename, "DESCRIPTION")
                if description != None:
                    # pad names to a 53-column field so descriptions line up
                    temp_number = 53 - len(filename_short)
                    print(" " + filename_short + " " * temp_number + description)
    print("\n")
def show_new_modules():
    """List modules whose INSTALL_LOCATION does not yet exist on disk.

    A module counts as "new" when its install directory is absent, i.e. the
    tool has not been installed through PTF yet.
    """
    modules_path = os.getcwd() + "/modules/"
    for path, subdirs, files in os.walk(modules_path):
        for name in sorted(files):
            filename = os.path.join(path, name)
            if not name in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                module = filename_to_module(filename)
                description = module_parser(filename, "DESCRIPTION")
                location = module_parser(filename,"INSTALL_LOCATION")
                # NOTE(review): the path.replace("ptf/modules/","") rewrite
                # assumes the checkout lives in a directory named "ptf" -
                # confirm against the intended install layout
                if not ((location is None) or (os.path.exists(os.path.join(path.replace("ptf/modules/",""), location)))):
                    if description != None:
                        # pad to a 53-column field so descriptions align
                        temp_number = 53 - len(module)
                        print(" " + module + " " * temp_number + description)
    print("\n")
def get_password_gitlab():
    """Prompt once for the Gitlab key passphrase and cache it globally.

    The passphrase is stored in the module-level password_gitlab variable so
    repeated gitlab operations do not re-prompt; "" means "not asked yet".
    """
    # first use in this session: create the cache variable
    if not 'password_gitlab' in globals():
        global password_gitlab
        password_gitlab = ""
    # only ask when nothing has been cached yet
    if password_gitlab == "":
        password_gitlab = getpass.getpass('Enter passphrase for Gitlab modules key (let blank if no passphrase) : ')
def discover_module_filename(module):
    """Resolve a module name to the full path of its module file.

    Names containing a slash (or one of the special meta-module names) are
    mapped directly under the PTF root; bare tool names are searched for in
    the modules/ tree.  Raises Exception when nothing matches.
    """
    SPECIAL_MODULE_NAMES = ("install_update_all", "update_installed", "custom_list", "__init__",)
    # custom lists are .txt files; everything else is a python module
    module_suffix = ".txt" if "custom_list" in module else ".py"
    # already a path (or special meta-module)? map it under the PTF root
    if '/' in module or any(map(module.__contains__, SPECIAL_MODULE_NAMES)):
        return definepath() + "/" + module + module_suffix
    # otherwise walk the modules tree for a script with a matching name
    modules_path = os.getcwd() + "/modules/"
    for path, subdirs, files in os.walk(modules_path):
        for name in sorted(files):
            if name in ('__init__.py', 'install_update_all.py', 'update_installed.py'):
                continue
            name_short = name.replace(".py","")
            if name_short == module:
                return os.path.join(path, name)
    raise Exception("module not found")
def filename_to_module(filename):
    """Convert a module file path into its PTF-relative module name.

    Strips the current working directory prefix and the .py/.txt suffix,
    e.g. /cwd/modules/x/tool.py -> modules/x/tool.
    """
    cwd_prefix = os.getcwd() + "/"
    relative = filename.replace(cwd_prefix, "").replace(".py", "")
    relative = relative.replace(cwd_prefix, "").replace(".txt", "")
    return str(relative)
def use_module(module, all_trigger):
    """Interactive driver that installs or updates a single PTF module.

    module      -- module name/path (e.g. "modules/exploitation/metasploit")
    all_trigger -- mode selector (str or int): 0 = interactive prompt loop,
                   1 = non-interactive "run" (install or update),
                   2 = non-interactive "update".

    Returns the string "None" when the user backs out, or a "use <module>"
    command string so the caller can switch to another module. In the
    non-interactive modes the loop breaks after a single pass.
    """
    prompt = ("")
    if not "install_update_all" in module and not "update_installed" in module and not "__init__" in module and not "custom_list" in module:
        # set terminal title
        set_title("ptf - %s" % module)
        # if we are using a normal module
        if int(all_trigger) == 0 or int(all_trigger) == 1 or int(all_trigger) == 2:
            filename = discover_module_filename(module)
            module = filename_to_module(filename)
            # grab the author
            try:
                author = module_parser(filename, "AUTHOR")
            except TypeError:
                author = "Invalid"
            # grab the description
            description = module_parser(filename, "DESCRIPTION")
            # grab install type
            install_type = module_parser(filename, "INSTALL_TYPE")
            # if were are tool depends for other modules prior to install
            tool_depend = module_parser(filename, "TOOL_DEPEND")
            # Since unicorn requires metasploit to be installed in order to generate the payloads,
            # by default PTF will install or update metasploit.
            # Here it will ask what the user wants to do for if they already have msf installed
            # If they do, it will skip, else it will install
            if 'metasploit' in tool_depend and 'unicorn' in module:
                print_warning("Unicorn requires Metasploit Framework to be installed.")
                # Check if metasploit is installed
                if os.path.isdir("/opt/metasploit-framework/"):
                    print_info("Seems like you have Metasploit Framework already installed")
                    install_unicorn = input("Do you want to update metasploit? (y/n) (default is yes) ").lower()
                    # Do we want to update metasploit now or later
                    # If yes, then this will run as this part never existed
                    if install_unicorn == 'y':
                        print_info("Once you enter run, update, install or upgrade I will install metasploit for you")
                        pass
                    # If we do not want to update, then it will skip metasploit update
                    elif install_unicorn == 'n':
                        print_info("Skipping metasploit installation/update")
                        tool_depend = ""
                    else:
                        # If we enter anything but 'y' or 'n', it will continue like normal
                        print_info("No input detected. I will continue as normal and update metasploit")
                        pass
                else:
                    # If metasploit is not installed, then we will run as this part never existed
                    print_warning("Metasploit Framework is NOT installed. Therefore, I will install it for you")
                    pass
            else:
                pass
            # if the module path is wrong, throw a warning
            try:
                if not os.path.isfile(tool_depend + ".py"):
                    if len(tool_depend) > 1: print_warning("Tool depend: " + tool_depend + " not found. Ensure the module is pointing to a module location.")
            except TypeError: pass
            # grab repository location
            repository_location = module_parser(filename, "REPOSITORY_LOCATION")
            # custom work for zaproxy
            if "zaproxy" in repository_location: repository_location = zaproxy()
            # here we check if we need to do x86 or x64
            if module_parser(filename, "X64_LOCATION") != "":
                # grab architecture
                arch_detect = arch()
                if "64bit" in arch_detect:
                    repository_location = module_parser(filename, "X64_LOCATION")
            # grab install path
            base_install = check_config("BASE_INSTALL_PATH=")
            strorganize_dirs = check_config("USE_DIRECTORY_ORGANIZATION=")
            install_base_location = module_parser(filename, "INSTALL_LOCATION")
            module_split = module.split("/")
            module_split = module_split[1]
            if strorganize_dirs == "False":
                organize_dirs = False
            else:
                # Default to True
                organize_dirs = True
            if bool(organize_dirs) == True: install_location = os.path.expanduser(base_install + "/" + module_split + "/" + install_base_location + "/")
            else:
                install_location = base_install + "/" + install_base_location + "/"
            while 1:
                # if we aren't doing update/install all
                if int(all_trigger) == 0:
                    try:
                        prompt = input(bcolors.BOLD + "ptf:" + bcolors.ENDC + "(" + bcolors.RED + "%s" % module + bcolors.ENDC + ")>")
                    except EOFError:
                        prompt = "back"
                        print("")
                if prompt == "back" or prompt == "quit" or prompt == "exit":
                    return "None"
                if prompt == "?" or prompt == "help":
                    show_help_menu()
                if prompt == "show modules": print_warning("In order to show modules, you must type 'back' first")
                if "use " in prompt:
                    return prompt
                if "search " in prompt:
                    search(prompt)
                if "show " in prompt:
                    # NOTE(review): `split` is an undefined name here, so any
                    # "show ..." input raises NameError -- including
                    # "show options", which therefore never reaches its handler
                    # below. Presumably prompt.split(...) was intended; confirm
                    # the intended behavior before fixing.
                    prompt = split("/","")[1]
                    search(prompt)
                if prompt.lower() == "show options":
                    print("Module options (%s):" % module)
                    if module != "modules/install_update_all":
                        print("\n\n")
                        print(bcolors.BOLD + "Module Author: " + bcolors.ENDC + author)
                        print(bcolors.BOLD + "Module Description: " + bcolors.ENDC + description)
                        print("-------------------------------------------------------------------------------------")
                        print(bcolors.BOLD + "INSTALL_TYPE: " + bcolors.ENDC + install_type)
                        print(bcolors.BOLD + "REPOSITORY_LOCATION: " + bcolors.ENDC + repository_location)
                        print(bcolors.BOLD + "INSTALL_LOCATION: " + bcolors.ENDC + install_location)
                        print("-------------------------------------------------------------------------------------")
                if prompt.lower().startswith("set"):
                    set_breakout = prompt.split(" ")
                    if set_breakout[1].upper() == "INSTALL_TYPE":
                        install_type = set_breakout[2]
                    if set_breakout[1].upper() == "REPOSITORY_LOCATION":
                        repository_location = set_breakout[2]
                    if set_breakout[1].upper() == "INSTALL_LOCATION":
                        install_location = set_breakout[2]
                if not "show options" in prompt.lower():
                    # recursively install/update tool dependencies first;
                    # TOOL_DEPEND may be space- or comma-separated
                    if len(tool_depend) > 1:
                        try:
                            if " " in tool_depend:
                                tool_depend = tool_depend.split(" ")
                                for tool in tool_depend: use_module(tool, "1")
                            elif "," in tool_depend:
                                tool_depend = tool_depend.split(",")
                                for tool in tool_depend: use_module(tool, "1")
                            else:
                                use_module(tool_depend, "1")
                        except: pass
                    if len(tool_depend) < 1:
                        if int(all_trigger) == 1:
                            prompt = "run"
                        if int(all_trigger) == 2:
                            prompt = "update"
                if prompt.lower() == "run":
                    check_blank_dir(install_location)
                    if os.path.isdir(install_location):
                        print_status(
                            "Detected installation already. Going to upgrade for you.")
                        prompt = "update"
                    else:
                        print_status(
                            "Tool not installed yet, will run through install routine")
                        prompt = "install"
                if module_parser(filename, "BYPASS_UPDATE") == "YES":
                    if prompt.lower() == "update":
                        prompt = "install"
                if prompt.lower() == "update" or prompt.lower() == "upgrade":
                    if not "__init__.py" in filename and not ignore_module(filename):
                        # move to the location
                        if os.path.isdir(install_location):
                            if install_type.lower() == "git":
                                print_status("Updating the tool, be patient while git pull is initiated.")
                                proc = subprocess.Popen("cd %s;git pull" % (install_location), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                                # check launcher
                                launcher(filename, install_location)
                                # here we check to see if we need anything we need to
                                # run after things are updated
                                update_counter = 0
                                # NOTE(review): communicate() returns a (stdout, stderr)
                                # tuple of bytes, so this str membership test is always
                                # False and after_commands() always runs; confirm whether
                                # stdout should be decoded and searched instead.
                                if not "Already up-to-date." in proc.communicate():
                                    after_commands(filename, install_location)
                                    update_counter = 1
                                else: print_status("Tool already up-to-date!")
                                print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                # run after commands
                                if update_counter == 0:
                                    after_commands(filename, install_location)
                            if install_type.lower() == "gitlab":
                                if pexpect_check == 0:
                                    print("[!] You can't use gitlab features unless you install pexpect. Please install pexpect in order to use these features. Install option: pip install python-pexpect.")
                                else:
                                    print_status("Updating the tool, be patient while git pull is initiated.")
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git -C %s pull' % (install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                    launcher(filename, install_location)
                                    update_counter = 0
                                    # NOTE(review): EOF was already consumed by the
                                    # expect() call above, so this appears to match EOF
                                    # again immediately, making the
                                    # "Already up-to-date!" branch unreachable -- verify.
                                    i = proc.expect(['Already up-to-date!', pexpect.EOF])
                                    if i == 1:
                                        after_commands(filename, install_location)
                                        update_counter = 1
                                    elif i == 0:
                                        print_status("Tool already up-to-date!")
                                    print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                    if update_counter == 0:
                                        after_commands(filename, install_location)
                            if install_type.lower() == "svn":
                                print_status("Updating the tool, be patient while svn pull is initiated.")
                                proc = subprocess.Popen("cd %s;svn update" % (install_location), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                                try:
                                    # NOTE(review): communicate() is invoked twice on the
                                    # same process here and returns bytes (written to a
                                    # text-mode file); the second call yields empty
                                    # output, so this cache-compare logic is suspect --
                                    # verify before relying on the "goat" branch.
                                    if not os.path.isfile(install_location + "/.goatsvn_storage"):
                                        filewrite = open(install_location + "/.goatsvn_storage", "w")
                                        filewrite.write(proc.communicate()[0])
                                        filewrite.close()
                                    if os.path.isfile(install_location + "/.goatsvn_storage"):
                                        cmp = open(install_location +
                                                   "/.goatsvn_storage", "r").read()
                                        if cmp != proc.communicate()[0]:
                                            prompt = "goat"
                                except:
                                    pass
                                finally:
                                    proc.wait()
                                print_status("Finished Installing! Enjoy the tool installed under: " + (install_location))
                                launcher(filename, install_location)
                                if prompt != "update": after_commands(filename, install_location)
                                print_status("Running updatedb to tidy everything up.")
                                subprocess.Popen("updatedb", shell=True).wait()
                        if not os.path.isdir(install_location):
                            print_error("The tool was not found in the install location. Try running install first!")
                if prompt.lower() == "install":
                    if not "__init__.py" in filename and not ignore_module(filename):
                        # grab the OS type, DEBIAN, FEDORA, CUSTOM, BSD!!!! WOW!!,
                        ostype = profile_os()
                        # if OSTYPE is DEBIAN
                        if ostype == "DEBIAN":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.debian import base_install_modules
                            # grab all the modules we need
                            deb_modules = module_parser(filename, "DEBIAN")
                            base_install_modules(deb_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is ARCHLINUX
                        if ostype == "ARCHLINUX":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.archlinux import base_install_modules
                            # grab all the modules we need
                            arch_modules = module_parser(filename, "ARCHLINUX")
                            base_install_modules(arch_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is FEDORA
                        if ostype == "FEDORA":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.fedora import base_install_modules
                            # grab all the modules we need
                            fedora_modules = module_parser(filename, "FEDORA")
                            base_install_modules(fedora_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        # if OSTYPE is OPENBSD
                        if ostype == "OPENBSD":
                            print_status("Preparing dependencies for module: " + module)
                            from src.platforms.openbsd import base_install_modules
                            # grab all the modules we need
                            openbsd_modules = module_parser(filename, "OPENBSD")
                            base_install_modules(openbsd_modules)
                            print_status("Pre-reqs for %s have been installed." % (module))
                        print_status("Making the appropriate directory structure first")
                        subprocess.Popen("mkdir -p %s" % install_location, shell=True).wait()
                        # if we are using git
                        if install_type.lower() in ["git","gitlab"]:
                            # if there are files in the install_location, we'll update.
                            if os.listdir(install_location):
                                print_status("Installation already exists, going to git pull then run after commands..")
                                if install_type.lower() == "gitlab":
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git -C %s pull' % (install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                    proc.wait()
                                else:
                                    subprocess.Popen("cd %s;git pull" % (install_location), stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                                print_status("Finished updating the tool located in:" + install_location)
                            else:
                                print_status("%s was the selected method for installation... Using %s to install." % (install_type.upper(), install_type.upper()))
                                print_status("Installing now.. be patient...")
                                if install_type.lower() == "gitlab":
                                    get_password_gitlab()
                                    proc = pexpect.spawn('git clone --depth=1 %s %s' % (repository_location, install_location))
                                    proc.expect('passphrase')
                                    proc.sendline('%s' % password_gitlab)
                                    proc.expect(pexpect.EOF)
                                else:
                                    subprocess.Popen("git clone --depth=1 %s %s" % (repository_location, install_location), stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                                print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                                after_commands(filename, install_location)
                                launcher(filename, install_location)
                        if install_type.lower() == "svn":
                            print_status("SVN was the selected method for installation... Using SVN to install.")
                            subprocess.Popen("svn co %s %s" % (
                                repository_location, install_location), stderr=subprocess.PIPE, shell=True).wait()
                            print_status(
                                "Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        if install_type.lower() == "file":
                            print_status("FILE was the selected method for installation... Using curl -o to install.")
                            repository_file = repository_location.split("/")[-1]
                            subprocess.Popen('curl -k -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30" -o %s%s %s' % (
                                install_location, repository_file, repository_location), stderr=subprocess.PIPE, shell=True).wait()
                            print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        if install_type.lower() == "wget":
                            print_status("WGET was the selected method for installation because it plays better than curl -l with recursive URLs.")
                            subprocess.Popen("cd %s && wget -q %s" % (install_location, repository_location), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
                            print_status("Finished Installing! Enjoy the tool located under: " + install_location)
                            launcher(filename, install_location)
                            after_commands(filename, install_location)
                        print_status("Running updatedb to tidy everything up.")
                        subprocess.Popen("updatedb", shell=True).wait()
                if int(all_trigger) == 1 or int(all_trigger) == 2:
                    break
def find_containing_file(directory, location):
    """Scan the files directly under *directory* for the module file whose
    INSTALL_LOCATION equals *location*.

    Returns the matching file's basename without extension, or None when no
    file matches or the directory cannot be listed.
    """
    needle = 'INSTALL_LOCATION="%s"' % location
    try:
        print("Finding %s in %s" % (location, directory))
        for entry in os.listdir(directory):
            full_path = os.path.join(directory, entry)
            if not os.path.isfile(full_path):
                continue
            with open(full_path) as handle:
                if any(needle in line for line in handle):
                    return os.path.splitext(entry)[0]
    except OSError:
        print_warning("%s is not managed by PTF" % (location))
    return None
def handle_prompt(prompt, force=False):
    """Dispatch one top-level ptf> command.

    prompt -- the raw command string ("help", "show modules", "use <module>",
              "search <term>", ...).
    force  -- when True, skip the interactive confirmation before an
              install/update-everything run (used for scripted installs).

    Unknown commands print a warning. "quit"/"exit"/"back" terminate the
    process via exit_ptf()/sys.exit().
    """
    # specify no commands, if counter increments then a command was found
    base_counter = 0
    # main help menu
    if prompt == "?" or prompt == "help":
        show_help_menu()
        base_counter = 1
    # if we want to exit out
    if prompt == "quit" or prompt == "exit" or prompt == "back":
        base_counter = 1
        exit_ptf()
        sys.exit()
    # if we want to see the modules
    if prompt == "show modules":
        base_counter = 1
        show_module()
    # list new modules
    if prompt == "show new modules":
        base_counter = 1
        show_new_modules()
    # inside joke
    if prompt == "install sleeves":
        print_error("Scott White? Sleeves? F Sleeves. Scott Rules.")
        base_counter = 1
    # search functionality here
    if prompt.startswith("search"):
        base_counter = 1
        search(prompt)
    # if we want to use a module
    if prompt.startswith("use"):
        base_counter = 1
        counter = 0
        prompt = prompt.split(" ")
        # do a quick sanity check to see if the module is there first
        if "install_update_all" in prompt[1] or "custom_list" in prompt[1]:
            counter = 3
            try:
                if not force: install_query = input("[*] You are about to install/update everything. Proceed? [yes/no]:")
                else:
                    print("[*] You are about to install/update everything. Proceed? [yes/no]:yes")
                    install_query = "yes"
            except EOFError:
                install_query = "no"
                print("")
            if install_query.lower() == "yes" or install_query.lower() == "y":
                # do auto update check first
                auto_update()
                # slice off the trailing "install_update_all" (18 chars) to get
                # the directory to walk; custom lists point at a .txt file
                if not "custom_list" in prompt[1]: modules_path = definepath() + "/" + (prompt[1])[:-18]
                # else: modules_path = definepath() + "/modules/"
                else: modules_path = prompt[1] + ".txt"
                # base holder for all debian packages
                deb_modules = ""
                # base holder for all arch packages
                arch_modules = ""
                # base holder for all fedora packages
                fedora_modules = ""
                # base holder for all openbsd packages
                openbsd_modules = ""
                # first we install all depends for all applications
                print_status("We are going to first install all prereqs using apt before installing..")
                print_status("Cycling through modules and grabbing requirements...")
                # if we install the custom_list tools
                if "custom_list" in modules_path:
                    if os.path.isfile(modules_path):
                        fileopen = open(modules_path).readlines()
                        for tools in fileopen:
                            print_status("Installing and/or updating: " + tools.rstrip())
                            # run the module for install
                            use_module(tools.rstrip().replace(".py", ""), "1")
                            time.sleep(0.2)
                # first pass: collect every module's OS package dependencies so
                # they can be installed in a single batch below
                for path, subdirs, files in os.walk(modules_path):
                    for name in files:
                        if "custom_list" in prompt[1] and name[:-4] not in open(definepath() + "/" + prompt[1] + ".txt").read(): break
                        # join the structure
                        filename = os.path.join(path, name)
                        # strip un-needed files
                        if not "__init__.py" in filename and not ignore_module(filename) and include_module(filename) and ".py" in filename and not ".pyc" in filename and not ignore_update_all_module(filename):
                            print("!!!***!!!installing deps for module: " + filename)
                            # shorten it up a little bit
                            filename_short = filename.replace(os.getcwd() + "/", "")
                            # update depend modules
                            filename_short = str(filename_short)
                            ostype = profile_os()
                            # DEBIAN
                            if ostype == "DEBIAN":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.debian import base_install_modules
                                    # grab all the modules we need
                                    deb_modules = deb_modules + "," + module_parser(filename_short, "DEBIAN")
                            # archlinux
                            if ostype == "ARCHLINUX":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.archlinux import base_install_modules
                                    # grab all the modules we need
                                    arch_modules = ""
                                    arch_modules = arch_modules + "," + \
                                        module_parser(filename_short, "ARCHLINUX")
                            # fedora
                            if ostype == "FEDORA":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.fedora import base_install_modules
                                    # grab all the modules we need
                                    fedora_modules = fedora_modules + "," + \
                                        module_parser(filename_short, "FEDORA")
                            # openbsd (was "OPENSBD" -- the typo meant openbsd
                            # dependencies were never collected)
                            if ostype == "OPENBSD":
                                if not "install_update_all" in filename_short and not "custom_list" in filename:
                                    from src.platforms.openbsd import base_install_modules
                                    # grab all the modules we need
                                    openbsd_modules = openbsd_modules + "," + \
                                        module_parser(filename_short, "OPENBSD")
                # install all of the packages at once
                ostype = profile_os()
                if ostype == "DEBIAN":
                    deb_modules = deb_modules.replace(",", " ")
                    if deb_modules != "":
                        base_install_modules(deb_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "ARCHLINUX":
                    arch_modules = arch_modules.replace(",", " ")
                    if arch_modules != "":
                        base_install_modules(arch_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "FEDORA":
                    fedora_modules = fedora_modules.replace(",", " ")
                    if fedora_modules != "":
                        base_install_modules(fedora_modules)
                        print_status("Finished updating depends for modules.")
                if ostype == "OPENBSD":
                    openbsd_modules = openbsd_modules.replace(",", " ")
                    if openbsd_modules != "":
                        base_install_modules(openbsd_modules)
                        print_status("Finished updating depends for modules.")
                # second pass: actually install/update every module
                for path, subdirs, files in os.walk(modules_path):
                    for name in files:
                        if "custom_list" in prompt[1] and name[:-4] not in open(definepath() + "/" + prompt[1] + ".txt").read(): break
                        # join the structure
                        filename = os.path.join(path, name)
                        if not "__init__.py" in filename and not ignore_module(filename) and include_module(filename) and ".py" in filename and not ".pyc" in filename and not "install_update_all" in filename and not "__init__" in filename and not "custom_list" in filename:
                            # strip un-needed files
                            # if not "__init__.py" in filename and not ignore_module(filename):
                            # shorten it up a little bit
                            filename_short = filename.replace(os.getcwd() + "/", "")
                            filename_short = filename_short.replace(".py", "")
                            # check if empty directory - if so purge it before
                            # anything else
                            check_blank_dir(path)
                            print_status("Installing and/or updating: " + filename_short)
                            # run the module for install
                            use_module(filename_short, "1")
                            # sleep a sec
                            time.sleep(0.2)
                # clear the screen
                os.system("clear")
                print ("\n")
                print (
                    """ _   _            _       _   _            ____  _                  _""")
                print (
                    """| | | | __ _  ___| | __  | |_| |__   ___  |  _ \| | __ _ _ __   ___| |_""")
                print (
                    """| |_| |/ _` |/ __| |/ /  | __| '_ \ / _ \ | |_) | |/ _` | '_ \ / _ \ __|""")
                print (
                    """|  _  | (_| | (__|   <   | |_| | | |  __/ |  __/| | (_| | | | |  __/ |_ """)
                print (
                    """|_| |_|\__,_|\___|_|\_\  \__|_| |_|\___| |_|   |_|\__,_|_| |_|\___|\__|\n\n""")
                print_status("All finished installing/and or updating.. All shiny again.\n")
            else:
                print_status("Alright boss. Not installing right now. Tell me when. I want that shiny. I want it now.")
        if "update_installed" in prompt[1]:
            counter = 3
            base_install = check_config("BASE_INSTALL_PATH=")
            for dir in os.listdir(base_install):  # ptes dir
                # ignore PTF directory; the isfile() checks must test the full
                # path (the old code tested bare names relative to the CWD, so
                # plain files under the install root crashed os.listdir below)
                if not 'ptf' == dir and not os.path.isfile(os.path.join(base_install, dir)):
                    for subdir in os.listdir(os.path.join(base_install, dir)):  # module
                        # Ignore normal files
                        if not os.path.isfile(os.path.join(base_install, dir, subdir)):
                            module = "modules/%s/%s" % (dir, subdir)
                            # If the install file and install directory differ, search the correct file
                            if(not os.path.isfile(module + '.py')):
                                install_file = find_containing_file("modules/%s" % dir, subdir)
                                module = "modules/%s/%s" % (dir, install_file)
                            # Only update if we have an install file
                            if not 'None' in module:
                                print(("Updating %s") % module)
                                use_module(module, 2)
        try:
            if os.path.isfile(discover_module_filename(prompt[1])):
                counter = 1
        except Exception:
            # discover_module_filename raises for unknown module names; fall
            # through so the "not found" message below is printed instead of
            # crashing the prompt loop
            pass
        if counter == 1:
            while 1:
                try:
                    module = use_module(prompt[1], "0")
                    if "use " in module:
                        # the user switched modules from inside use_module
                        prompt = module.split(" ")
                    else: break
                except Exception: break
        if counter == 0:
            print_error("Module name was not found, try retyping it again.")
    # if blanks are used
    if prompt == "":
        base_counter = 1
    if base_counter == 0:
        print_warning("Command was not found, try help or ? for more information.")
# start the main loop
def mainloop():
    """Top-level REPL: keep reading ptf> commands and dispatching them.

    EOF on stdin (Ctrl-D) is treated as the "quit" command. This function
    never returns normally; handle_prompt() exits the process on
    quit/exit/back.
    """
    banner = bcolors.BOLD + "ptf" + bcolors.ENDC + "> "
    while True:
        # refresh the terminal title on every pass
        set_title("The PenTesters Framework (PTF) v%s" % grab_version)
        try:
            command = input(banner)
        except EOFError:
            command = "quit"
            print("")
        handle_prompt(command)
| true | true |
f72e9d1be433245d491d2fe54afdb73351728508 | 3,862 | py | Python | tlidb/TLiDB/datasets/clinc150_dataset.py | alon-albalak/TLiDB | 4f3524a3bbe7580e417dd884c4dc8751bdaf8855 | [
"MIT"
] | null | null | null | tlidb/TLiDB/datasets/clinc150_dataset.py | alon-albalak/TLiDB | 4f3524a3bbe7580e417dd884c4dc8751bdaf8855 | [
"MIT"
] | null | null | null | tlidb/TLiDB/datasets/clinc150_dataset.py | alon-albalak/TLiDB | 4f3524a3bbe7580e417dd884c4dc8751bdaf8855 | [
"MIT"
] | null | null | null | from .TLiDB_dataset import TLiDB_Dataset
from tlidb.TLiDB.metrics.all_metrics import Accuracy
class clinc150_dataset(TLiDB_Dataset):
"""
CLINC150 dataset
This is the full dataset from https://github.com/clinc/oos-eval
Input (x):
- text (str): Text utterance
Target (y):
- label (list): List of [Domain, Intent] labels
Metadata:
- domain (str): Domain of the utterance
"""
_dataset_name = "clinc150"
_tasks = ["intent_detection"]
_url = "https://drive.google.com/uc?export=download&id=1dG6KXQ6L7xpbnWmhW9Xo3vPSfYstk43E"
def __init__(self, task, dataset_folder, model_type, split=None):
assert task in self._tasks, f"{task} is not a valid task for {self._dataset_name}"
super().__init__(self.dataset_name, task, model_type, dataset_folder=dataset_folder)
# initialize task data and metadata
categories = [
"auto and commute","banking","credit cards","home",
"kitchen and dining","meta","small talk","travel",
"utility","work"
]
self._input_array = []
self._y_array = []
self._metadata_fields = ["domains"]
self._metadata_array = [[] for _ in self._metadata_fields]
# convert labels to human readable
labels = [label.replace("_"," ") for label in self.task_labels]
formatted_labels = []
for label in labels:
for c in categories:
if c == label[:len(c)]:
formatted_label = c+":"+label[len(c):]
formatted_labels.append(formatted_label)
self.task_labels = formatted_labels
for datum in self.dataset['data']:
if split and datum['dialogue_metadata']['original_data_partition'] != split:
continue
utterance = datum['dialogue'][0]
domain = utterance['intent_detection']['domain']
intent = utterance['intent_detection']['intent']
self._input_array.append(utterance['utterance'])
self._y_array.append([domain, intent])
self.get_metadata_field("domains").append(domain)
self._num_classes = len(self.task_labels)
self._y_size = len(self._y_array)
def get_input(self, idx):
return self._input_array[idx]
def get_metadata(self, idx):
return {
"domains": self.get_metadata_field("domains")[idx],
}
def _collate_encoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
return X,y, metadata
def _collate_decoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
def _collate_encoderdecoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata | 36.780952 | 93 | 0.551787 | from .TLiDB_dataset import TLiDB_Dataset
from tlidb.TLiDB.metrics.all_metrics import Accuracy
class clinc150_dataset(TLiDB_Dataset):
_dataset_name = "clinc150"
_tasks = ["intent_detection"]
_url = "https://drive.google.com/uc?export=download&id=1dG6KXQ6L7xpbnWmhW9Xo3vPSfYstk43E"
def __init__(self, task, dataset_folder, model_type, split=None):
assert task in self._tasks, f"{task} is not a valid task for {self._dataset_name}"
super().__init__(self.dataset_name, task, model_type, dataset_folder=dataset_folder)
categories = [
"auto and commute","banking","credit cards","home",
"kitchen and dining","meta","small talk","travel",
"utility","work"
]
self._input_array = []
self._y_array = []
self._metadata_fields = ["domains"]
self._metadata_array = [[] for _ in self._metadata_fields]
labels = [label.replace("_"," ") for label in self.task_labels]
formatted_labels = []
for label in labels:
for c in categories:
if c == label[:len(c)]:
formatted_label = c+":"+label[len(c):]
formatted_labels.append(formatted_label)
self.task_labels = formatted_labels
for datum in self.dataset['data']:
if split and datum['dialogue_metadata']['original_data_partition'] != split:
continue
utterance = datum['dialogue'][0]
domain = utterance['intent_detection']['domain']
intent = utterance['intent_detection']['intent']
self._input_array.append(utterance['utterance'])
self._y_array.append([domain, intent])
self.get_metadata_field("domains").append(domain)
self._num_classes = len(self.task_labels)
self._y_size = len(self._y_array)
def get_input(self, idx):
return self._input_array[idx]
def get_metadata(self, idx):
return {
"domains": self.get_metadata_field("domains")[idx],
}
def _collate_encoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
return X,y, metadata
def _collate_decoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
def _collate_encoderdecoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata | true | true |
f72e9dda78207301c3e7659b1baf989a356f877b | 9,707 | py | Python | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | [
"BSD-3-Clause"
] | null | null | null | import aesara
from aesara import tensor as at
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.graph.opt import TopoOptimizer, copy_stack_trace, local_optimizer
def get_diagonal_subtensor_view(x, i0, i1):
    """
    Helper function for DiagonalSubtensor and IncDiagonalSubtensor.

    Parameters
    ----------
    x : ndarray
    i0, i1 : int (or 0-dim integer ndarray)
        Axis indices into ``x``; requires ``x.shape[i0] >= x.shape[i1]``.

    Notes
    -----
    It returns a writable partial *view* of x (writes propagate back to x),
    not a partial copy.
    """
    # We have to cast i0 and i1 to int because python
    # does not support indexing with 0-dim, 'int*' ndarrays.
    i0 = int(i0)
    i1 = int(i1)
    if x.shape[i0] < x.shape[i1]:
        raise NotImplementedError("is this allowed?")
    idx = [slice(None)] * x.ndim
    # drop the first shape[i1]-1 entries along axis i0 (the chopped triangle)
    idx[i0] = slice(x.shape[i1] - 1, None, None)
    xview = x.__getitem__(tuple(idx))
    if x.shape[i1] != 1:
        # Shearing trick: stepping along axis i1 also steps backwards along
        # axis i0, so index (..., r, ..., c, ...) maps onto the diagonal
        # stripe of x. Re-stride through the supported as_strided API instead
        # of assigning to ndarray.strides (deprecated in modern NumPy);
        # the result is still a writable view of x.
        strides = list(xview.strides)
        strides[i1] -= strides[i0]
        xview = np.lib.stride_tricks.as_strided(
            xview, shape=xview.shape, strides=strides
        )
    return xview
class DiagonalSubtensor(Op):
    """
    Extract the "thick diagonal" stripe of an n-d tensor along two axes.

    Parameters
    ----------
    x
        n-d tensor.
    i0
        Index of the "row" axis of ``x``.
    i1
        Index of the "column" axis of ``x``.

    Notes
    -----
    Works on the GPU.

    Viewing the (i0, i1) slice of ``x`` as a matrix ``u`` with R rows and
    C columns, the output keeps only the thick diagonal band marked ``x``
    below, discarding the lower-left and upper-right triangles::

        [ x 0 0 0 ]
        [ x x 0 0 ]
        [ x x x 0 ]
        [ 0 x x x ]
        [ 0 0 x x ]
        [ 0 0 0 x ]

    The output has the same number of dimensions as the input; only the
    size along axis ``i0`` shrinks by ``shape[i1] - 1`` (the chopped
    triangles). When ``shape[i0] < shape[i1]`` the stripe cannot reach
    across the matrix and ``perform`` raises NotImplementedError.

    When constructed with ``inplace=True`` the output is a view of the
    input (registered in ``view_map``); otherwise a copy is returned.
    """

    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # the output aliases input 0
            self.view_map = {0: [0]}

    def __str__(self):
        base = self.__class__.__name__
        return base + "{inplace}" if self.inplace else base

    def make_node(self, x, i0, i1):
        axis0 = at.as_tensor_variable(i0)
        axis1 = at.as_tensor_variable(i1)
        return Apply(self, [x, axis0, axis1], [x.type()])

    def perform(self, node, inputs, output_storage):
        view = get_diagonal_subtensor_view(*inputs)
        output_storage[0][0] = view if self.inplace else view.copy()

    def grad(self, inputs, g_outputs):
        # scatter the output gradient back into a zero tensor shaped like x;
        # the axis arguments are integer indices and thus disconnected
        zeros = at.zeros_like(inputs[0])
        gx = inc_diagonal_subtensor(zeros, inputs[1], inputs[2], g_outputs[0])
        return [gx, DisconnectedType()(), DisconnectedType()()]

    def connection_pattern(self, node):
        return [[True], [False], [False]]


diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op):
    """
    Add an increment into the diagonal stripe selected by DiagonalSubtensor.

    This Op is the gradient counterpart of DiagonalSubtensor: it takes a
    tensor ``x``, the two axis indices ``i0``/``i1``, and an increment
    ``amt`` shaped like the stripe, and returns ``x`` with ``amt`` added
    into the stripe positions. With ``inplace=True`` it destroys input 0
    (registered in ``destroy_map``); otherwise it operates on a copy.
    """

    __props__ = ("inplace",)

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # input 0 is overwritten to produce the output
            self.destroy_map = {0: [0]}

    def __str__(self):
        base = self.__class__.__name__
        return base + "{inplace}" if self.inplace else base

    def make_node(self, x, i0, i1, amt):
        axis0 = at.as_tensor_variable(i0)
        axis1 = at.as_tensor_variable(i1)
        return Apply(self, [x, axis0, axis1, amt], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, i0, i1, amt = inputs
        target = x if self.inplace else x.copy()
        # the stripe view writes through to `target`
        view = get_diagonal_subtensor_view(target, i0, i1)
        view += amt
        output_storage[0][0] = target

    def grad(self, inputs, g_outputs):
        x, i0, i1, amt = inputs
        gy = g_outputs[0]
        # d/dx passes the gradient straight through; d/damt reads the
        # stripe of the output gradient; the axis indices are disconnected
        return [
            gy,
            DisconnectedType()(),
            DisconnectedType()(),
            diagonal_subtensor(gy, i0, i1),
        ]

    def connection_pattern(self, node):
        return [[True], [False], [False], [True]]


inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(
    signals, filters, signals_shape=None, filters_shape=None, border_mode="valid"
):
    """
    Convolve spatio-temporal filters with a movie.
    It flips the filters.
    Parameters
    ----------
    signals
        Timeseries of images whose pixels have color channels.
        Shape: [Ns, Ts, C, Hs, Ws].
    filters
        Spatio-temporal filters.
        Shape: [Nf, Tf, C, Hf, Wf].
    signals_shape
        None or a tuple/list with the shape of signals.
    filters_shape
        None or a tuple/list with the shape of filters.
    border_mode
        One of 'valid', 'full' or 'half'; either a single string applied to
        all three axes or a (time, height, width) triple.
    Notes
    -----
    Another way to define signals: (batch, time, in channel, row, column)
    Another way to define filters: (out channel,time,in channel, row, column)
    For the GPU, use nnet.conv3d.
    See Also
    --------
    Someone made a script that shows how to swap the axes between
    both 3d convolution implementations in Aesara. See the last
    `attachment <https://groups.google.com/d/msg/aesara-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_
    """
    if isinstance(border_mode, str):
        border_mode = (border_mode, border_mode, border_mode)
    if signals_shape is None:
        _signals_shape_5d = signals.shape
    else:
        _signals_shape_5d = signals_shape
    if filters_shape is None:
        _filters_shape_5d = filters.shape
    else:
        _filters_shape_5d = filters_shape
    Ns, Ts, C, Hs, Ws = _signals_shape_5d
    Nf, Tf, C, Hf, Wf = _filters_shape_5d
    # Fold the time axis into the batch/filter axes so one 2D convolution
    # covers every (frame, filter-slice) pair at once.
    _signals_shape_4d = (Ns * Ts, C, Hs, Ws)
    _filters_shape_4d = (Nf * Tf, C, Hf, Wf)
    if border_mode[1] != border_mode[2]:
        raise NotImplementedError("height and width bordermodes must match")
    conv2d_signal_shape = _signals_shape_4d
    conv2d_filter_shape = _filters_shape_4d
    # Only pass static shapes to conv2d when the caller actually supplied
    # them; otherwise conv2d must infer shapes symbolically.
    if signals_shape is None:
        conv2d_signal_shape = None
    if filters_shape is None:
        conv2d_filter_shape = None
    out_4d = aesara.tensor.nnet.conv2d(
        signals.reshape(_signals_shape_4d),
        filters.reshape(_filters_shape_4d),
        input_shape=conv2d_signal_shape,
        filter_shape=conv2d_filter_shape,
        border_mode=border_mode[1],
    )  # ignoring border_mode[2]
    # compute the intended output size
    if border_mode[1] == "valid":
        Hout = Hs - Hf + 1
        Wout = Ws - Wf + 1
    elif border_mode[1] == "full":
        Hout = Hs + Hf - 1
        Wout = Ws + Wf - 1
    elif border_mode[1] == "half":
        Hout = Hs - (Hf % 2) + 1
        Wout = Ws - (Wf % 2) + 1
    elif border_mode[1] == "same":
        raise NotImplementedError()
    else:
        raise ValueError("invalid border mode", border_mode[1])
    # reshape the temporary output to restore its original size
    out_tmp = out_4d.reshape((Ns, Ts, Nf, Tf, Hout, Wout))
    # now sum out along the Tf to get the output
    # but we have to sum on a diagonal through the Tf and Ts submatrix.
    if Tf == 1:
        # for Tf==1, no sum along Tf, the Ts-axis of the output is unchanged!
        out_5d = out_tmp.reshape((Ns, Ts, Nf, Hout, Wout))
    else:
        # for some types of convolution, pad out_tmp with zeros
        if border_mode[0] == "valid":
            Tpad = 0
        elif border_mode[0] == "full":
            Tpad = Tf - 1
        elif border_mode[0] == "half":
            Tpad = Tf // 2
        elif border_mode[0] == "same":
            raise NotImplementedError()
        else:
            raise ValueError("invalid border mode", border_mode[0])
        if Tpad == 0:
            # Diagonal sum over (Ts, Tf) implements the temporal convolution.
            out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
        else:
            # pad out_tmp with zeros before summing over the diagonal
            out_tmp_padded = at.zeros(
                dtype=out_tmp.dtype, shape=(Ns, Ts + 2 * Tpad, Nf, Tf, Hout, Wout)
            )
            out_tmp_padded = aesara.tensor.subtensor.set_subtensor(
                out_tmp_padded[:, Tpad : (Ts + Tpad), :, :, :, :], out_tmp
            )
            out_5d = diagonal_subtensor(out_tmp_padded, 1, 3).sum(axis=3)
    return out_5d
@local_optimizer([DiagonalSubtensor, IncDiagonalSubtensor])
def local_inplace_DiagonalSubtensor(fgraph, node):
    """Replace a non-inplace (Inc)DiagonalSubtensor node with its in-place variant.

    Also work for IncDiagonalSubtensor.
    """
    if (
        isinstance(node.op, (DiagonalSubtensor, IncDiagonalSubtensor))
        and not node.op.inplace
    ):
        # Same op class, same inputs -- only the inplace flag changes.
        new_op = node.op.__class__(inplace=True)
        new_node = new_op(*node.inputs)
        # Keep debugging provenance from the original output variable.
        copy_stack_trace(node.outputs[0], new_node)
        return [new_node]
    return False
# Register the in-place substitution at position 60 so it only applies under
# the "fast_run"/"inplace" optimization tags, after the main rewrites.
aesara.compile.optdb.register(
    "local_inplace_DiagonalSubtensor",
    TopoOptimizer(
        local_inplace_DiagonalSubtensor, failure_callback=TopoOptimizer.warn_inplace
    ),
    60,
    "fast_run",
    "inplace",
)
| 29.685015 | 89 | 0.609457 | import aesara
from aesara import tensor as at
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.graph.opt import TopoOptimizer, copy_stack_trace, local_optimizer
def get_diagonal_subtensor_view(x, i0, i1):
i0 = int(i0)
i1 = int(i1)
if x.shape[i0] < x.shape[i1]:
raise NotImplementedError("is this allowed?")
idx = [slice(None)] * x.ndim
idx[i0] = slice(x.shape[i1] - 1, None, None)
xview = x.__getitem__(tuple(idx))
strides = list(xview.strides)
if x.shape[i1] != 1:
strides[i1] -= strides[i0]
xview.strides = strides
return xview
class DiagonalSubtensor(Op):
__props__ = ("inplace",)
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}"
def __init__(self, inplace=False):
self.inplace = inplace
if inplace:
self.view_map = {0: [0]}
def make_node(self, x, i0, i1):
_i0 = at.as_tensor_variable(i0)
_i1 = at.as_tensor_variable(i1)
return Apply(self, [x, _i0, _i1], [x.type()])
def perform(self, node, inputs, output_storage):
xview = get_diagonal_subtensor_view(*inputs)
if self.inplace:
output_storage[0][0] = xview
else:
output_storage[0][0] = xview.copy()
def grad(self, inputs, g_outputs):
z = at.zeros_like(inputs[0])
gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
return [gx, DisconnectedType()(), DisconnectedType()()]
def connection_pattern(self, node):
rval = [[True], [False], [False]]
return rval
diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op):
__props__ = ("inplace",)
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}"
def __init__(self, inplace=False):
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
def make_node(self, x, i0, i1, amt):
_i0 = at.as_tensor_variable(i0)
_i1 = at.as_tensor_variable(i1)
return Apply(self, [x, _i0, _i1, amt], [x.type()])
def perform(self, node, inputs, output_storage):
x, i0, i1, amt = inputs
if not self.inplace:
x = x.copy()
xview = get_diagonal_subtensor_view(x, i0, i1)
xview += amt
output_storage[0][0] = x
def grad(self, inputs, g_outputs):
x, i0, i1, amt = inputs
gy = g_outputs[0]
return [
gy,
DisconnectedType()(),
DisconnectedType()(),
diagonal_subtensor(gy, i0, i1),
]
def connection_pattern(self, node):
rval = [[True], [False], [False], [True]]
return rval
inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(
signals, filters, signals_shape=None, filters_shape=None, border_mode="valid"
):
if isinstance(border_mode, str):
border_mode = (border_mode, border_mode, border_mode)
if signals_shape is None:
_signals_shape_5d = signals.shape
else:
_signals_shape_5d = signals_shape
if filters_shape is None:
_filters_shape_5d = filters.shape
else:
_filters_shape_5d = filters_shape
Ns, Ts, C, Hs, Ws = _signals_shape_5d
Nf, Tf, C, Hf, Wf = _filters_shape_5d
_signals_shape_4d = (Ns * Ts, C, Hs, Ws)
_filters_shape_4d = (Nf * Tf, C, Hf, Wf)
if border_mode[1] != border_mode[2]:
raise NotImplementedError("height and width bordermodes must match")
conv2d_signal_shape = _signals_shape_4d
conv2d_filter_shape = _filters_shape_4d
if signals_shape is None:
conv2d_signal_shape = None
if filters_shape is None:
conv2d_filter_shape = None
out_4d = aesara.tensor.nnet.conv2d(
signals.reshape(_signals_shape_4d),
filters.reshape(_filters_shape_4d),
input_shape=conv2d_signal_shape,
filter_shape=conv2d_filter_shape,
border_mode=border_mode[1],
)
if border_mode[1] == "valid":
Hout = Hs - Hf + 1
Wout = Ws - Wf + 1
elif border_mode[1] == "full":
Hout = Hs + Hf - 1
Wout = Ws + Wf - 1
elif border_mode[1] == "half":
Hout = Hs - (Hf % 2) + 1
Wout = Ws - (Wf % 2) + 1
elif border_mode[1] == "same":
raise NotImplementedError()
else:
raise ValueError("invalid border mode", border_mode[1])
out_tmp = out_4d.reshape((Ns, Ts, Nf, Tf, Hout, Wout))
if Tf == 1:
out_5d = out_tmp.reshape((Ns, Ts, Nf, Hout, Wout))
else:
if border_mode[0] == "valid":
Tpad = 0
elif border_mode[0] == "full":
Tpad = Tf - 1
elif border_mode[0] == "half":
Tpad = Tf // 2
elif border_mode[0] == "same":
raise NotImplementedError()
else:
raise ValueError("invalid border mode", border_mode[0])
if Tpad == 0:
out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
else:
out_tmp_padded = at.zeros(
dtype=out_tmp.dtype, shape=(Ns, Ts + 2 * Tpad, Nf, Tf, Hout, Wout)
)
out_tmp_padded = aesara.tensor.subtensor.set_subtensor(
out_tmp_padded[:, Tpad : (Ts + Tpad), :, :, :, :], out_tmp
)
out_5d = diagonal_subtensor(out_tmp_padded, 1, 3).sum(axis=3)
return out_5d
@local_optimizer([DiagonalSubtensor, IncDiagonalSubtensor])
def local_inplace_DiagonalSubtensor(fgraph, node):
if (
isinstance(node.op, (DiagonalSubtensor, IncDiagonalSubtensor))
and not node.op.inplace
):
new_op = node.op.__class__(inplace=True)
new_node = new_op(*node.inputs)
copy_stack_trace(node.outputs[0], new_node)
return [new_node]
return False
aesara.compile.optdb.register(
"local_inplace_DiagonalSubtensor",
TopoOptimizer(
local_inplace_DiagonalSubtensor, failure_callback=TopoOptimizer.warn_inplace
),
60,
"fast_run",
"inplace",
)
| true | true |
f72e9f4df3baeedd0f0a991769dc963a8cee6f32 | 897 | py | Python | python/tvm/driver/tvmc/__main__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 4 | 2019-05-08T04:46:07.000Z | 2019-11-11T19:43:04.000Z | python/tvm/driver/tvmc/__main__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 3 | 2020-04-20T15:37:55.000Z | 2020-05-13T05:34:28.000Z | python/tvm/driver/tvmc/__main__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 2 | 2019-08-08T01:48:03.000Z | 2019-09-27T06:49:16.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC - TVM driver command-line interface
"""
from .main import main
# Entry point for `python -m tvm.driver.tvmc`.
if __name__ == "__main__":
    main()
| 35.88 | 62 | 0.758082 |
from .main import main
if __name__ == "__main__":
main()
| true | true |
f72e9fae2a5cbb0065907b84dcb83ecc498baec3 | 161 | py | Python | delicious_treat/__init__.py | briggySmalls/delicious-treat | 92a4eb3ba56262b4b167e06dce52a4cb4b1bb5fb | [
"MIT"
] | null | null | null | delicious_treat/__init__.py | briggySmalls/delicious-treat | 92a4eb3ba56262b4b167e06dce52a4cb4b1bb5fb | [
"MIT"
] | 3 | 2020-03-24T17:57:40.000Z | 2021-02-02T22:23:51.000Z | delicious_treat/__init__.py | briggySmalls/delicious-treat | 92a4eb3ba56262b4b167e06dce52a4cb4b1bb5fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for delicious treat."""
# Package metadata (author/contact/version).
__author__ = """Sam Briggs"""
__email__ = 'briggySmalls90@gmail.com'
__version__ = '0.1.0'
| 23 | 44 | 0.658385 |
__author__ = """Sam Briggs"""
__email__ = 'briggySmalls90@gmail.com'
__version__ = '0.1.0'
| true | true |
f72e9feb8ce2f78432251248f5f7ea13e9f2b929 | 10,321 | py | Python | salt/states/csf.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | 1 | 2020-04-09T03:25:10.000Z | 2020-04-09T03:25:10.000Z | salt/states/csf.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | salt/states/csf.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
CSF Ip tables management
========================
:depends: - csf utility
:configuration: See http://download.configserver.com/csf/install.txt
for setup instructions.
.. code-block:: yaml
Simply allow/deny rules:
csf.rule_present:
ip: 1.2.3.4
method: allow
""" # pylint: disable=W0105
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
return "csf"
def rule_present(
    name,
    method,
    port=None,
    proto="tcp",
    direction="in",
    port_origin="d",
    ip_origin="s",
    ttl=None,
    comment="",
    reload=False,
):
    """
    Ensure iptable rule exists.
    name
        The ip address or CIDR for the rule.
    method
        The type of rule. Either 'allow' or 'deny'.
    port
        Optional port to be open or closed for the
        iptables rule.
    proto
        The protocol. Either 'tcp', or 'udp'.
        Only applicable if port is specified.
    direction
        The diretion of traffic to apply the rule to.
        Either 'in', or 'out'. Only applicable if
        port is specified.
    port_origin
        Specifies either the source or destination
        port is relevant for this rule. Only applicable
        if port is specified. Either 's', or 'd'.
    ip_origin
        Specifies whether the ip in this rule refers to
        the source or destination ip. Either 's', or
        'd'. Only applicable if port is specified.
    ttl
        How long the rule should exist. If supplied,
        `csf.tempallow()` or csf.tempdeny()` are used.
    comment
        An optional comment to appear after the rule
        as a #comment .
    reload
        Reload the csf service after applying this rule.
        Default false.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Rule already exists.",
    }
    ip = name
    # Check if rule is already present
    exists = __salt__["csf.exists"](
        method=method,
        ip=ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        ttl=ttl,
        comment=comment,
    )
    if exists:
        return ret
    else:
        if ttl:
            # With a ttl, dispatch to csf.tempallow/csf.tempdeny instead of
            # the permanent csf.allow/csf.deny execution functions.
            method = "temp{0}".format(method)
        func = __salt__["csf.{0}".format(method)]
        rule = func(
            ip,
            port=port,
            proto=proto,
            direction=direction,
            port_origin=port_origin,
            ip_origin=ip_origin,
            ttl=ttl,
            comment=comment,
        )
        if rule:
            comment = "Rule has been added."
        # NOTE(review): if ``rule`` is falsy, ``comment`` falls back to the
        # function parameter (default "") and "Created" is still reported
        # below -- confirm whether a failed add should instead set
        # result=False with no changes.
        if reload:
            if __salt__["csf.reload"]():
                comment += " Csf reloaded."
            else:
                comment += " Unable to reload csf."
                ret["result"] = False
        ret["comment"] = comment
        ret["changes"]["Rule"] = "Created"
        return ret
def rule_absent(
    name,
    method,
    port=None,
    proto="tcp",
    direction="in",
    port_origin="d",
    ip_origin="s",
    ttl=None,
    reload=False,
):
    """
    Ensure iptable rule is not present.

    name
        The ip address or CIDR for the rule.
    method
        The type of rule. Either 'allow' or 'deny'.
    port
        Optional port to be open or closed for the
        iptables rule.
    proto
        The protocol. Either 'tcp', 'udp'.
        Only applicable if port is specified.
    direction
        The diretion of traffic to apply the rule to.
        Either 'in', or 'out'. Only applicable if
        port is specified.
    port_origin
        Specifies either the source or destination
        port is relevant for this rule. Only applicable
        if port is specified. Either 's', or 'd'.
    ip_origin
        Specifies whether the ip in this rule refers to
        the source or destination ip. Either 's', or
        'd'. Only applicable if port is specified.
    ttl
        How long the rule should exist. If supplied,
        `csf.tempallow()` or csf.tempdeny()` are used.
    reload
        Reload the csf service after applying this rule.
        Default false.
    """
    ip = name
    ret = {"name": name, "changes": {}, "result": True, "comment": "Rule not present."}
    exists = __salt__["csf.exists"](
        method,
        ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        ttl=ttl,
    )
    if not exists:
        return ret
    rule = __salt__["csf.remove_rule"](
        method=method,
        ip=ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        comment="",
        ttl=ttl,
    )
    # Bug fix: previously ``comment`` was only bound inside ``if rule:`` but
    # was read unconditionally afterwards, raising NameError whenever
    # csf.remove_rule returned a falsy value. The success bookkeeping is now
    # guarded, and failure is reported explicitly.
    if rule:
        comment = "Rule has been removed."
        if reload:
            if __salt__["csf.reload"]():
                comment += " Csf reloaded."
            else:
                # Bug fix: the failure message previously lacked a leading
                # space and never flagged the state as failed (rule_present
                # sets result=False in the same situation).
                comment += " Csf unable to be reloaded."
                ret["result"] = False
        ret["comment"] = comment
        ret["changes"]["Rule"] = "Removed"
    else:
        ret["result"] = False
        ret["comment"] = "Failed to remove rule."
    return ret
def ports_open(name, ports, proto="tcp", direction="in"):
    """
    Ensure ports are open for a protocol, in a direction.
    e.g. - proto='tcp', direction='in' would set the values
    for TCP_IN in the csf.conf file.
    ports
        A list of ports that should be open.
    proto
        The protocol. May be one of 'tcp', 'udp',
        'tcp6', or 'udp6'.
    direction
        Choose 'in', 'out', or both to indicate the port
        should be opened for inbound traffic, outbound
        traffic, or both.
    """
    # Normalize every port to a string so comparison with csf.conf works.
    ports = list(six.moves.map(six.text_type, ports))
    diff = False
    ret = {
        "name": ",".join(ports),
        "changes": {},
        "result": True,
        "comment": "Ports open.",
    }
    current_ports = __salt__["csf.get_ports"](proto=proto, direction=direction)
    direction = direction.upper()
    directions = __salt__["csf.build_directions"](direction)
    # NOTE(review): the loop rebinds ``direction``, so the csf.allow_ports
    # call below sees the *last* element of ``directions`` rather than the
    # caller's argument -- confirm this is intended for 'both'.
    for direction in directions:
        log.trace("current_ports[direction]: %s", current_ports[direction])
        log.trace("ports: %s", ports)
        if current_ports[direction] != ports:
            diff = True
    if diff:
        result = __salt__["csf.allow_ports"](ports, proto=proto, direction=direction)
        ret["changes"]["Ports"] = "Changed"
        ret["comment"] = result
    return ret
def nics_skip(name, nics, ipv6):
    """Alias of :mod:`csf.nics_skipped <salt.states.csf.nics_skipped>`; delegates unchanged."""
    return nics_skipped(name, nics, ipv6)
def nics_skipped(name, nics, ipv6=False):
    """
    Ensure the given NICs are on csf's skip list.

    name
        Meaningless arg, but required for state.
    nics
        A list of nics to skip.
    ipv6
        Boolean. Set to true to operate on the ipv6
        skip list instead of ipv4. Default false.
    """
    ret = {
        "name": ",".join(nics),
        "changes": {},
        "result": True,
        "comment": "NICs skipped.",
    }
    already_skipped = __salt__["csf.get_skipped_nics"](ipv6=ipv6)
    if nics != already_skipped:
        __salt__["csf.skip_nics"](nics, ipv6=ipv6)
        ret["changes"]["Skipped NICs"] = "Changed"
    return ret
def testing_on(name, reload=False):
    """
    Ensure testing mode is enabled in csf.

    name
        Meaningless arg, but required for state.
    reload
        Reload CSF after changing the testing status.
        Default false.
    """
    ret = {
        "name": "testing mode",
        "changes": {},
        "result": True,
        "comment": "Testing mode already ON.",
    }
    testing = __salt__["csf.get_testing_status"]()
    if int(testing) == 1:
        return ret
    enable = __salt__["csf.enable_testing_mode"]()
    if enable:
        comment = "Csf testing mode enabled"
        if reload:
            if __salt__["csf.reload"]():
                comment += " and csf reloaded."
        ret["changes"]["Testing Mode"] = "on"
        # Bug fix: previously ``ret["comment"]`` was assigned a stray empty
        # dict (a leftover ``result = {}``) instead of the message built
        # above, so the state always reported "{}" as its comment.
        ret["comment"] = comment
    return ret
def testing_off(name, reload=False):
    """
    Ensure testing mode is disabled in csf.

    name
        Meaningless arg, but required for state.
    reload
        Reload CSF after changing the testing status.
        Default false.
    """
    ret = {
        "name": "testing mode",
        "changes": {},
        "result": True,
        "comment": "Testing mode already OFF.",
    }
    testing = __salt__["csf.get_testing_status"]()
    if int(testing) == 0:
        return ret
    disable = __salt__["csf.disable_testing_mode"]()
    if disable:
        comment = "Csf testing mode disabled"
        if reload:
            if __salt__["csf.reload"]():
                comment += " and csf reloaded."
        ret["changes"]["Testing Mode"] = "off"
        # Bug fix: this assignment used to sit outside the ``if disable:``
        # guard, raising NameError (``comment`` unbound) whenever disabling
        # failed. The unused ``result = {}`` binding was removed as well,
        # and the docstring no longer claims this state *enables* testing.
        ret["comment"] = comment
    return ret
def option_present(name, value, reload=False):
    """
    Ensure the state of a particular option/setting in csf.

    name
        The option name in csf.conf
    value
        The value it should be set to.
    reload
        Boolean. If set to true, csf will be reloaded after.
    """
    ret = {
        # Bug fix: the name previously said "testing mode", copy-pasted from
        # the testing_on/testing_off states; echo the actual option instead.
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Option already present.",
    }
    option = name
    current_option = __salt__["csf.get_option"](option)
    if current_option:
        # csf.split_option splits the csf.conf line; index 1 holds the
        # quoted value portion that we compare against.
        option_value = __salt__["csf.split_option"](current_option)[1]
        if '"{0}"'.format(value) == option_value:
            return ret
        __salt__["csf.set_option"](option, value)
        ret["comment"] = "Option modified."
        ret["changes"]["Option"] = "Changed"
    else:
        # Option missing entirely: append a fresh `KEY = "value"` line.
        __salt__["file.append"](
            "/etc/csf/csf.conf", args='{0} = "{1}"'.format(option, value)
        )
        ret["comment"] = "Option not present. Appended to csf.conf"
        ret["changes"]["Option"] = "Changed."
    if reload:
        if __salt__["csf.reload"]():
            ret["comment"] += ". Csf reloaded."
        else:
            ret["comment"] += ". Csf failed to reload."
            ret["result"] = False
    return ret
| 24.990315 | 87 | 0.560314 |
from __future__ import absolute_import, print_function, unicode_literals
import logging
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
return "csf"
def rule_present(
name,
method,
port=None,
proto="tcp",
direction="in",
port_origin="d",
ip_origin="s",
ttl=None,
comment="",
reload=False,
):
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "Rule already exists.",
}
ip = name
exists = __salt__["csf.exists"](
method=method,
ip=ip,
port=port,
proto=proto,
direction=direction,
port_origin=port_origin,
ip_origin=ip_origin,
ttl=ttl,
comment=comment,
)
if exists:
return ret
else:
if ttl:
method = "temp{0}".format(method)
func = __salt__["csf.{0}".format(method)]
rule = func(
ip,
port=port,
proto=proto,
direction=direction,
port_origin=port_origin,
ip_origin=ip_origin,
ttl=ttl,
comment=comment,
)
if rule:
comment = "Rule has been added."
if reload:
if __salt__["csf.reload"]():
comment += " Csf reloaded."
else:
comment += " Unable to reload csf."
ret["result"] = False
ret["comment"] = comment
ret["changes"]["Rule"] = "Created"
return ret
def rule_absent(
name,
method,
port=None,
proto="tcp",
direction="in",
port_origin="d",
ip_origin="s",
ttl=None,
reload=False,
):
ip = name
ret = {"name": name, "changes": {}, "result": True, "comment": "Rule not present."}
exists = __salt__["csf.exists"](
method,
ip,
port=port,
proto=proto,
direction=direction,
port_origin=port_origin,
ip_origin=ip_origin,
ttl=ttl,
)
if not exists:
return ret
else:
rule = __salt__["csf.remove_rule"](
method=method,
ip=ip,
port=port,
proto=proto,
direction=direction,
port_origin=port_origin,
ip_origin=ip_origin,
comment="",
ttl=ttl,
)
if rule:
comment = "Rule has been removed."
if reload:
if __salt__["csf.reload"]():
comment += " Csf reloaded."
else:
comment += "Csf unable to be reloaded."
ret["comment"] = comment
ret["changes"]["Rule"] = "Removed"
return ret
def ports_open(name, ports, proto="tcp", direction="in"):
ports = list(six.moves.map(six.text_type, ports))
diff = False
ret = {
"name": ",".join(ports),
"changes": {},
"result": True,
"comment": "Ports open.",
}
current_ports = __salt__["csf.get_ports"](proto=proto, direction=direction)
direction = direction.upper()
directions = __salt__["csf.build_directions"](direction)
for direction in directions:
log.trace("current_ports[direction]: %s", current_ports[direction])
log.trace("ports: %s", ports)
if current_ports[direction] != ports:
diff = True
if diff:
result = __salt__["csf.allow_ports"](ports, proto=proto, direction=direction)
ret["changes"]["Ports"] = "Changed"
ret["comment"] = result
return ret
def nics_skip(name, nics, ipv6):
return nics_skipped(name, nics=nics, ipv6=ipv6)
def nics_skipped(name, nics, ipv6=False):
ret = {
"name": ",".join(nics),
"changes": {},
"result": True,
"comment": "NICs skipped.",
}
current_skipped_nics = __salt__["csf.get_skipped_nics"](ipv6=ipv6)
if nics == current_skipped_nics:
return ret
result = __salt__["csf.skip_nics"](nics, ipv6=ipv6)
ret["changes"]["Skipped NICs"] = "Changed"
return ret
def testing_on(name, reload=False):
ret = {
"name": "testing mode",
"changes": {},
"result": True,
"comment": "Testing mode already ON.",
}
result = {}
testing = __salt__["csf.get_testing_status"]()
if int(testing) == 1:
return ret
enable = __salt__["csf.enable_testing_mode"]()
if enable:
comment = "Csf testing mode enabled"
if reload:
if __salt__["csf.reload"]():
comment += " and csf reloaded."
ret["changes"]["Testing Mode"] = "on"
ret["comment"] = result
return ret
def testing_off(name, reload=False):
ret = {
"name": "testing mode",
"changes": {},
"result": True,
"comment": "Testing mode already OFF.",
}
result = {}
testing = __salt__["csf.get_testing_status"]()
if int(testing) == 0:
return ret
disable = __salt__["csf.disable_testing_mode"]()
if disable:
comment = "Csf testing mode disabled"
if reload:
if __salt__["csf.reload"]():
comment += " and csf reloaded."
ret["changes"]["Testing Mode"] = "off"
ret["comment"] = comment
return ret
def option_present(name, value, reload=False):
ret = {
"name": "testing mode",
"changes": {},
"result": True,
"comment": "Option already present.",
}
option = name
current_option = __salt__["csf.get_option"](option)
if current_option:
l = __salt__["csf.split_option"](current_option)
option_value = l[1]
if '"{0}"'.format(value) == option_value:
return ret
else:
result = __salt__["csf.set_option"](option, value)
ret["comment"] = "Option modified."
ret["changes"]["Option"] = "Changed"
else:
result = __salt__["file.append"](
"/etc/csf/csf.conf", args='{0} = "{1}"'.format(option, value)
)
ret["comment"] = "Option not present. Appended to csf.conf"
ret["changes"]["Option"] = "Changed."
if reload:
if __salt__["csf.reload"]():
ret["comment"] += ". Csf reloaded."
else:
ret["comment"] += ". Csf failed to reload."
ret["result"] = False
return ret
| true | true |
f72ea02cfd7d58ff08cac71df6447397276916de | 1,902 | py | Python | tests/test_text.py | ankurRakuten/WebHelper | 5e36d75161588260371978b8a42f4adf1f0ec252 | [
"BSD-3-Clause"
] | null | null | null | tests/test_text.py | ankurRakuten/WebHelper | 5e36d75161588260371978b8a42f4adf1f0ec252 | [
"BSD-3-Clause"
] | null | null | null | tests/test_text.py | ankurRakuten/WebHelper | 5e36d75161588260371978b8a42f4adf1f0ec252 | [
"BSD-3-Clause"
] | 1 | 2019-07-31T11:00:05.000Z | 2019-07-31T11:00:05.000Z | # -*- coding: utf-8 -*-
from util import WebHelpersTestCase
import unittest
from nose.tools import eq_
from webhelpers.text import *
class TestTextHelper(WebHelpersTestCase):
def test_excerpt(self):
self.assertEqual("...lo my wo...",
excerpt("hello my world", "my", 3))
self.assertEqual("...is a beautiful morn...",
excerpt("This is a beautiful morning", "beautiful", 5))
self.assertEqual("This is a...",
excerpt("This is a beautiful morning", "this", 5))
self.assertEqual("...iful morning",
excerpt("This is a beautiful morning", "morning", 5))
self.assertEqual('',
excerpt("This is a beautiful morning", "day"))
def test_excerpt_with_regex(self):
self.assertEqual('...is a beautiful! mor...',
excerpt('This is a beautiful! morning', 'beautiful', 5))
self.assertEqual('...is a beautiful? mor...',
excerpt('This is a beautiful? morning', 'beautiful', 5))
def test_excerpt_with_utf8(self):
self.assertEqual("...fficiency could not be ...",
excerpt("That's why efficiency could not be helped", 'could', 8))
def test_truncate(self):
self.assertEqual("Hello World!", truncate("Hello World!", 12))
self.assertEqual("Hello Wor...", truncate("Hello World!!", 12))
self.assertEqual("Hello...", truncate("Hello World!!", 12, whole_word=True))
def test_strip_leading_whitespace(self):
s = " def fn(x):\n return x\n"
control = "def fn(x):\nreturn x\n"
eq_(control, strip_leading_whitespace(s))
# @@MO wrap_paragraphs untested.
def test_urlify(self):
s = "What is this? It is a car."
control = "What%20is%20this%3f%20It%20is%20a%20car."
| 38.816327 | 88 | 0.572555 |
from util import WebHelpersTestCase
import unittest
from nose.tools import eq_
from webhelpers.text import *
class TestTextHelper(WebHelpersTestCase):
def test_excerpt(self):
self.assertEqual("...lo my wo...",
excerpt("hello my world", "my", 3))
self.assertEqual("...is a beautiful morn...",
excerpt("This is a beautiful morning", "beautiful", 5))
self.assertEqual("This is a...",
excerpt("This is a beautiful morning", "this", 5))
self.assertEqual("...iful morning",
excerpt("This is a beautiful morning", "morning", 5))
self.assertEqual('',
excerpt("This is a beautiful morning", "day"))
def test_excerpt_with_regex(self):
self.assertEqual('...is a beautiful! mor...',
excerpt('This is a beautiful! morning', 'beautiful', 5))
self.assertEqual('...is a beautiful? mor...',
excerpt('This is a beautiful? morning', 'beautiful', 5))
def test_excerpt_with_utf8(self):
self.assertEqual("...fficiency could not be ...",
excerpt("That's why efficiency could not be helped", 'could', 8))
def test_truncate(self):
self.assertEqual("Hello World!", truncate("Hello World!", 12))
self.assertEqual("Hello Wor...", truncate("Hello World!!", 12))
self.assertEqual("Hello...", truncate("Hello World!!", 12, whole_word=True))
def test_strip_leading_whitespace(self):
s = " def fn(x):\n return x\n"
control = "def fn(x):\nreturn x\n"
eq_(control, strip_leading_whitespace(s))
# @@MO wrap_paragraphs untested.
def test_urlify(self):
s = "What is this? It is a car."
control = "What%20is%20this%3f%20It%20is%20a%20car."
| true | true |
f72ea0bd5ec69a1e378dcf4fa5f871d867ab1210 | 5,929 | py | Python | tensorflow_model_analysis/eval_saved_model/testutil.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/eval_saved_model/testutil.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/eval_saved_model/testutil.py | hakanhp/chanel | 6825b60e86c46daabb18f40f1e45d3de2ff8e983 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tempfile
import tensorflow as tf
from tensorflow_model_analysis.types_compat import Dict, Iterable, Union, Sequence, Tuple
from tensorflow.core.example import example_pb2
class TensorflowModelAnalysisTest(tf.test.TestCase):
"""Test class that extends tf.test.TestCase with extra functionality."""
  def setUp(self):
    # Include both the custom assertion message and unittest's default
    # diff output in failures (unittest's longMessage flag).
    self.longMessage = True  # pylint: disable=invalid-name
def _getTempDir(self):
return tempfile.mkdtemp()
def _makeExample(self, **kwargs):
"""Make a TensorFlow Example with the given fields.
The arguments can be singleton values, or a list of values, e.g.
_makeExample(age=3.0, fruits=['apples', 'pears', 'oranges']).
Empty lists are not allowed, since we won't be able to deduce the type.
Args:
**kwargs: Each key=value pair defines a field in the example to be
constructed. The name of the field will be key, and the value will be
value. The type will be deduced from the type of the value.
Returns:
TensorFlow.Example with the corresponding fields set to the corresponding
values.
Raises:
ValueError: One of the arguments was an empty list.
TypeError: One of the elements (or one of the elements in a list) had an
unsupported type.
"""
result = example_pb2.Example()
for key, value in kwargs.items():
if isinstance(value, float) or isinstance(value, int):
result.features.feature[key].float_list.value[:] = [value]
elif isinstance(value, str):
result.features.feature[key].bytes_list.value[:] = [value]
elif isinstance(value, list):
if len(value) == 0: # pylint: disable=g-explicit-length-test
raise ValueError('empty lists not allowed, but field %s was an empty '
'list' % key)
if isinstance(value[0], float) or isinstance(value[0], int):
result.features.feature[key].float_list.value[:] = value
elif isinstance(value[0], str):
result.features.feature[key].bytes_list.value[:] = value
else:
raise TypeError('field %s was a list, but the first element had '
'unknown type %s' % key, type(value[0]))
else:
raise TypeError('unrecognised type for field %s: type %s' %
(key, type(value)))
return result
def assertHasKeyWithValueAlmostEqual(self,
d,
key,
value,
places = 5):
self.assertIn(key, d)
self.assertAlmostEqual(d[key], value, places=places, msg='key %s' % key)
def assertDictElementsAlmostEqual(self,
got_values_dict,
expected_values_dict,
places = 5):
for key, expected_value in expected_values_dict.items():
self.assertHasKeyWithValueAlmostEqual(got_values_dict, key,
expected_value, places)
def assertDictMatrixRowsAlmostEqual(
self,
got_values_dict,
expected_values_dict,
places = 5):
"""Fails if got_values_dict does not match values in expected_values_dict.
For each entry, expected_values_dict provides the row index and the values
of that row to be compared to the bucketing result in got_values_dict. For
example:
got_values_dict={'key', [[1,2,3],[4,5,6],[7,8,9]]}
you can check the first and last row of got_values_dict[key] by setting
expected_values_dict={'key', [(0,[1,2,3]), (2,[7,8,9])]}
Args:
got_values_dict: The dict got, where each value represents a full
bucketing result.
expected_values_dict: The expected dict. It may contain a subset of keys
in got_values_dict. The value is of type "Iterable[Tuple[int,
Iterable[scalar]]]", where each Tuple contains the index of a row to be
checked and the expected values of that row.
places: The number of decimal places to compare.
"""
for key, expected_value in expected_values_dict.items():
self.assertIn(key, got_values_dict)
for (row, values) in expected_value:
self.assertSequenceAlmostEqual(
got_values_dict[key][row],
values,
places=places,
msg_prefix='for key %s, row %d: ' % (key, row))
def assertSequenceAlmostEqual(self,
got_seq,
expected_seq,
places = 5,
msg_prefix=''):
got = list(got_seq)
expected = list(expected_seq)
self.assertEqual(
len(got), len(expected), msg=msg_prefix + 'lengths do not match')
for index, (a, b) in enumerate(zip(got, expected)):
msg = msg_prefix + 'at index %d. sequences were: %s and %s' % (index, got,
expected),
if math.isnan(a) or math.isnan(b):
self.assertEqual(math.isnan(a), math.isnan(b), msg=msg)
else:
self.assertAlmostEqual(a, b, msg=msg, places=places)
| 40.889655 | 89 | 0.624051 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tempfile
import tensorflow as tf
from tensorflow_model_analysis.types_compat import Dict, Iterable, Union, Sequence, Tuple
from tensorflow.core.example import example_pb2
class TensorflowModelAnalysisTest(tf.test.TestCase):
  """Test class that extends tf.test.TestCase with extra functionality."""

  def setUp(self):
    self.longMessage = True  # pylint: disable=invalid-name

  def _getTempDir(self):
    """Creates and returns a fresh temporary directory."""
    return tempfile.mkdtemp()

  def _makeExample(self, **kwargs):
    """Make a TensorFlow Example with the given fields.

    The arguments can be singleton values, or a list of values. Empty lists
    are not allowed, since we won't be able to deduce the type.

    Args:
      **kwargs: Each key=value pair defines a field named key with content
        value; the proto field type is deduced from the Python type.

    Returns:
      TensorFlow.Example with the corresponding fields set.

    Raises:
      ValueError: One of the arguments was an empty list.
      TypeError: One of the elements (or one of the elements in a list) had an
        unsupported type.
    """
    result = example_pb2.Example()
    for key, value in kwargs.items():
      if isinstance(value, (float, int)):
        result.features.feature[key].float_list.value[:] = [value]
      elif isinstance(value, str):
        result.features.feature[key].bytes_list.value[:] = [value]
      elif isinstance(value, list):
        if not value:
          raise ValueError('empty lists not allowed, but field %s was an empty '
                           'list' % key)
        if isinstance(value[0], (float, int)):
          result.features.feature[key].float_list.value[:] = value
        elif isinstance(value[0], str):
          result.features.feature[key].bytes_list.value[:] = value
        else:
          # Bug fix: the format args were previously passed as a second
          # positional argument to TypeError instead of being applied to the
          # format string, so %-formatting itself raised.
          raise TypeError('field %s was a list, but the first element had '
                          'unknown type %s' % (key, type(value[0])))
      else:
        raise TypeError('unrecognised type for field %s: type %s' %
                        (key, type(value)))
    return result

  def assertHasKeyWithValueAlmostEqual(self,
                                       d,
                                       key,
                                       value,
                                       places = 5):
    """Asserts that d contains key and d[key] is almost equal to value."""
    self.assertIn(key, d)
    self.assertAlmostEqual(d[key], value, places=places, msg='key %s' % key)

  def assertDictElementsAlmostEqual(self,
                                    got_values_dict,
                                    expected_values_dict,
                                    places = 5):
    """Asserts each entry of expected_values_dict is almost-equal in got."""
    for key, expected_value in expected_values_dict.items():
      self.assertHasKeyWithValueAlmostEqual(got_values_dict, key,
                                            expected_value, places)

  def assertDictMatrixRowsAlmostEqual(self,
                                      got_values_dict,
                                      expected_values_dict,
                                      places = 5):
    """Checks selected rows of each matrix in got_values_dict.

    expected_values_dict maps key -> iterable of (row_index, expected_row);
    each referenced row of got_values_dict[key] must be almost equal.
    """
    for key, expected_value in expected_values_dict.items():
      self.assertIn(key, got_values_dict)
      for (row, values) in expected_value:
        self.assertSequenceAlmostEqual(
            got_values_dict[key][row],
            values,
            places=places,
            msg_prefix='for key %s, row %d: ' % (key, row))

  def assertSequenceAlmostEqual(self,
                                got_seq,
                                expected_seq,
                                places = 5,
                                msg_prefix=''):
    """Asserts two sequences are element-wise almost equal (NaN == NaN)."""
    got = list(got_seq)
    expected = list(expected_seq)
    self.assertEqual(
        len(got), len(expected), msg=msg_prefix + 'lengths do not match')
    for index, (a, b) in enumerate(zip(got, expected)):
      # Bug fix: a trailing comma previously turned msg into a 1-tuple.
      msg = msg_prefix + 'at index %d. sequences were: %s and %s' % (
          index, got, expected)
      if math.isnan(a) or math.isnan(b):
        self.assertEqual(math.isnan(a), math.isnan(b), msg=msg)
      else:
        self.assertAlmostEqual(a, b, msg=msg, places=places)
| true | true |
f72ea42d49b9e748dddd0ea72f10fa57fb092cbe | 14,348 | py | Python | torch_utils.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | [
"MIT"
] | 42 | 2020-10-13T19:47:37.000Z | 2022-03-26T09:56:46.000Z | torch_utils.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | [
"MIT"
] | null | null | null | torch_utils.py | MDoid10111/EMNLP2020 | 97e4da06abc72873a4830cfa53c035a27eb3975b | [
"MIT"
] | 5 | 2020-11-30T14:48:44.000Z | 2022-02-19T17:18:21.000Z | import numpy as np
import torch, os
import torch.nn.utils.rnn as rnn_utils
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import torchvision
from torchvision import transforms
def flatten(x):
    """Collapse all non-batch dimensions of x into one.

    Parameters
    ----------
    x: tensor of shape (B, D1, D2, ...)

    Returns
    -------
    tensor of shape (B, D1*D2*...)
    """
    trailing = 1
    for dim in x.size()[1:]:  # skip the batch dimension
        trailing *= dim
    return x.contiguous().view(-1, trailing)
def gpu(tensor, gpu=False):
    """Return tensor moved to the GPU when gpu is True, unchanged otherwise."""
    return tensor.cuda() if gpu else tensor
def cpu(tensor):
    """Return tensor on host memory, copying off the GPU only when needed."""
    return tensor.cpu() if tensor.is_cuda else tensor
def minibatch(*tensors, **kwargs):
    """Yield successive mini-batches over one or more aligned sequences.

    With a single input, yields its slices; with several, yields tuples of
    parallel slices. Requires the 'batch_size' keyword argument.
    """
    batch_size = kwargs['batch_size']
    single = len(tensors) == 1
    for start in range(0, len(tensors[0]), batch_size):
        stop = start + batch_size
        if single:
            yield tensors[0][start:stop]
        else:
            yield tuple(t[start:stop] for t in tensors)
def shuffle(*arrays, **kwargs):
    """Shuffle several equal-length arrays in unison.

    This is not an inplace operation, so callers' data is never mutated.

    Parameters
    ----------
    *arrays: numpy arrays (or other fancy-indexable sequences) of equal length.
    random_state: optional keyword-only ``numpy.random.RandomState`` used for
        reproducible shuffling; defaults to the global numpy RNG (previous
        behavior — the old "fix this for reproducible" TODO).

    Returns
    -------
    A single shuffled array, or a tuple of arrays shuffled with the same
    permutation when several inputs are given.

    Raises
    ------
    ValueError: if the inputs do not all have the same length.
    """
    if len(set(len(x) for x in arrays)) != 1:
        raise ValueError('All inputs to shuffle must have '
                         'the same length.')
    rng = kwargs.get('random_state')
    if rng is None:
        rng = np.random
    shuffle_indices = np.arange(len(arrays[0]))
    rng.shuffle(shuffle_indices)
    if len(arrays) == 1:
        return arrays[0][shuffle_indices]
    else:
        return tuple(x[shuffle_indices] for x in arrays)
def assert_no_grad(variable):
    """Raise ValueError when variable requires grad (loss targets must not)."""
    if not variable.requires_grad:
        return
    raise ValueError(
        "nn criterions don't compute the gradient w.r.t. targets - please "
        "mark these variables as volatile or not requiring gradients"
    )
def numpy2tensor(x, dtype):
    """Copy a numpy array into a new torch tensor of the given dtype.

    Fix: the previous ``torch.tensor(torch.from_numpy(x), dtype=...)`` built a
    tensor from a tensor, which emits the "copy construct" UserWarning; passing
    the ndarray straight to ``torch.tensor`` performs the same single copy
    silently.
    """
    return torch.tensor(x, dtype=dtype)
def tensor2numpy(x):
    """Return x as a numpy array, moving it off the GPU first if necessary."""
    if x.is_cuda:
        x = x.cpu()
    return x.numpy()
def set_seed(seed, cuda=False):
    """Seed torch's RNG (and the CUDA RNG when cuda=True) for reproducibility."""
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)
def create_mask_tensor(query: torch.Tensor, doc: torch.Tensor, threshold: int = 0):
    """Build a pairwise validity mask for query/doc token pairs.

    Entry (b, l, r) is 1.0 exactly when both query[b, l] and doc[b, r] exceed
    `threshold` (0 masks padding tokens; 1 additionally masks <unk>/oov ids).

    Parameters
    ----------
    query: integer tensor (B, L)
    doc: integer tensor (B, R)
    threshold: ids <= threshold are treated as invalid

    Returns
    -------
    float tensor of shape (B, L, R)
    """
    assert query.size(0) == doc.size(0)
    assert len(query.size()) == 2 and len(doc.size()) == 2
    query_valid = (query > threshold).unsqueeze(-1).float()  # (B, L, 1)
    doc_valid = (doc > threshold).unsqueeze(1).float()       # (B, 1, R)
    # Outer product of the two 0/1 masks gives the pairwise mask.
    return torch.bmm(query_valid, doc_valid)                 # (B, L, R)
def create_mask_tensor_image(left_indices: torch.Tensor, right_indices: torch.Tensor, threshold: int = 0):
    """Create a pairwise validity mask between a query and n right-side items.

    Parameters
    ----------
    left_indices: integer tensor (B1, 1, M1); B1 may be 1 and is then expanded
        to B (the testing case, where one query is scored against B candidates)
    right_indices: integer tensor (B, n, M2)
    threshold: ids <= threshold are treated as invalid. Bug fix: this
        parameter was previously declared but ignored (0 was hard-coded);
        the default keeps the old behavior.

    Returns
    -------
    float tensor of shape (B, n, M1, M2) with 1.0 where both tokens are valid.
    """
    B1, n1, M1 = left_indices.size()
    B, n, M2 = right_indices.size()
    assert n1 == 1
    left_mask = left_indices > threshold
    right_mask = right_indices > threshold
    left_mask = left_mask.view(B1, M1, 1)
    if B1 == 1: left_mask = left_mask.expand(B, M1, 1)  # during testing
    right_mask = right_mask.view(B, n * M2, 1)
    # Outer product of the flattened masks, then fold back to (B, n, M1, M2).
    ans = torch.bmm(left_mask.float(), right_mask.permute(0, 2, 1).float())
    ans = ans.view(B, M1, n, M2).permute(0, 2, 1, 3)  # (B, n, M1, M2)
    return ans
def count_parameters(model: nn.Module):
    """Number of trainable (requires_grad) scalar parameters in model."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def get_sorted_index_and_reverse_index(base_array: np.ndarray):
    """Return indices sorting base_array descending, plus the inverse indices.

    ``base_array[new_indices]`` is decreasing, and indexing that sorted array
    with ``restoring_indices`` restores the original order. This is needed for
    packed-sequence RNN input (GRU/LSTM require length-sorted batches) and for
    un-sorting the RNN output afterwards.

    Parameters
    ----------
    base_array: 1-D numpy array of shape (B,)

    Returns
    -------
    (new_indices, restoring_indices): two int arrays of shape (B,)
    """
    assert type(base_array) == np.ndarray
    batch_size = base_array.shape[0]
    assert base_array.shape == (batch_size,)
    new_indices = np.argsort(-base_array)
    # argsort of a permutation yields its inverse; this replaces the previous
    # stack / argsort / column-select dance with the standard one-liner.
    restoring_indices = np.argsort(new_indices)
    return new_indices, restoring_indices
def packing_sequence(seq: torch.Tensor, seq_lens: np.ndarray, new_index) -> torch.Tensor:
    """Sort a padded batch by length and pack it for an RNN.

    Parameters
    ----------
    seq: padded batch (B, L, D)
    seq_lens: true sequence lengths (B,)
    new_index: permutation (B,) that orders the lengths decreasingly, as
        required by ``pack_padded_sequence``

    Returns
    -------
    a ``PackedSequence`` ready to feed into GRU/LSTM.
    """
    ordered_seq = seq[new_index]
    ordered_lens = seq_lens[new_index]
    return rnn_utils.pack_padded_sequence(ordered_seq, ordered_lens, batch_first = True)
def torch_repeat_dim0(A: torch.tensor, n: int):
    """Repeat a 3-D tensor n times along dim 0, keeping copies adjacent.

    (d1, d2, d3) -> (n*d1, d2, d3), ordered A0,A0,...,A1,A1,... (unlike
    ``A.repeat(n, 1, 1)``, which would interleave whole blocks).
    """
    assert len(A.size()) == 3
    d1, d2, d3 = A.size()
    tiled = A.unsqueeze(1).repeat(1, n, 1, 1)  # (d1, n, d2, d3)
    out = tiled.view(-1, d2, d3)
    assert out.size() == (n * d1, d2, d3)
    return out
def boolean_mask(target: torch.Tensor, mask: torch.Tensor):
    """Keep the entries of target where mask is True (tf.boolean_mask analogue).

    ``mask == True`` also accepts 0/1 integer masks, matching the original
    semantics (only entries exactly equal to 1/True are kept).
    """
    keep = mask == True  # noqa: E712 - deliberate, see docstring
    return target[keep]
def torch_argsort(input, dim=None, descending=False):
    """Indices that sort a tensor along `dim` (last dim when None).

    Backport of ``torch.argsort`` (this is simply the second value returned
    by ``torch.sort``; see its documentation for exact semantics).

    Args:
      input (Tensor): the input tensor
      dim (int, optional): the dimension to sort along
      descending (bool, optional): sort order
    """
    if dim is None:
        dim = -1
    _, order = torch.sort(input, dim, descending)
    return order
def _predict_process_ids(user_ids, item_ids, num_items, use_cuda):
    """Normalise user/item ids into aligned 1-D torch tensors for prediction.

    When `item_ids` is None, all `num_items` items are scored; a scalar
    `user_ids` is broadcast so every item is paired with that user.

    Parameters
    ----------
    user_ids: scalar or numpy array of user ids
    item_ids: numpy array of item ids, or None to score against all items
    num_items: total item count (used only when item_ids is None)
    use_cuda: move the resulting tensors to the GPU when True

    Returns
    -------
    (user_var, item_var): equal-length 1-D LongTensors (squeezed).
    """
    if item_ids is None:
        item_ids = np.arange(num_items, dtype=np.int64)
    if np.isscalar(user_ids):
        user_ids = np.array(user_ids, dtype=np.int64)
    user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))
    item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))
    # Broadcast a single user over all requested items.
    if item_ids.size()[0] != user_ids.size(0):
        user_ids = user_ids.expand(item_ids.size())
    user_var = gpu(user_ids, use_cuda)
    item_var = gpu(item_ids, use_cuda)
    return user_var.squeeze(), item_var.squeeze()
def idf(total_docs: int, term_freq: int) -> float:
    """Smoothed inverse document frequency: log((1+N)/(1+df)) + 1.

    A term appearing in every doc gets the minimum value 1.0 (poor
    discriminator); a term appearing in no doc gets the largest value,
    with the +1 smoothing avoiding division by zero.
    """
    assert term_freq <= total_docs, "The number of documents that contain a term must be smaller than total_docs"
    ratio = (1.0 + total_docs) / float(term_freq + 1.0)
    return np.log(ratio) + 1.0
def moving_average(input_tensor: torch.Tensor, window_size: int, dimension: int):
    """Sliding-window mean along `dimension`, via cumulative-sum differencing.

    Bug fix: the previous implementation hard-coded its slicing to dim 1, so
    the `dimension` argument was silently ignored (results were wrong for any
    other dim). ``narrow`` now selects along the requested dimension; behavior
    for dimension == 1 is unchanged.

    Parameters
    ----------
    input_tensor: tensor whose size along `dimension` is >= window_size
    window_size: sliding window size
    dimension: dimension to average over

    Returns
    -------
    tensor whose `dimension` length shrinks to (len - window_size + 1)
    """
    ret = torch.cumsum(input_tensor, dim = dimension)
    length = ret.size(dimension)
    span = length - window_size
    if span > 0:
        head = ret.narrow(dimension, 0, span)
        tail = ret.narrow(dimension, window_size, span)
        # tail - head is evaluated before copy_, matching the original
        # out-of-place slice-assignment semantics.
        tail.copy_(tail - head)
    return ret.narrow(dimension, window_size - 1, length - window_size + 1) / window_size
def cosine_distance(a: torch.Tensor, b: torch.Tensor):
    """Pairwise distance between rows of a and b.

    NOTE: despite the name, this computes the *Euclidean* (L2) distance
    ||a_i - b_j|| via the ||a||^2 - 2ab + ||b||^2 expansion. Memory stays
    O(B x L x R) because the (B, L, R, D) difference tensor is never formed.

    Parameters
    ----------
    a: `torch.Tensor` shape (B, L, D)
    b: `torch.Tensor` shape (B, R, D)

    Returns
    -------
    `torch.Tensor` shape (B, L, R)
    """
    assert len(a.size()) == len(b.size()) == 3
    sq_a = a.pow(2).sum(dim=-1)               # (B, L)
    sq_b = b.pow(2).sum(dim=-1)               # (B, R)
    cross = torch.bmm(a, b.permute(0, 2, 1))  # (B, L, R)
    squared = sq_a.unsqueeze(-1) - 2 * cross + sq_b.unsqueeze(1)
    # abs() guards tiny negatives from rounding; +1e-10 keeps sqrt's gradient
    # finite at zero distance.
    return torch.sqrt(torch.abs(squared) + 1e-10)
def l1_distance(a: torch.Tensor, b: torch.Tensor):
    """Pairwise L1 (Manhattan) distance between rows of a and b.

    Broadcasting materialises a (B, L, R, D) difference tensor, so memory is
    O(B x L x R x D) — unavoidable for exact pairwise L1.

    Parameters
    ----------
    a: `torch.Tensor` shape (B, L, D)
    b: `torch.Tensor` shape (B, R, D)

    Returns
    -------
    `torch.Tensor` shape (B, L, R)
    """
    assert len(a.size()) == len(b.size()) == 3
    diff = a.unsqueeze(2) - b.unsqueeze(1)  # (B, L, R, D)
    return torch.norm(diff, p = 1, dim = -1)
def _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor:
"""
Parameters
----------
doc: with shape (B, R, D)
doc_mask: binary tensor that differentiate real tokens from padding tokens (B, R)
Returns
-------
a tensor of shape (B, R, D) which indicates the context representation of each token in doc.
We also reset padding tokens to zero since they have no context
"""
def moving_average(a: torch.Tensor, window_size: int, dimension: int):
ret = torch.cumsum(a, dim = dimension)
# print("Here:", ret, ret.shape)
ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]
return ret[:, window_size - 1:] / window_size
left = context_window_size // 2
right = context_window_size - left - 1 # in case context windows is an even number then left=x//2, right=x-x//2
y = F.pad(doc, (0, 0, left, right)) # (B, c/2 + R + c/2, D)
document_context = moving_average(y, window_size = context_window_size, dimension = 1)
document_context = document_context * doc_mask.unsqueeze(-1).float()
return document_context
def init_weights(m):
    """Xavier-initialise Linear and Conv2d weights; zero their biases.

    Apply to a whole module tree with ``module.apply(init_weights)``.
    Copied from https://discuss.pytorch.org/t/how-are-layer-weights-and-biases-initialized-by-default/13073/3

    Examples:
        >>> w = nn.Linear(3, 4)
        >>> w.apply(init_weights)
    """
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)
        if hasattr(m.bias, "data"): m.bias.data.fill_(0)
    if isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight)
        # Bug fix: `if m.bias:` called bool() on a tensor (a RuntimeError for
        # multi-element biases), and xavier_uniform_ rejects 1-D tensors
        # anyway. Test for presence and zero the bias, matching the Linear
        # branch.
        if m.bias is not None:
            m.bias.data.fill_(0)
def auto_rnn(rnn_cell: nn.RNN, input_feats: torch.Tensor,
             lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):
    """Run `rnn_cell` over a padded batch and return the per-step outputs.

    NOTE(review): `rnn_cell` must be a project wrapper that accepts a
    ``(input_feats, lens, new_indices, restoring_indices)`` tuple plus
    ``max_len`` / ``return_h`` keywords — not a raw ``nn.RNN`` despite the
    annotation. Confirm against the wrapper's definition.

    Parameters
    ----------
    rnn_cell : wrapper RNN module (see note above)
    input_feats: `torch.Tensor` (B, L, D) padded input features
    lens: `torch.Tensor` (B, ) true sequence lengths
    new_indices: `torch.Tensor` (B, ) permutation sorting sequences by length
    restoring_indices: `torch.Tensor` (B, ) inverse permutation
    max_len: int padded length of the batch

    Returns
    -------
    The first element of the wrapper's output (hidden-state return is
    suppressed via ``return_h=False``).
    """
    return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=False)[0]
def rnn_last_h(rnn_cell: nn.RNN, input_feats: torch.Tensor,
               lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):
    """Return the last hidden vectors of an RNN run over a padded batch.

    NOTE(review): like `auto_rnn`, `rnn_cell` must be a project wrapper taking
    a ``(input_feats, lens, new_indices, restoring_indices)`` tuple plus
    ``max_len`` / ``return_h`` keywords — confirm against its definition.

    Parameters
    ----------
    rnn_cell : wrapper RNN module (see note above)
    input_feats: `torch.Tensor` (B, L, D) padded input features
    lens: `torch.Tensor` (B, ) true sequence lengths
    new_indices: `torch.Tensor` (B, ) permutation sorting sequences by length
    restoring_indices: `torch.Tensor` (B, ) inverse permutation
    max_len: int padded length of the batch

    Returns
    -------
    The second element of the wrapper's output (the final hidden state,
    requested via ``return_h=True``).
    """
    return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=True)[1]
def retrieve_elements_from_indices(tensor: torch.Tensor, indices: torch.Tensor):
    """Gather values from `tensor` at flat spatial positions given by `indices`.

    Companion to ``F.max_pool2d(..., return_indices=True)``: the pooling
    indices address the flattened (L*R) spatial plane, so both tensors are
    flattened past dim 2, gathered, and reshaped back.
    Copied from https://discuss.pytorch.org/t/pooling-using-idices-from-another-max-pooling/37209/4

    Parameters
    ----------
    tensor: source values, shape (B, C, L, R)
    indices: flat positions into the last two dims, shape (B, C, L', R')

    Returns
    -------
    tensor of the same shape as `indices`
    """
    flat_src = tensor.flatten(start_dim=2)
    flat_idx = indices.flatten(start_dim=2)
    gathered = flat_src.gather(dim=2, index=flat_idx)
    return gathered.view_as(indices)
# Standard ImageNet preprocessing: resize to 224x224, convert to a CHW float
# tensor in [0, 1], then normalise with the ImageNet channel means / stds
# used by torchvision pretrained backbones.
data_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def load_images(infile):
    """Load one image file, force 3-channel RGB, and apply `data_transforms`.

    Returns a normalised (3, 224, 224) float tensor.
    """
    im = Image.open(infile).convert('RGB')
    return data_transforms(im)
| 31.259259 | 118 | 0.625802 | import numpy as np
import torch, os
import torch.nn.utils.rnn as rnn_utils
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import torchvision
from torchvision import transforms
def flatten(x):
dims = x.size()[1:]
num_features = 1
for s in dims: num_features *= s
return x.contiguous().view(-1, num_features)
def gpu(tensor, gpu=False):
if gpu: return tensor.cuda()
else: return tensor
def cpu(tensor):
if tensor.is_cuda: return tensor.cpu()
else: return tensor
def minibatch(*tensors, **kwargs):
batch_size = kwargs['batch_size']
if len(tensors) == 1:
tensor = tensors[0]
for i in range(0, len(tensor), batch_size):
yield tensor[i:i + batch_size]
else:
for i in range(0, len(tensors[0]), batch_size):
yield tuple(x[i:i + batch_size] for x in tensors)
def shuffle(*arrays, **kwargs):
if len(set(len(x) for x in arrays)) != 1:
raise ValueError('All inputs to shuffle must have '
'the same length.')
shuffle_indices = np.arange(len(arrays[0]))
np.random.shuffle(shuffle_indices)
if len(arrays) == 1:
return arrays[0][shuffle_indices]
else:
return tuple(x[shuffle_indices] for x in arrays)
def assert_no_grad(variable):
if variable.requires_grad:
raise ValueError(
"nn criterions don't compute the gradient w.r.t. targets - please "
"mark these variables as volatile or not requiring gradients"
)
def numpy2tensor(x, dtype):
# torch.tensor(torch.from_numpy(var), dtype = torch.int, torch.long)
return torch.tensor(torch.from_numpy(x), dtype = dtype)
def tensor2numpy(x):
# return x.numpy()
return cpu(x).numpy()
def set_seed(seed, cuda=False):
torch.manual_seed(seed)
if cuda: torch.cuda.manual_seed(seed)
def create_mask_tensor(query: torch.Tensor, doc: torch.Tensor, threshold: int = 0):
assert query.size(0) == doc.size(0)
assert len(query.size()) == 2 and len(doc.size()) == 2
query_mask = query > threshold
doc_mask = doc > threshold
query_mask = query_mask.unsqueeze(2) # (B, L, 1)
doc_mask = doc_mask.unsqueeze(2) # (B, R, 1)
doc_mask = doc_mask.permute(0, 2, 1) # (B, 1, R)
mask_tensor = torch.bmm(query_mask.float(), doc_mask.float()) # (B, L, R)
return mask_tensor # , torch.sum(query_mask, dim = 1).squeeze(), torch.sum(doc_mask, dim = 1).squeeze()
def create_mask_tensor_image(left_indices: torch.Tensor, right_indices: torch.Tensor, threshold: int = 0):
B1, n1, M1 = left_indices.size()
B, n, M2 = right_indices.size()
assert n1 == 1
left_mask = left_indices > 0
right_mask = right_indices > 0
left_mask = left_mask.view(B1, M1, 1)
if B1 == 1: left_mask = left_mask.expand(B, M1, 1) # during testing
right_mask = right_mask.view(B, n * M2, 1)
ans = torch.bmm(left_mask.float(), right_mask.permute(0, 2, 1).float())
ans = ans.view(B, M1, n, M2).permute(0, 2, 1, 3) # (B, n, M1, M2)
return ans
def count_parameters(model: nn.Module):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_sorted_index_and_reverse_index(base_array: np.ndarray):
assert type(base_array) == np.ndarray
batch_size = base_array.shape[0]
assert base_array.shape == (batch_size,)
new_indices = np.argsort(-base_array)
old_indices = np.arange(batch_size)
r = np.stack([new_indices, old_indices], axis = 1)
r = r[np.argsort(r[:, 0])]
restoring_indices = r[:, 1] # the retoring indices. This method is tested very carefully.
return new_indices, restoring_indices
def packing_sequence(seq: torch.Tensor, seq_lens: np.ndarray, new_index) -> torch.Tensor:
return rnn_utils.pack_padded_sequence(seq[new_index], seq_lens[new_index], batch_first = True)
def torch_repeat_dim0(A: torch.tensor, n: int):
assert len(A.size()) == 3
d1, d2, d3 = A.size()
A = A.unsqueeze(0).transpose(0, 1).repeat(1, n, 1, 1).view(-1, d2, d3)
assert A.size() == (n * d1, d2, d3)
return A
def boolean_mask(target: torch.Tensor, mask: torch.Tensor):
x = mask == True
# y=torch.arange(0,3)
# x=torch.Tensor([True,False,True])==True
# print(y[x])
return target[x]
def torch_argsort(input, dim=None, descending=False):
# copy from https://github.com/pytorch/pytorch/pull/9600/files
if dim is None:
return torch.sort(input, -1, descending)[1]
return torch.sort(input, dim, descending)[1]
def _predict_process_ids(user_ids, item_ids, num_items, use_cuda):
if item_ids is None:
item_ids = np.arange(num_items, dtype=np.int64)
if np.isscalar(user_ids):
user_ids = np.array(user_ids, dtype=np.int64)
user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))
item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))
if item_ids.size()[0] != user_ids.size(0):
user_ids = user_ids.expand(item_ids.size())
user_var = gpu(user_ids, use_cuda)
item_var = gpu(item_ids, use_cuda)
return user_var.squeeze(), item_var.squeeze()
def idf(total_docs: int, term_freq: int) -> float:
assert term_freq <= total_docs, "The number of documents that contain a term must be smaller than total_docs"
return np.log((1.0 + total_docs) / float(term_freq + 1.0)) + 1.0
def moving_average(input_tensor: torch.Tensor, window_size: int, dimension: int):
    """Sliding-window mean computed via cumulative-sum differencing.

    NOTE(review): the slicing below hard-codes dim 1, so the result is only
    correct when ``dimension == 1`` — confirm callers never pass another dim.
    """
    ret = torch.cumsum(input_tensor, dim = dimension)
    # print("Here:", ret, ret.shape)
    ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]
    return ret[:, window_size - 1:] / window_size
def cosine_distance(a: torch.Tensor, b: torch.Tensor):
    """Pairwise distance between rows of `a` (B, L, D) and `b` (B, R, D).

    NOTE(review): despite the name, the ||a||^2 - 2ab + ||b||^2 expansion
    below computes the Euclidean (L2) distance, not cosine distance.
    Returns a (B, L, R) tensor; memory stays O(B x L x R) because the
    (B, L, R, D) difference tensor is never materialised.
    """
    assert len(a.size()) == len(b.size()) == 3
    A_square = (a * a).sum(dim = - 1) # B, L
    B_square = (b * b).sum(dim = -1) # B, R
    dot = torch.bmm(a, b.permute(0, 2, 1)) # B, L, R
    # added abs in case of negative, added 1e-10 to avoid nan gradient of sqrt
    return torch.sqrt(torch.abs(A_square.unsqueeze(-1) - 2 * dot + B_square.unsqueeze(1)) + 1e-10)
def l1_distance(a: torch.Tensor, b: torch.Tensor):
assert len(a.size()) == len(b.size()) == 3
x = a.unsqueeze(2) # (B, L, 1, D)
y = b.unsqueeze(1) # (B, 1, R, D)
return torch.norm(x - y, p = 1, dim = -1)
def _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor:
def moving_average(a: torch.Tensor, window_size: int, dimension: int):
ret = torch.cumsum(a, dim = dimension)
# print("Here:", ret, ret.shape)
ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]
return ret[:, window_size - 1:] / window_size
left = context_window_size // 2
right = context_window_size - left - 1 # in case context windows is an even number then left=x//2, right=x-x//2
y = F.pad(doc, (0, 0, left, right)) # (B, c/2 + R + c/2, D)
document_context = moving_average(y, window_size = context_window_size, dimension = 1)
document_context = document_context * doc_mask.unsqueeze(-1).float()
return document_context
def init_weights(m):
    """Xavier-initialise Linear / Conv2d modules; apply via module.apply(init_weights).

    NOTE(review): in the Conv2d branch, ``if m.bias:`` calls bool() on a
    tensor (a RuntimeError for multi-element biases) and ``xavier_uniform_``
    rejects 1-D tensors, so this branch cannot succeed for typical layers —
    the intent was presumably ``m.bias.data.fill_(0)``; confirm before relying
    on it.
    """
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)
        if hasattr(m.bias, "data"): m.bias.data.fill_(0)
    if isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight)
        if m.bias:
            torch.nn.init.xavier_uniform_(m.bias)
def auto_rnn(rnn_cell: nn.RNN, input_feats: torch.Tensor,
lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):
return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=False)[0]
def rnn_last_h(rnn_cell: nn.RNN, input_feats: torch.Tensor,
lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):
return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=True)[1]
def retrieve_elements_from_indices(tensor: torch.Tensor, indices: torch.Tensor):
flattened_tensor = tensor.flatten(start_dim=2)
output = flattened_tensor.gather(dim=2, index=indices.flatten(start_dim=2)).view_as(indices)
return output
data_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def load_images(infile):
im = Image.open(infile).convert('RGB')
return data_transforms(im)
| true | true |
f72ea474aea803c8551d2dd9a3d642ba5bdfa8bf | 6,529 | py | Python | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import OutbitTestFramework
from test_framework.util import *
class TxnMallTest(OutbitTestFramework):
    """Checks wallet accounting when a mined double-spend conflicts two
    previously-sent wallet transactions (optionally already 1-confirmed)."""
    def add_options(self, parser):
        """Register --mineblock to also exercise the 1-confirmation case."""
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        """Start the four nodes as a split network (nodes 0,1 | nodes 2,3)."""
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)
    def run_test(self):
        """Spend from two accounts on one network half, mine a conflicting
        double-spend on the other half, re-join, and verify the original
        transactions become conflicted and all balances are correct."""
        # All nodes should start with 1,250 UBT:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # First: use raw transaction API to send 1240 UBT to node1_address,
        # but don't broadcast:
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 1240
        outputs[change_address] = 1248 - 1240 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two wallet spends (40 from "foo", 20 from "bar") that
        # conflict with the raw double-spend built above:
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50UBT for another
        # matured block, minus 40, minus 20, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus 100UBT for
        # two more matured blocks, minus 1240 for the double-spend, plus fees (which are
        # negative):
        expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                                                             -1219
                                                             - 29
                                                             -1240
                                                             + 100
                                                             + fund_foo_tx["fee"]
                                                             + fund_bar_tx["fee"]
                                                             + doublespend_fee)
        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
TxnMallTest().main()
| 45.657343 | 111 | 0.606065 |
from test_framework.test_framework import OutbitTestFramework
from test_framework.util import *
class TxnMallTest(OutbitTestFramework):
    """Test handling of a deliberate double-spend ("transaction mall").

    Runs on a split network: node0 creates two spends to node1, node2
    mines a conflicting raw transaction spending the same inputs, then
    the halves are reconnected and balances/confirmations are checked.
    """

    def add_options(self, parser):
        # --mineblock additionally exercises double-spending a tx that
        # already has one confirmation.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with the network split in two halves (argument True).
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes start with the same matured balance.
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")
        # Fund the "foo" and "bar" accounts on node0 from the "" account.
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Build (but do not yet broadcast) the double-spend: it spends the
        # same funding outputs straight to node1's "from0" address.
        node1_address = self.nodes[1].getnewaddress("from0")
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 1240
        outputs[change_address] = 1248 - 1240 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two spends using 1 50 UBT coin each
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Have node0 mine a block (only when --mineblock was given):
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance: starting balance plus (negative) fees, plus one
        # extra block subsidy when a block was mined.
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # node1 sees the received amounts (sender amounts are negative).
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now hand the double-spend and its parent transactions to the
        # isolated miner (node2):
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... and mine a block so the double-spend confirms on node2.
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
        # Re-fetch transaction info: both originals must now be conflicted.
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)
        # Node0's total: starting balance, plus two more matured subsidies,
        # minus the double-spent 1240, plus fees (which are negative).
        expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                     -1219
                     - 29
                     -1240
                     + 100
                     + fund_foo_tx["fee"]
                     + fund_bar_tx["fee"]
                     + doublespend_fee)
        # Node1's "from0" account should hold exactly the double-spend.
        assert_equal(self.nodes[1].getbalance("from0"), 1240)
# Standard test-framework entry point when run as a script.
if __name__ == '__main__':
    TxnMallTest().main()
| true | true |
f72ea519259a797fa9330cd2c0f999cf42083662 | 3,975 | py | Python | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | import os
import time
import shutil
import hashlib
import mimetypes
from django.core.files.storage import FileSystemStorage
from . import settings
class VersionGenerationError(Exception):
    """Raised when generating a file version fails."""
    pass
class Conveyor(object):
    """Abstract base for file-version conveyors.

    Subclasses must assign ``storage`` and implement :meth:`run`.  Only
    local-filesystem storages are accepted, because processors rely on
    plain OS-level file operations.
    """

    storage_allowed = (FileSystemStorage,)
    storage = None

    def __init__(self, *args, **kwargs):
        storage = self.storage
        if not (storage and isinstance(storage, self.storage_allowed)):
            raise ValueError('Conveyor storage should'
                             ' be in storage_allowed (local fs).')

    def run(self, filever, force=False):
        """Generate the given file version; implemented by subclasses."""
        raise NotImplementedError
class TempFileConveyor(Conveyor):
    """Conveyor that runs a version's processors over a temporary copy.

    The source file is copied into a temporary local storage, each
    processor transforms it in turn, and the final result is saved into
    the version's destination storage.
    """

    def __init__(self, *args, **kwargs):
        # Temporary working area on the local filesystem.
        self.storage = FileSystemStorage(location=settings.TEMPORARY_DIR)
        super(TempFileConveyor, self).__init__(*args, **kwargs)

    def run(self, filever, force=False):
        """Generate ``filever``; skip if it exists unless ``force``."""
        source_file = filever.source_file
        dest_storage = filever.storage()
        replace_mode = False
        # Self-processing: destination path equals the source path and the
        # version is the special 'self' attribute - replace in place.
        if dest_storage.path(filever.path) == dest_storage.path(
                source_file.path) and filever.attrname == 'self':
            replace_mode = True
        # If the version already exists, regenerate only when forced.
        if not replace_mode and dest_storage.exists(filever.path):
            if not force:
                return
            dest_storage.delete(filever.path)
        # Open the source; remember whether it was closed so we can
        # restore that state afterwards.
        source_closed = source_file.closed
        source_closed and source_file.open()
        # Unique temp name: md5 of "<name>@<timestamp>" plus the source
        # file's extension.
        md5hash = hashlib.md5()
        md5hash.update('{}@{}'.format(source_file.name,
                                      time.time()).encode('utf-8', 'ignore'))
        tempname = os.path.splitext(source_file.name)
        tempname = '%s%s' % (md5hash.hexdigest(), tempname[1])
        tempname = self.storage.save(tempname, source_file)
        # (type, encoding) tuple guessed from the temp file name.
        mimetype = mimetypes.guess_type(tempname)
        # Close the source again only if it started out closed.
        source_closed and source_file.close()
        status = True
        try:
            # Run the processor chain; each step may rename/convert the
            # file, and an empty tempname stops the chain early.
            for processor in filever.processors():
                tempname, mimetype = processor.run(tempname, mimetype,
                                                   self.storage, filever)
                if not tempname:
                    break
        except Exception as e:
            status = False
            # Prefix the failing processor's exception with context.
            # NOTE(review): assumes e.args is non-empty - confirm.
            message = ('File version "%s" generation error for "%s" at %s.'
                       ' Real reason is: %%s'
                       % (filever.attrname,
                          source_file.name, processor.__class__))
            e.args = tuple([message % e.args[0]] + list(e.args[1:]))
            raise
        else:
            if status:
                # Save the processed temp file under the destination path.
                # todo: check new filename correctness
                if replace_mode:
                    dest_storage.delete(filever.path)
                with self.storage.open(tempname) as tempfile:
                    dest_storage.save(filever.path, tempfile)
        finally:
            # Always drop the temporary file.
            # warning: delete is unsafe with open write-mode locks, so
            # processors must be extremely careful not to leave open
            # file pointers behind.
            self.storage.delete(tempname)
        # NOTE(review): this branch looks unreachable - the except clause
        # that sets status=False re-raises, so execution never reaches
        # here with a falsy status. Confirm before removing.
        if not status:
            status = ('File version "%s" generation error for "%s" at %s.'
                      % (filever.attrname,
                         source_file.name, processor.__class__))
            raise VersionGenerationError(status)
| 37.149533 | 80 | 0.580881 | import os
import time
import shutil
import hashlib
import mimetypes
from django.core.files.storage import FileSystemStorage
from . import settings
class VersionGenerationError(Exception):
    """Raised when generating a file version fails."""
    pass
class Conveyor(object):
    """Abstract base for file-version conveyors.

    Subclasses must assign ``storage`` and implement :meth:`run`.  Only
    local-filesystem storages are accepted, because processors rely on
    plain OS-level file operations.
    """

    storage_allowed = (FileSystemStorage,)
    storage = None

    def __init__(self, *args, **kwargs):
        storage = self.storage
        if not (storage and isinstance(storage, self.storage_allowed)):
            raise ValueError('Conveyor storage should'
                             ' be in storage_allowed (local fs).')

    def run(self, filever, force=False):
        """Generate the given file version; implemented by subclasses."""
        raise NotImplementedError
class TempFileConveyor(Conveyor):
    """Conveyor that runs a version's processors over a temporary copy."""

    def __init__(self, *args, **kwargs):
        # Temporary working area on the local filesystem.
        self.storage = FileSystemStorage(location=settings.TEMPORARY_DIR)
        super(TempFileConveyor, self).__init__(*args, **kwargs)

    def run(self, filever, force=False):
        """Generate ``filever``; skip if it exists unless ``force``."""
        source_file = filever.source_file
        dest_storage = filever.storage()
        replace_mode = False
        # In-place replacement of the source itself ('self' version).
        if dest_storage.path(filever.path) == dest_storage.path(
                source_file.path) and filever.attrname == 'self':
            replace_mode = True
        # Existing versions are regenerated only when forced.
        if not replace_mode and dest_storage.exists(filever.path):
            if not force:
                return
            dest_storage.delete(filever.path)
        # Open the source, remembering its original open/closed state.
        source_closed = source_file.closed
        source_closed and source_file.open()
        # Unique temp name: md5("<name>@<timestamp>") + source extension.
        md5hash = hashlib.md5()
        md5hash.update('{}@{}'.format(source_file.name,
                                      time.time()).encode('utf-8', 'ignore'))
        tempname = os.path.splitext(source_file.name)
        tempname = '%s%s' % (md5hash.hexdigest(), tempname[1])
        tempname = self.storage.save(tempname, source_file)
        # (type, encoding) tuple guessed from the temp file name.
        mimetype = mimetypes.guess_type(tempname)
        source_closed and source_file.close()
        status = True
        try:
            # Processor chain; an empty tempname stops the chain early.
            for processor in filever.processors():
                tempname, mimetype = processor.run(tempname, mimetype,
                                                   self.storage, filever)
                if not tempname:
                    break
        except Exception as e:
            status = False
            # Prefix the failing processor's exception with context.
            # NOTE(review): assumes e.args is non-empty - confirm.
            message = ('File version "%s" generation error for "%s" at %s.'
                       ' Real reason is: %%s'
                       % (filever.attrname,
                          source_file.name, processor.__class__))
            e.args = tuple([message % e.args[0]] + list(e.args[1:]))
            raise
        else:
            if status:
                # Save the processed temp file to the destination.
                if replace_mode:
                    dest_storage.delete(filever.path)
                with self.storage.open(tempname) as tempfile:
                    dest_storage.save(filever.path, tempfile)
        finally:
            # Always drop the temporary file.
            self.storage.delete(tempname)
        # NOTE(review): appears unreachable - the except branch re-raises,
        # so status is always truthy here. Confirm before removing.
        if not status:
            status = ('File version "%s" generation error for "%s" at %s.'
                      % (filever.attrname,
                         source_file.name, processor.__class__))
            raise VersionGenerationError(status)
| true | true |
f72ea553a3dff77429beda1663ea93edb12f2be7 | 56 | py | Python | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 30 | 2021-04-27T15:26:28.000Z | 2022-03-29T17:12:36.000Z | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 12 | 2021-04-28T11:43:15.000Z | 2022-03-03T17:48:17.000Z | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 10 | 2021-04-29T00:31:08.000Z | 2022-03-14T13:49:54.000Z | import os
import sys
# Make the current working directory importable for the test modules.
sys.path.insert(0, (os.getcwd()))
| 11.2 | 33 | 0.696429 | import os
import sys
# Make the current working directory importable for the test modules.
sys.path.insert(0, (os.getcwd()))
| true | true |
f72ea6730b4f2126d264157f36867ba7c80f59b1 | 8,491 | py | Python | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 110 | 2019-07-12T11:46:31.000Z | 2022-02-18T19:47:23.000Z | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 29 | 2018-10-23T21:28:56.000Z | 2021-02-10T14:42:59.000Z | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 55 | 2018-10-20T13:40:39.000Z | 2022-03-07T07:13:02.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
    """End-to-end test of the account (label) RPCs on a single node."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)
        # Note each time we call generate, all generated coins go into
        # the same address, so we call twice to get two addresses w/50 each
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 500)
        # there should be 2 address groups
        # each with 1 address with a balance of 50 Bitcoins
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)
        # the addresses aren't linked now, but will be after we send to the
        # common address
        linked_addresses = set()
        #for address_group in address_groups:
        #    assert_equal(len(address_group), 1)
        #    assert_equal(len(address_group[0]), 2)
        #    assert_equal(address_group[0][1], 250)
        #    linked_addresses.add(address_group[0][0])
        # send 50 from each address to a third address not in this wallet
        # There's some fee that will come back to us when the miner reward
        # matures.
        node.settxfee(0)
        common_address = "y9B3dwrBGGs3yVkyEHm68Yn36Wp2Rt7Vtd"
        txid = node.sendmany("", {common_address: 100}, 1)
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']
        # there should be 1 address group, with the previously
        # unlinked addresses now linked (they both have 0 balance)
        #address_groups = node.listaddressgroupings()
        #assert_equal(len(address_groups), 1)
        #assert_equal(len(address_groups[0]), 1)
        #assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
        #assert_equal([a[1] for a in address_groups[0]], [0, 0])
        node.generate(1)
        # we want to reset so that the "" account has what's expected.
        # otherwise we're off by exactly the fee amount as that's mined
        # and matures in the next 100 blocks
        node.sendfrom("", common_address, float(fee))
        amount_to_send = 5.0
        # Create accounts and make sure subsequent account API calls
        # recognize the account/address associations.
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Send a transaction to each account, and make sure this forces
        # getaccountaddress to generate a new receiving address.
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Check the amounts received.
        node.generate(1)
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
        # Check that sendfrom account reduces listaccounts balances.
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 10)
            node.move(account.name, "", float(node.getbalance(account.name)))
            account.verify(node)
        node.generate(101)
        #expected_account_balances = {"": 26149.99985650}
        #for account in accounts:
        #    expected_account_balances[account.name] = 0
        #assert_equal(node.listaccounts(), expected_account_balances)
        #assert_equal(node.getbalance(""), 26149.99985650)
        # Check that setaccount can assign an account to a new unused address.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))
        # Check that addmultisigaddress can assign accounts.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        #node.generate(101)
        #for account in accounts:
        #    assert_equal(node.getbalance(account.name), 50)
        # Check that setaccount can change the account of an address from a
        # different account.
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        # Check that setaccount can change the account of an address which
        # is the receiving address of a different account.
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        # Check that setaccount can set the account of an address already
        # in the account. This is a no-op.
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        # Check that setaccount can set the account of an address which is
        # already the receiving address of the account. It would probably make
        # sense for this to be a no-op, but right now it resets the receiving
        # address, causing getaccountaddress to return a brand new address.
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Book-keeping for one wallet account: its name, its current
    receiving address, and every address ever assigned to it."""

    def __init__(self, name):
        self.name = name             # account name
        self.receive_address = None  # current receiving address, if any
        self.addresses = []          # all addresses assigned so far

    def add_address(self, address):
        # Registering the same address twice is a test bug.
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)

    def add_receive_address(self, address):
        # A receiving address is also an ordinary member of the account.
        self.add_address(address)
        self.receive_address = address

    def verify(self, node):
        """Check the node's account RPCs against our local book-keeping."""
        receive = self.receive_address
        if receive is not None:
            assert receive in self.addresses
            assert_equal(node.getaccountaddress(self.name), receive)
        for addr in self.addresses:
            assert_equal(node.getaccount(addr), self.name)
        reported = set(node.getaddressesbyaccount(self.name))
        assert_equal(reported, set(self.addresses))
def change_account(node, address, old_account, new_account):
    """Reassign ``address`` from ``old_account`` to ``new_account`` via
    setaccount, then verify both accounts against the node."""
    assert_equal(address in old_account.addresses, True)
    node.setaccount(address, new_account.name)
    old_account.addresses.remove(address)
    new_account.add_address(address)
    # Calling setaccount on an account's current receiving address resets
    # that account's receiving address: getaccountaddress hands out a
    # brand new one, which must not belong to either account yet.
    if address == old_account.receive_address:
        replacement = node.getaccountaddress(old_account.name)
        assert_equal(replacement not in old_account.addresses, True)
        assert_equal(replacement not in new_account.addresses, True)
        old_account.add_receive_address(replacement)
    old_account.verify(node)
    new_account.verify(node)
# Standard test-framework entry point when run as a script.
if __name__ == '__main__':
    WalletAccountsTest().main()
| 41.827586 | 83 | 0.668119 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
    """End-to-end test of the account (label) RPCs on a single node."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        node = self.nodes[0]
        # Fresh chain: no UTXOs yet.
        assert_equal(len(node.listunspent()), 0)
        # Each generate call pays the same address; mine 1 + 101 blocks
        # so the first reward matures.
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 500)
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)
        linked_addresses = set()
        # Send to an external address with zero fee configured; the fee
        # amount comes back once the miner reward matures.
        node.settxfee(0)
        common_address = "y9B3dwrBGGs3yVkyEHm68Yn36Wp2Rt7Vtd"
        txid = node.sendmany("", {common_address: 100}, 1)
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']
        node.generate(1)
        # Reset the "" account so later balance checks are exact.
        node.sendfrom("", common_address, float(fee))
        amount_to_send = 5.0
        # Create accounts; the account RPCs must track the associations.
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Sending to a receiving address forces a new one to be generated.
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        node.generate(1)
        # Check received amounts per address and per account.
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
        # sendfrom must reduce the per-account balances.
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 10)
            node.move(account.name, "", float(node.getbalance(account.name)))
            account.verify(node)
        node.generate(101)
        # setaccount can assign an account to a fresh unused address.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))
        # addmultisigaddress can assign accounts as well.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        # setaccount moving addresses between accounts, including each
        # account's receiving address (which gets reset as a side effect).
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Book-keeping for one wallet account: its name, its current
    receiving address, and every address ever assigned to it."""

    def __init__(self, name):
        self.name = name             # account name
        self.receive_address = None  # current receiving address, if any
        self.addresses = []          # all addresses assigned so far

    def add_address(self, address):
        # Registering the same address twice is a test bug.
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)

    def add_receive_address(self, address):
        # A receiving address is also an ordinary member of the account.
        self.add_address(address)
        self.receive_address = address

    def verify(self, node):
        """Check the node's account RPCs against our local book-keeping."""
        receive = self.receive_address
        if receive is not None:
            assert receive in self.addresses
            assert_equal(node.getaccountaddress(self.name), receive)
        for addr in self.addresses:
            assert_equal(node.getaccount(addr), self.name)
        reported = set(node.getaddressesbyaccount(self.name))
        assert_equal(reported, set(self.addresses))
def change_account(node, address, old_account, new_account):
    """Reassign ``address`` from ``old_account`` to ``new_account`` via
    setaccount, then verify both accounts against the node."""
    assert_equal(address in old_account.addresses, True)
    node.setaccount(address, new_account.name)
    old_account.addresses.remove(address)
    new_account.add_address(address)
    # setaccount on an account's current receiving address resets that
    # account's receiving address to a brand new one, which must not
    # belong to either account yet.
    if address == old_account.receive_address:
        replacement = node.getaccountaddress(old_account.name)
        assert_equal(replacement not in old_account.addresses, True)
        assert_equal(replacement not in new_account.addresses, True)
        old_account.add_receive_address(replacement)
    old_account.verify(node)
    new_account.verify(node)
# Standard test-framework entry point when run as a script.
if __name__ == '__main__':
    WalletAccountsTest().main()
| true | true |
f72ea7749502804e63cabefce69af2c3762767e4 | 2,268 | py | Python | server/tests/test_patients.py | ishitakapoor26/Nutrihelp | 5bac42aaee61884f9ee7415caf441e80b7b03b48 | [
"MIT"
] | 22 | 2021-02-15T10:30:59.000Z | 2022-01-09T07:10:36.000Z | server/tests/test_patients.py | Ayonijakaushik19/Nutrihelp | 85926b187a6bfcf80f1f1cd60667ed3d14dce0be | [
"MIT"
] | 51 | 2021-02-27T15:42:15.000Z | 2022-03-01T15:02:03.000Z | server/tests/test_patients.py | Ayonijakaushik19/Nutrihelp | 85926b187a6bfcf80f1f1cd60667ed3d14dce0be | [
"MIT"
] | 25 | 2021-02-14T17:49:23.000Z | 2022-02-27T18:27:39.000Z | from bson.json_util import dumps
from ..app import app
from json import dumps as pretty
class glo:
    # Cross-test scratch space: patient ids collected by
    # test_get_all_patients and reused by the get/update/delete tests.
    patient_id = []
# Shared instance used to pass state between the tests below.
g = glo()

# Two user ids; the second is 24 hex chars (Mongo ObjectId shaped), the
# first is not - presumably the invalid case; confirm with the API.
userid = ['1k33224', '60961d77a7090edb5b69c62c']

# Payload used when creating patients.
patient = {
    'name': 'Abhishek shrivastava',
    'age': 19,
    'gender': 'M',
    'mobile': '9022930339'
}

# Payload used for updates; includes a nested vitals dict.
patient2 = {
    'name': 'Avinash',
    'age': 39,
    'gender': 'M',
    'mobile': '2992123212',
    'stats': {
        'bp': 223,
        'glucose': 213,
        'weight': 922
    }
}

# POST bodies for /patients: the same patient under each user id.
data = [{
    'userid': userid[0],
    'patient': patient,
},
    {
        'userid': userid[1],
        'patient': patient,
    }]
def pprint(data):
    """Print *data* as JSON with sorted keys and 4-space indentation."""
    rendered = pretty(data, sort_keys=True, indent=4)
    print(rendered)
def test_add_patient():
    """POST every fixture payload to /patients and expect HTTP 200."""
    with app.test_client() as client:
        for payload in data:
            res = client.post('/patients', json=payload)
            pprint(res.json)
            assert res.status_code == 200
def test_get_all_patients():
    """List patients for each user and cache the returned ids.

    The cached ids (plus the raw second userid) are consumed by the
    later get/update/delete tests via ``g.patient_id``.
    """
    with app.test_client() as client:
        for uid in userid:  # renamed from `id`, which shadowed the builtin
            res = client.get('/patients/' + uid)
            pprint(res.json)
            if isinstance(res.json, list):  # idiomatic type check
                # Overwrites the cache on every iteration, so the last
                # user's listing (plus the appended raw id) wins.
                g.patient_id = [d.get('id') for d in res.json]
                g.patient_id.append({'$oid': userid[1]})
            assert res.status_code == 200
def test_patient_get():
    """GET each cached patient under each user id; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.get(endpoint)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_update():
    """PUT updated details for every cached patient; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.put(endpoint, json=patient2)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_delete():
    """DELETE every cached patient under each user id; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.delete(endpoint)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_get_after_delete():
    # Re-run the GET test after deletion.
    # NOTE(review): test_patient_get still asserts HTTP 200, so this only
    # passes if the API answers 200 for deleted patients - confirm intent.
    test_patient_get()
| 23.142857 | 64 | 0.53351 | from bson.json_util import dumps
from ..app import app
from json import dumps as pretty
class glo:
    # Cross-test scratch space: patient ids collected by
    # test_get_all_patients and reused by the get/update/delete tests.
    patient_id = []
# Shared instance used to pass state between the tests below.
g = glo()

# Two user ids; the second is 24 hex chars (Mongo ObjectId shaped), the
# first is not - presumably the invalid case; confirm with the API.
userid = ['1k33224', '60961d77a7090edb5b69c62c']

# Payload used when creating patients.
patient = {
    'name': 'Abhishek shrivastava',
    'age': 19,
    'gender': 'M',
    'mobile': '9022930339'
}

# Payload used for updates; includes a nested vitals dict.
patient2 = {
    'name': 'Avinash',
    'age': 39,
    'gender': 'M',
    'mobile': '2992123212',
    'stats': {
        'bp': 223,
        'glucose': 213,
        'weight': 922
    }
}

# POST bodies for /patients: the same patient under each user id.
data = [{
    'userid': userid[0],
    'patient': patient,
},
    {
        'userid': userid[1],
        'patient': patient,
    }]
def pprint(data):
    """Print *data* as JSON with sorted keys and 4-space indentation."""
    rendered = pretty(data, sort_keys=True, indent=4)
    print(rendered)
def test_add_patient():
    """POST every fixture payload to /patients and expect HTTP 200."""
    with app.test_client() as client:
        for payload in data:
            res = client.post('/patients', json=payload)
            pprint(res.json)
            assert res.status_code == 200
def test_get_all_patients():
    """List patients for each user and cache the returned ids.

    The cached ids (plus the raw second userid) are consumed by the
    later get/update/delete tests via ``g.patient_id``.
    """
    with app.test_client() as client:
        for uid in userid:  # renamed from `id`, which shadowed the builtin
            res = client.get('/patients/' + uid)
            pprint(res.json)
            if isinstance(res.json, list):  # idiomatic type check
                # Overwrites the cache on every iteration, so the last
                # user's listing (plus the appended raw id) wins.
                g.patient_id = [d.get('id') for d in res.json]
                g.patient_id.append({'$oid': userid[1]})
            assert res.status_code == 200
def test_patient_get():
    """GET each cached patient under each user id; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.get(endpoint)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_update():
    """PUT updated details for every cached patient; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.put(endpoint, json=patient2)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_delete():
    """DELETE every cached patient under each user id; expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/'.join(['/patients', uid, pid['$oid']])
                res = client.delete(endpoint)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_get_after_delete():
    # Re-run the GET test after deletion.
    # NOTE(review): test_patient_get still asserts HTTP 200, so this only
    # passes if the API answers 200 for deleted patients - confirm intent.
    test_patient_get()
| true | true |
f72ea7d8dd96b72bf999b8d29730d781aa003ace | 321 | py | Python | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-05 00:54
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``created_date`` field from the ``main.Project`` model."""

    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='created_date',
        ),
    ]
| 17.833333 | 47 | 0.582555 |
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``created_date`` field from the ``main.Project`` model."""

    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='created_date',
        ),
    ]
| true | true |
f72ea823aa4b99280f01169d6a40994760c8465e | 3,674 | py | Python | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 3 | 2020-06-01T04:17:18.000Z | 2020-12-18T03:05:55.000Z | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 1 | 2020-04-25T08:01:59.000Z | 2020-04-25T08:01:59.000Z | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 7 | 2020-04-26T10:02:36.000Z | 2021-06-08T05:12:46.000Z | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class NewsScraperChallengeSpiderMiddleware:
    """Spider middleware for the news_scraper_challenge project.

    All hooks are left at Scrapy's pass-through defaults: responses,
    results and start requests flow through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy's factory hook: build the middleware and subscribe to
        # the spider_opened signal for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None lets the response continue into the spider.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every scraped item/request unchanged.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No custom handling; defer to other middleware / defaults.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests untouched.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class NewsScraperChallengeDownloaderMiddleware:
    """Downloader middleware for the news_scraper_challenge project.

    All hooks are left at Scrapy's pass-through defaults: requests and
    responses continue through the normal download pipeline.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy's factory hook: build the middleware and subscribe to
        # the spider_opened signal for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None -> continue normal download handling for this request.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; let other middleware / defaults handle it.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 35.326923 | 78 | 0.676647 |
from scrapy import signals
from itemadapter import is_item, ItemAdapter
class NewsScraperChallengeSpiderMiddleware:
    """Default pass-through spider middleware (Scrapy template)."""

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook; also subscribe to the spider_opened signal.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # None -> let the response continue into the spider.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward all scraped items/requests unchanged.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # No custom handling; defer to other middleware / defaults.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests unchanged.
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class NewsScraperChallengeDownloaderMiddleware:
    """Default pass-through downloader middleware for news_scraper_challenge."""
    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and hook it up to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        """Let each outgoing request continue through the downloader (None)."""
        return None
    def process_response(self, request, response, spider):
        """Return the downloaded response unchanged."""
        return response
    def process_exception(self, request, exception, spider):
        """Ignore download errors; returning None lets other middleware handle them."""
        pass
    def spider_opened(self, spider):
        """Log when the spider starts."""
        spider.logger.info('Spider opened: %s' % spider.name)
| true | true |
f72ea85064b2c2a6f54570529a6ce35ac41d67bd | 488 | py | Python | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | 1 | 2019-03-08T03:06:43.000Z | 2019-03-08T03:06:43.000Z | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:54:28.000Z | 2020-10-29T19:54:28.000Z | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
from ..subarray import Subarray
def test1(tmpdir, ret=False):
    """Round-trip a Subarray through an ASDF tree.

    When ``ret`` is true, the constructed Subarray is returned instead of
    being serialised, so other tests can reuse it as a fixture object.
    """
    # Fix: dataset-row residue that had been fused onto the final line
    # (breaking the syntax) has been removed; logic is unchanged.
    subarray = Subarray(offset=(100, 131), size=(256, 256), name='SA1')
    tree = {'subarray': subarray}
    if ret:
        return subarray
    helpers.assert_roundtrip_tree(tree, tmpdir)
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
from ..subarray import Subarray
def test1(tmpdir, ret=False):
    """Round-trip a Subarray through an ASDF tree (or return it when ret=True)."""
    # Fix: dataset-row residue that had been fused onto the final line
    # (breaking the syntax) has been removed; logic is unchanged.
    subarray = Subarray(offset=(100, 131), size=(256, 256), name='SA1')
    tree = {'subarray': subarray}
    if ret:
        return subarray
    helpers.assert_roundtrip_tree(tree, tmpdir)
f72ea9c1cb6d29549be75d7beed55fe01e257814 | 707 | py | Python | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run(self, Input):
    """Set ``self.ans`` to x + y, deliberately failing when the sum is positive.

    This looks like a deliberate-failure fixture (the file lives under
    RunFailures tests): any point with ``x + y > 0`` raises, exercising the
    framework's handling of failed runs.

    Args:
        self: object whose ``x`` and ``y`` attributes hold the inputs and
            whose ``ans`` attribute receives the result.
        Input: unused; present to satisfy the external-model interface.

    Raises:
        IOError: intentionally, whenever ``x + y > 0``.
    """
    total = self.x + self.y  # compute once instead of twice
    if total > 0:
        raise IOError('Testing, testing, 1,2,3.')
    self.ans = total
| 39.277778 | 74 | 0.746818 |
def run(self,Input):
  """Set ``self.ans`` to x + y, raising IOError when the sum is positive.

  Appears to be a deliberate-failure fixture: inputs with x + y > 0 abort
  the run on purpose. ``Input`` is unused.
  """
  # Intentional failure path exercised by the surrounding test suite.
  if self.x+self.y > 0:
    raise IOError('Testing, testing, 1,2,3.')
  self.ans = self.x+self.y
| true | true |
f72eaa3113edd3d25a929e626271bcff8fa160e6 | 1,162 | py | Python | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 18:52:56 2019
@author: Emmanuel-Temitope
"""
import os
import time
#location of directory
source = [r'source', '"Another source"']
#targer directory
target_dir = r'target'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
today = target_dir + os.sep + time.strftime('%Y%m%d')
#current time will be the zip archive
now = time.strftime('%H%M%S')
#User Comment
comment = input('Enter a comment --> ')
#check if comment was entered
if len(comment) == 0:
target = today + os.sep + now + '.zip'
else:
target = today + os.sep + now + '_' + \
comment.replace(" ", "_")
print(time.asctime())
#create the subdirectory if it isn't there
if not os.path.exists(today):
os.mkdir(today)
print('Successfully created directory', today)
#we use the zip command to put the files in a zip archive
zip_command = 'zip -r {0} {1}'.format(target, ' '.join(source))
#Run the backup
print('Zip command is: ')
print(zip_command)
print('Running...')
if os.system(zip_command) == 0:
print('Successful backup to', target,'at ', time.asctime())
else:
print('Backup FAILED')
| 22.784314 | 63 | 0.654905 |
# Back up each directory listed in ``source`` into a timestamped zip
# archive stored under ``target``/<YYYYMMDD>/.
import os
import time
# Directories to back up; the pre-quoted name survives shell word-splitting.
source = [r'source', '"Another source"']
# Root directory that receives the backups.
target_dir = r'target'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
# One subdirectory per day, e.g. target/20190609.
today = target_dir + os.sep + time.strftime('%Y%m%d')
# The current time becomes the archive's base name.
now = time.strftime('%H%M%S')
# Optional user comment appended to the archive name.
comment = input('Enter a comment --> ')
if len(comment) == 0:
    target = today + os.sep + now + '.zip'
else:
    # NOTE(review): this branch omits the '.zip' suffix; the external zip
    # tool appends one for extension-less names, but the printed target
    # then differs from the file actually created -- confirm intended.
    target = today + os.sep + now + '_' + \
        comment.replace(" ", "_")
print(time.asctime())
# Create the day's subdirectory if it isn't there yet.
if not os.path.exists(today):
    os.mkdir(today)
    print('Successfully created directory', today)
# We use the zip command to put the files in a zip archive.
zip_command = 'zip -r {0} {1}'.format(target, ' '.join(source))
# Run the backup.
print('Zip command is: ')
print(zip_command)
print('Running...')
if os.system(zip_command) == 0:
    print('Successful backup to', target,'at ', time.asctime())
else:
    print('Backup FAILED')
| true | true |
f72eaa8f5af2633482ecb8f03a085435a35f5fa3 | 4,852 | py | Python | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 22:53:44 2020
@author: Emmett
"""
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
    """
    This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).
    There are three sets of weights introduced W_a, U_a, and V_a.

    Fix: dataset-row residue that had been fused onto the closing line of
    ``compute_output_shape`` has been removed; the layer logic is unchanged.
    """
    def __init__(self, **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # input_shape is [encoder_output_shape, decoder_output_shape];
        # index [i][2] is each sequence's feature (latent) dimension.
        assert isinstance(input_shape, list)
        # Create a trainable weight variable for this layer.
        self.W_a = self.add_weight(name='W_a',
                                   shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.U_a = self.add_weight(name='U_a',
                                   shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.V_a = self.add_weight(name='V_a',
                                   shape=tf.TensorShape((input_shape[0][2], 1)),
                                   initializer='uniform',
                                   trainable=True)
        super(AttentionLayer, self).build(input_shape)  # Be sure to call this at the end
    def call(self, inputs, verbose=False):
        """
        inputs: [encoder_output_sequence, decoder_output_sequence]
        Returns (context_vectors, attention_energies), both stepped over the
        decoder sequence via K.rnn.
        """
        assert type(inputs) == list
        encoder_out_seq, decoder_out_seq = inputs
        if verbose:
            print('encoder_out_seq>', encoder_out_seq.shape)
            print('decoder_out_seq>', decoder_out_seq.shape)
        def energy_step(inputs, states):
            """ Step function for computing energy for a single decoder state
            inputs: (batchsize * 1 * de_in_dim)
            states: (batchsize * 1 * de_latent_dim)
            """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            """ Some parameters required for shaping tensors"""
            en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
            de_hidden = inputs.shape[-1]
            """ Computing S.Wa where S=[s0, s1, ..., si]"""
            # <= batch size * en_seq_len * latent_dim
            W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
            """ Computing hj.Ua """
            U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)  # <= batch_size, 1, latent_dim
            if verbose:
                print('Ua.h>', U_a_dot_h.shape)
            """ tanh(S.Wa + hj.Ua) """
            # <= batch_size*en_seq_len, latent_dim
            Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
            if verbose:
                print('Ws+Uh>', Ws_plus_Uh.shape)
            """ softmax(va.tanh(S.Wa + hj.Ua)) """
            # <= batch_size, en_seq_len
            e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
            # <= batch_size, en_seq_len
            e_i = K.softmax(e_i)
            if verbose:
                print('ei>', e_i.shape)
            return e_i, [e_i]
        def context_step(inputs, states):
            """ Step function for computing ci using ei """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            # <= batch_size, hidden_size
            c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
            if verbose:
                print('ci>', c_i.shape)
            return c_i, [c_i]
        # Dummy initial states for K.rnn; their values are never read because
        # each step returns its own output as the next state.
        fake_state_c = K.sum(encoder_out_seq, axis=1)
        fake_state_e = K.sum(encoder_out_seq, axis=2)  # <= (batch_size, enc_seq_len, latent_dim
        """ Computing energy outputs """
        # e_outputs => (batch_size, de_seq_len, en_seq_len)
        last_out, e_outputs, _ = K.rnn(
            energy_step, decoder_out_seq, [fake_state_e],
        )
        """ Computing context vectors """
        last_out, c_outputs, _ = K.rnn(
            context_step, e_outputs, [fake_state_c],
        )
        return c_outputs, e_outputs
    def compute_output_shape(self, input_shape):
        """ Outputs produced by the layer """
        return [
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
        ]
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
    """Bahdanau (additive) attention (https://arxiv.org/pdf/1409.0473.pdf).

    Introduces three trainable weight sets: W_a, U_a and V_a.
    Fix: dataset-row residue fused onto the final line has been removed;
    the layer logic is unchanged.
    """
    def __init__(self, **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # input_shape is [encoder_output_shape, decoder_output_shape].
        assert isinstance(input_shape, list)
        self.W_a = self.add_weight(name='W_a',
                                   shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.U_a = self.add_weight(name='U_a',
                                   shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.V_a = self.add_weight(name='V_a',
                                   shape=tf.TensorShape((input_shape[0][2], 1)),
                                   initializer='uniform',
                                   trainable=True)
        super(AttentionLayer, self).build(input_shape)
    def call(self, inputs, verbose=False):
        """inputs: [encoder_output_sequence, decoder_output_sequence]."""
        assert type(inputs) == list
        encoder_out_seq, decoder_out_seq = inputs
        if verbose:
            print('encoder_out_seq>', encoder_out_seq.shape)
            print('decoder_out_seq>', decoder_out_seq.shape)
        def energy_step(inputs, states):
            """Compute attention energies for a single decoder state."""
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
            de_hidden = inputs.shape[-1]
            W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
            U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)
            if verbose:
                print('Ua.h>', U_a_dot_h.shape)
            Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
            if verbose:
                print('Ws+Uh>', Ws_plus_Uh.shape)
            e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
            e_i = K.softmax(e_i)
            if verbose:
                print('ei>', e_i.shape)
            return e_i, [e_i]
        def context_step(inputs, states):
            """Compute the context vector c_i from the energies e_i."""
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
            if verbose:
                print('ci>', c_i.shape)
            return c_i, [c_i]
        # Dummy initial states for K.rnn; each step returns its own output
        # as the next state, so these values are never read.
        fake_state_c = K.sum(encoder_out_seq, axis=1)
        fake_state_e = K.sum(encoder_out_seq, axis=2)
        last_out, e_outputs, _ = K.rnn(
            energy_step, decoder_out_seq, [fake_state_e],
        )
        last_out, c_outputs, _ = K.rnn(
            context_step, e_outputs, [fake_state_c],
        )
        return c_outputs, e_outputs
    def compute_output_shape(self, input_shape):
        """Shapes of (context_vectors, attention_energies)."""
        return [
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
        ]
f72eabe57eec9d5c6a19eec2a538f36cdca7eb4c | 637 | py | Python | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | 3 | 2019-01-23T19:11:33.000Z | 2022-02-25T02:06:51.000Z | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | null | null | null | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | 1 | 2022-02-25T02:06:52.000Z | 2022-02-25T02:06:52.000Z | from __future__ import absolute_import
import cbapi.six
__title__ = 'cbapi'
__author__ = 'Carbon Black Developer Network'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Carbon Black'
__version__ = '1.3.6'
# New API as of cbapi 0.9.0
from cbapi.response.rest_api import CbEnterpriseResponseAPI, CbResponseAPI
from cbapi.protection.rest_api import CbEnterpriseProtectionAPI, CbProtectionAPI
from cbapi.psc.defense import CbDefenseAPI
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.livequery import CbLiveQueryAPI
# for compatibility with Cb Defense code from cbapi < 1.4.0
import cbapi.psc.defense as defense
| 31.85 | 80 | 0.821036 | from __future__ import absolute_import
import cbapi.six
__title__ = 'cbapi'
__author__ = 'Carbon Black Developer Network'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Carbon Black'
__version__ = '1.3.6'
from cbapi.response.rest_api import CbEnterpriseResponseAPI, CbResponseAPI
from cbapi.protection.rest_api import CbEnterpriseProtectionAPI, CbProtectionAPI
from cbapi.psc.defense import CbDefenseAPI
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.livequery import CbLiveQueryAPI
import cbapi.psc.defense as defense
| true | true |
f72eac1b900c7f609802611bd21042c02362d0e3 | 16,499 | py | Python | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | import asyncio
import itertools
import logging
import time
from .telegrambaseclient import TelegramBaseClient
from .. import errors, utils
from ..tl import TLObject, TLRequest, types, functions
__log__ = logging.getLogger(__name__)
_NOT_A_REQUEST = TypeError('You can only invoke requests, not types!')
class UserMethods(TelegramBaseClient):
    async def __call__(self, request, ordered=False):
        for r in (request if utils.is_list_like(request) else (request,)):
            if not isinstance(r, TLRequest):
                raise _NOT_A_REQUEST
            await r.resolve(self, utils)
            # Avoid making the request if it's already in a flood wait
            if r.CONSTRUCTOR_ID in self._flood_waited_requests:
                due = self._flood_waited_requests[r.CONSTRUCTOR_ID]
                diff = round(due - time.time())
                if diff <= 3:  # Flood waits below 3 seconds are "ignored"
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                elif diff <= self.flood_sleep_threshold:
                    __log__.info('Sleeping early for %ds on flood wait', diff)
                    await asyncio.sleep(diff, loop=self._loop)
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                else:
                    raise errors.FloodWaitError(capture=diff)
        request_index = 0
        self._last_request = time.time()
        for _ in range(self._request_retries):
            try:
                future = self._sender.send(request, ordered=ordered)
                if isinstance(future, list):
                    results = []
                    for f in future:
                        result = await f
                        self.session.process_entities(result)
                        results.append(result)
                        request_index += 1
                    return results
                else:
                    result = await future
                    self.session.process_entities(result)
                    return result
            except (errors.ServerError, errors.RpcCallFailError) as e:
                __log__.warning('Telegram is having internal issues %s: %s',
                                e.__class__.__name__, e)
            except (errors.FloodWaitError, errors.FloodTestPhoneWaitError) as e:
                if utils.is_list_like(request):
                    request = request[request_index]
                self._flood_waited_requests\
                    [request.CONSTRUCTOR_ID] = time.time() + e.seconds
                if e.seconds <= self.flood_sleep_threshold:
                    __log__.info('Sleeping for %ds on flood wait', e.seconds)
                    await asyncio.sleep(e.seconds, loop=self._loop)
                else:
                    raise
            except (errors.PhoneMigrateError, errors.NetworkMigrateError,
                    errors.UserMigrateError) as e:
                __log__.info('Phone migrated to %d', e.new_dc)
                should_raise = isinstance(e, (
                    errors.PhoneMigrateError, errors.NetworkMigrateError
                ))
                if should_raise and await self.is_user_authorized():
                    raise
                await self._switch_dc(e.new_dc)
        raise ValueError('Number of retries reached 0')
    # region Public methods
    async def get_me(self, input_peer=False):
        """
        Gets "me" (the self user) which is currently authenticated,
        or None if the request fails (hence, not authenticated).
        Args:
            input_peer (`bool`, optional):
                Whether to return the :tl:`InputPeerUser` version or the normal
                :tl:`User`. This can be useful if you just need to know the ID
                of yourself.
        Returns:
            Your own :tl:`User`.
        """
        if input_peer and self._self_input_peer:
            return self._self_input_peer
        try:
            me = (await self(
                functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
            if not self._self_input_peer:
                self._self_input_peer = utils.get_input_peer(
                    me, allow_self=False
                )
            return self._self_input_peer if input_peer else me
        except errors.UnauthorizedError:
            return None
    async def is_user_authorized(self):
        """
        Returns ``True`` if the user is authorized.
        """
        if self._self_input_peer is not None or self._state.pts != -1:
            return True
        try:
            self._state = await self(functions.updates.GetStateRequest())
            return True
        except errors.RPCError:
            return False
    async def get_entity(self, entity):
        """
        Turns the given entity into a valid Telegram :tl:`User`, :tl:`Chat`
        or :tl:`Channel`. You can also pass a list or iterable of entities,
        and they will be efficiently fetched from the network.
        entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
            If an username is given, **the username will be resolved** making
            an API call every time. Resolving usernames is an expensive
            operation and will start hitting flood waits around 50 usernames
            in a short period of time.
            If you want to get the entity for a *cached* username, you should
            first `get_input_entity(username) <get_input_entity>` which will
            use the cache), and then use `get_entity` with the result of the
            previous call.
            Similar limits apply to invite links, and you should use their
            ID instead.
            Using phone numbers, exact names, integer IDs or :tl:`Peer`
            rely on a `get_input_entity` first, which in turn needs the
            entity to be in cache, unless a :tl:`InputPeer` was passed.
            Unsupported types will raise ``TypeError``.
            If the entity can't be found, ``ValueError`` will be raised.
        Returns:
            :tl:`User`, :tl:`Chat` or :tl:`Channel` corresponding to the
            input entity. A list will be returned if more than one was given.
        """
        single = not utils.is_list_like(entity)
        if single:
            entity = (entity,)
        # Group input entities by string (resolve username),
        # input users (get users), input chat (get chats) and
        # input channels (get channels) to get the most entities
        # in the less amount of calls possible.
        inputs = []
        for x in entity:
            if isinstance(x, str):
                inputs.append(x)
            else:
                inputs.append(await self.get_input_entity(x))
        users = [x for x in inputs
                 if isinstance(x, (types.InputPeerUser, types.InputPeerSelf))]
        chats = [x.chat_id for x in inputs
                 if isinstance(x, types.InputPeerChat)]
        channels = [x for x in inputs
                    if isinstance(x, types.InputPeerChannel)]
        if users:
            # GetUsersRequest has a limit of 200 per call
            tmp = []
            while users:
                curr, users = users[:200], users[200:]
                tmp.extend(await self(functions.users.GetUsersRequest(curr)))
            users = tmp
        if chats:  # TODO Handle chats slice?
            chats = (await self(
                functions.messages.GetChatsRequest(chats))).chats
        if channels:
            channels = (await self(
                functions.channels.GetChannelsRequest(channels))).chats
        # Merge users, chats and channels into a single dictionary
        id_entity = {
            utils.get_peer_id(x): x
            for x in itertools.chain(users, chats, channels)
        }
        # We could check saved usernames and put them into the users,
        # chats and channels list from before. While this would reduce
        # the amount of ResolveUsername calls, it would fail to catch
        # username changes.
        result = []
        for x in inputs:
            if isinstance(x, str):
                result.append(await self._get_entity_from_string(x))
            elif not isinstance(x, types.InputPeerSelf):
                result.append(id_entity[utils.get_peer_id(x)])
            else:
                result.append(next(
                    u for u in id_entity.values()
                    if isinstance(u, types.User) and u.is_self
                ))
        return result[0] if single else result
    async def get_input_entity(self, peer):
        """
        Turns the given peer into its input entity version. Most requests
        use this kind of :tl:`InputPeer`, so this is the most suitable call
        to make for those cases. **Generally you should let the library do
        its job** and don't worry about getting the input entity first, but
        if you're going to use an entity often, consider making the call:
        >>> import asyncio
        >>> rc = asyncio.get_event_loop().run_until_complete
        >>>
        >>> from telethon import TelegramClient
        >>> client = TelegramClient(...)
        >>> # If you're going to use "username" often in your code
        >>> # (make a lot of calls), consider getting its input entity
        >>> # once, and then using the "user" everywhere instead.
        >>> user = rc(client.get_input_entity('username'))
        >>> # The same applies to IDs, chats or channels.
        >>> chat = rc(client.get_input_entity(-123456789))
        entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
            If an username is given, **the library will use the cache**. This
            means that it's possible to be using an username that *changed*.
            If the username is not found in the cache, it will be fetched.
            The same rules apply to phone numbers (``'+34 123456789'``).
            If an exact name is given, it must be in the cache too. This
            is not reliable as different people can share the same name
            and which entity is returned is arbitrary, and should be used
            only for quick tests.
            If a positive integer ID is given, the entity will be searched
            in cached users, chats or channels, without making any call.
            If a negative integer ID is given, the entity will be searched
            exactly as either a chat (prefixed with ``-``) or as a channel
            (prefixed with ``-100``).
            If a :tl:`Peer` is given, it will be searched exactly in the
            cache as either an user, chat or channel.
            If the given object can be turned into an input entity directly,
            said operation will be done.
            Invite links make an API call **always** and are expensive.
            You should use the chat ID instead.
            Unsupported types will raise ``TypeError``.
            If the entity can't be found, ``ValueError`` will be raised.
        Returns:
            :tl:`InputPeerUser`, :tl:`InputPeerChat` or :tl:`InputPeerChannel`
            or :tl:`InputPeerSelf` if the parameter is ``'me'`` or ``'self'``.
            If you need to get the ID of yourself, you should use
            `get_me` with ``input_peer=True``) instead.
        """
        if peer in ('me', 'self'):
            return types.InputPeerSelf()
        try:
            # First try to get the entity from cache, otherwise figure it out
            return self.session.get_input_entity(peer)
        except ValueError:
            pass
        if isinstance(peer, str):
            return utils.get_input_peer(
                await self._get_entity_from_string(peer))
        if not isinstance(peer, int) and (not isinstance(peer, TLObject)
                                          or peer.SUBCLASS_OF_ID != 0x2d45687):
            # Try casting the object into an input peer. Might TypeError.
            # Don't do it if a not-found ID was given (instead ValueError).
            # Also ignore Peer (0x2d45687 == crc32(b'Peer'))'s, lacking hash.
            return utils.get_input_peer(peer)
        raise ValueError(
            'Could not find the input entity for "{}". Please read https://'
            'telethon.readthedocs.io/en/latest/extra/basic/entities.html to'
            ' find out more details.'
            .format(peer)
        )
    async def get_peer_id(self, peer, add_mark=True):
        """
        Gets the ID for the given peer, which may be anything entity-like.
        This method needs to be ``async`` because `peer` supports usernames,
        invite-links, phone numbers, etc.
        If ``add_mark is False``, then a positive ID will be returned
        instead. By default, bot-API style IDs (signed) are returned.
        """
        if isinstance(peer, int):
            return utils.get_peer_id(peer, add_mark=add_mark)
        try:
            if peer.SUBCLASS_OF_ID in (0x2d45687, 0xc91c90b6):
                # 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'
                return utils.get_peer_id(peer)
        except AttributeError:
            pass
        peer = await self.get_input_entity(peer)
        if isinstance(peer, types.InputPeerSelf):
            peer = await self.get_me(input_peer=True)
        return utils.get_peer_id(peer, add_mark=add_mark)
    # endregion
    # region Private methods
    async def _get_entity_from_string(self, string):
        """
        Gets a full entity from the given string, which may be a phone or
        an username, and processes all the found entities on the session.
        The string may also be a user link, or a channel/chat invite link.
        This method has the side effect of adding the found users to the
        session database, so it can be queried later without API calls,
        if this option is enabled on the session.
        Returns the found entity, or raises TypeError if not found.
        """
        phone = utils.parse_phone(string)
        if phone:
            for user in (await self(
                    functions.contacts.GetContactsRequest(0))).users:
                if user.phone == phone:
                    return user
        else:
            username, is_join_chat = utils.parse_username(string)
            if is_join_chat:
                invite = await self(
                    functions.messages.CheckChatInviteRequest(username))
                if isinstance(invite, types.ChatInvite):
                    raise ValueError(
                        'Cannot get entity from a channel (or group) '
                        'that you are not part of. Join the group and retry'
                    )
                elif isinstance(invite, types.ChatInviteAlready):
                    return invite.chat
            elif username:
                if username in ('me', 'self'):
                    return await self.get_me()
                try:
                    result = await self(
                        functions.contacts.ResolveUsernameRequest(username))
                except errors.UsernameNotOccupiedError as e:
                    raise ValueError('No user has "{}" as username'
                                     .format(username)) from e
                for entity in itertools.chain(result.users, result.chats):
                    # Fix: parenthesize before .lower(). The previous form
                    # `getattr(...) or '' .lower() == username` parsed as
                    # `getattr(...) or ('' == username)` because `==` binds
                    # tighter than `or`, so the first entity with *any*
                    # username matched instead of the requested one.
                    if (getattr(entity, 'username', None) or '').lower() \
                            == username:
                        return entity
            try:
                # Nobody with this username, maybe it's an exact name/title
                return await self.get_entity(
                    self.session.get_input_entity(string))
            except ValueError:
                pass
        raise ValueError(
            'Cannot find any entity corresponding to "{}"'.format(string)
        )
    async def _get_input_notify(self, notify):
        """
        Returns a :tl:`InputNotifyPeer`. This is a bit tricky because
        it may or not need access to the client to convert what's given
        into an input entity.
        """
        try:
            if notify.SUBCLASS_OF_ID == 0x58981615:
                if isinstance(notify, types.InputNotifyPeer):
                    notify.peer = await self.get_input_entity(notify.peer)
                return notify
        except AttributeError:
            return types.InputNotifyPeer(await self.get_input_entity(notify))
    # endregion
| 41.042289 | 80 | 0.576217 | import asyncio
import itertools
import logging
import time
from .telegrambaseclient import TelegramBaseClient
from .. import errors, utils
from ..tl import TLObject, TLRequest, types, functions
__log__ = logging.getLogger(__name__)
_NOT_A_REQUEST = TypeError('You can only invoke requests, not types!')
class UserMethods(TelegramBaseClient):
async def __call__(self, request, ordered=False):
for r in (request if utils.is_list_like(request) else (request,)):
if not isinstance(r, TLRequest):
raise _NOT_A_REQUEST
await r.resolve(self, utils)
if r.CONSTRUCTOR_ID in self._flood_waited_requests:
due = self._flood_waited_requests[r.CONSTRUCTOR_ID]
diff = round(due - time.time())
if diff <= 3: # Flood waits below 3 seconds are "ignored"
self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
elif diff <= self.flood_sleep_threshold:
__log__.info('Sleeping early for %ds on flood wait', diff)
await asyncio.sleep(diff, loop=self._loop)
self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
else:
raise errors.FloodWaitError(capture=diff)
request_index = 0
self._last_request = time.time()
for _ in range(self._request_retries):
try:
future = self._sender.send(request, ordered=ordered)
if isinstance(future, list):
results = []
for f in future:
result = await f
self.session.process_entities(result)
results.append(result)
request_index += 1
return results
else:
result = await future
self.session.process_entities(result)
return result
except (errors.ServerError, errors.RpcCallFailError) as e:
__log__.warning('Telegram is having internal issues %s: %s',
e.__class__.__name__, e)
except (errors.FloodWaitError, errors.FloodTestPhoneWaitError) as e:
if utils.is_list_like(request):
request = request[request_index]
self._flood_waited_requests\
[request.CONSTRUCTOR_ID] = time.time() + e.seconds
if e.seconds <= self.flood_sleep_threshold:
__log__.info('Sleeping for %ds on flood wait', e.seconds)
await asyncio.sleep(e.seconds, loop=self._loop)
else:
raise
except (errors.PhoneMigrateError, errors.NetworkMigrateError,
errors.UserMigrateError) as e:
__log__.info('Phone migrated to %d', e.new_dc)
should_raise = isinstance(e, (
errors.PhoneMigrateError, errors.NetworkMigrateError
))
if should_raise and await self.is_user_authorized():
raise
await self._switch_dc(e.new_dc)
raise ValueError('Number of retries reached 0')
# region Public methods
async def get_me(self, input_peer=False):
if input_peer and self._self_input_peer:
return self._self_input_peer
try:
me = (await self(
functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
if not self._self_input_peer:
self._self_input_peer = utils.get_input_peer(
me, allow_self=False
)
return self._self_input_peer if input_peer else me
except errors.UnauthorizedError:
return None
async def is_user_authorized(self):
if self._self_input_peer is not None or self._state.pts != -1:
return True
try:
self._state = await self(functions.updates.GetStateRequest())
return True
except errors.RPCError:
return False
async def get_entity(self, entity):
single = not utils.is_list_like(entity)
if single:
entity = (entity,)
# Group input entities by string (resolve username),
# input users (get users), input chat (get chats) and
# input channels (get channels) to get the most entities
# in the less amount of calls possible.
inputs = []
for x in entity:
if isinstance(x, str):
inputs.append(x)
else:
inputs.append(await self.get_input_entity(x))
users = [x for x in inputs
if isinstance(x, (types.InputPeerUser, types.InputPeerSelf))]
chats = [x.chat_id for x in inputs
if isinstance(x, types.InputPeerChat)]
channels = [x for x in inputs
if isinstance(x, types.InputPeerChannel)]
if users:
# GetUsersRequest has a limit of 200 per call
tmp = []
while users:
curr, users = users[:200], users[200:]
tmp.extend(await self(functions.users.GetUsersRequest(curr)))
users = tmp
if chats: # TODO Handle chats slice?
chats = (await self(
functions.messages.GetChatsRequest(chats))).chats
if channels:
channels = (await self(
functions.channels.GetChannelsRequest(channels))).chats
# Merge users, chats and channels into a single dictionary
id_entity = {
utils.get_peer_id(x): x
for x in itertools.chain(users, chats, channels)
}
# We could check saved usernames and put them into the users,
# chats and channels list from before. While this would reduce
# the amount of ResolveUsername calls, it would fail to catch
# username changes.
result = []
for x in inputs:
if isinstance(x, str):
result.append(await self._get_entity_from_string(x))
elif not isinstance(x, types.InputPeerSelf):
result.append(id_entity[utils.get_peer_id(x)])
else:
result.append(next(
u for u in id_entity.values()
if isinstance(u, types.User) and u.is_self
))
return result[0] if single else result
async def get_input_entity(self, peer):
if peer in ('me', 'self'):
return types.InputPeerSelf()
try:
# First try to get the entity from cache, otherwise figure it out
return self.session.get_input_entity(peer)
except ValueError:
pass
if isinstance(peer, str):
return utils.get_input_peer(
await self._get_entity_from_string(peer))
if not isinstance(peer, int) and (not isinstance(peer, TLObject)
or peer.SUBCLASS_OF_ID != 0x2d45687):
# Try casting the object into an input peer. Might TypeError.
# Don't do it if a not-found ID was given (instead ValueError).
return utils.get_input_peer(peer)
raise ValueError(
'Could not find the input entity for "{}". Please read https://'
'telethon.readthedocs.io/en/latest/extra/basic/entities.html to'
' find out more details.'
.format(peer)
)
async def get_peer_id(self, peer, add_mark=True):
if isinstance(peer, int):
return utils.get_peer_id(peer, add_mark=add_mark)
try:
if peer.SUBCLASS_OF_ID in (0x2d45687, 0xc91c90b6):
# 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'
return utils.get_peer_id(peer)
except AttributeError:
pass
peer = await self.get_input_entity(peer)
if isinstance(peer, types.InputPeerSelf):
peer = await self.get_me(input_peer=True)
return utils.get_peer_id(peer, add_mark=add_mark)
# endregion
# region Private methods
async def _get_entity_from_string(self, string):
phone = utils.parse_phone(string)
if phone:
for user in (await self(
functions.contacts.GetContactsRequest(0))).users:
if user.phone == phone:
return user
else:
username, is_join_chat = utils.parse_username(string)
if is_join_chat:
invite = await self(
functions.messages.CheckChatInviteRequest(username))
if isinstance(invite, types.ChatInvite):
raise ValueError(
'Cannot get entity from a channel (or group) '
'that you are not part of. Join the group and retry'
)
elif isinstance(invite, types.ChatInviteAlready):
return invite.chat
elif username:
if username in ('me', 'self'):
return await self.get_me()
try:
result = await self(
functions.contacts.ResolveUsernameRequest(username))
except errors.UsernameNotOccupiedError as e:
raise ValueError('No user has "{}" as username'
.format(username)) from e
for entity in itertools.chain(result.users, result.chats):
if getattr(entity, 'username', None) or '' \
.lower() == username:
return entity
try:
# Nobody with this username, maybe it's an exact name/title
return await self.get_entity(
self.session.get_input_entity(string))
except ValueError:
pass
raise ValueError(
'Cannot find any entity corresponding to "{}"'.format(string)
)
async def _get_input_notify(self, notify):
try:
if notify.SUBCLASS_OF_ID == 0x58981615:
if isinstance(notify, types.InputNotifyPeer):
notify.peer = await self.get_input_entity(notify.peer)
return notify
except AttributeError:
return types.InputNotifyPeer(await self.get_input_entity(notify))
| true | true |
f72ead7d4b9e3e317119cf83ac6cb3d4a19f18ec | 340 | py | Python | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | '''
A script to try and reset from a stage error
without having to turn off the dobot
'''
import sys
import os
sys.path.append(os.path.abspath('..'))
import connecting
#connect
bot = connecting.connect()
bot.reset_pose(1,45,45) # 0 is false, will try to automatically reset?
bot.clear_alarms_state()
bot.serial.close()
#import homing
| 17 | 70 | 0.738235 | import sys
import os
# Comment-stripped duplicate of the Dobot error-reset script (dataset residue).
sys.path.append(os.path.abspath('..'))  # parent dir holds ``connecting``
import connecting
bot = connecting.connect()  # open the serial connection to the arm
bot.reset_pose(1,45,45)
bot.clear_alarms_state()  # clear any latched alarm/error state
bot.serial.close()  # release the serial port
| true | true |
f72eadfc88eda868eba4d4b77c1b8a758b724676 | 18,817 | py | Python | src/docker-images/job-exporter/test/test_collector.py | jinlmsft/Apulis-AI-Platform | 2cf1fbb50e08b477940f5f336b1b897a49608b72 | [
"MIT"
] | 38 | 2020-07-13T08:46:39.000Z | 2021-02-08T01:38:44.000Z | src/docker-images/job-exporter/test/test_collector.py | debbie-alaine/DLWorkspace | 2888042c0f9388f911bc74fe5ecd20ef3fabd715 | [
"MIT"
] | null | null | null | src/docker-images/job-exporter/test/test_collector.py | debbie-alaine/DLWorkspace | 2888042c0f9388f911bc74fe5ecd20ef3fabd715 | [
"MIT"
] | 20 | 2020-07-14T03:38:50.000Z | 2021-01-08T06:24:17.000Z | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
import datetime
import time
import logging
import base
sys.path.append(os.path.abspath("../src/"))
import collector
import nvidia
import docker_inspect
from collector import ContainerCollector
from collector import GpuCollector
logger = logging.getLogger(__name__)
class TestContainerCollector(base.TestBase):
    """Tests for ContainerCollector in collector.py."""

    def test_parse_from_labels(self):
        """GPU ids and prometheus labels are derived from inspect output."""
        inspect_result = docker_inspect.InspectResult(
            "openmindstudio",
            "trialslot_nnimain_d65bc5ac",
            "tuner",
            "0",
            "this_is_pod_name_val",
            "0,1,",
            12345,
            "dixu@example.com",
            "platform",
            False,
        )
        gpu_ids, labels = ContainerCollector.parse_from_labels(
            inspect_result, None)
        # The trailing comma in "0,1," must not produce an empty id.
        self.assertEqual(["0", "1"], gpu_ids)
        expected_labels = {
            "username": "openmindstudio",
            "job_name": "trialslot_nnimain_d65bc5ac",
            "role_name": "tuner",
            "task_index": "0",
            "pod_name": "this_is_pod_name_val",
            "user_email": "dixu@example.com",
            "vc_name": "platform",
        }
        self.assertEqual(expected_labels, labels)

    def test_infer_service_name(self):
        """Pause containers and static pods yield no service name."""
        cases = [
            # k8s pause container: no service.
            ("k8s_POD_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8",
             None),
            # Regular deployment pod: service name is the second field.
            ("k8s_alertmanager_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8-a62d-000d3ab25bb6_2",
             "alertmanager"),
            # Static pod (node-suffixed name): no service.
            ("k8s_kube-scheduler_kube-scheduler-10.151.40.4_kube-system_f1164d931979939cf0601155df9c748a_6",
             None),
        ]
        for container_name, expected in cases:
            self.assertEqual(
                expected,
                ContainerCollector.infer_service_name(container_name))
class TestDockerCollector(base.TestBase):
    """
    Test DockerCollector in collector.py
    """
    def assert_metrics(self, metrics):
        # Expect exactly one metric family containing one sample with a
        # single label key and a value of 1.
        self.assertEqual(1, len(metrics))
        self.assertEqual(1, len(metrics[0].samples))
        sample = metrics[0].samples[0]
        self.assertEqual(1, len(sample[1]))  # label keys
        self.assertEqual(1, sample[2])  # sample value
    def test_impl(self):
        # Call collect_impl() directly, without the background thread.
        _, c = collector.instantiate_collector("test_docker_collector1", 0.5,
                                               datetime.timedelta(seconds=1),
                                               collector.DockerCollector)
        self.assert_metrics(c.collect_impl())
    def test_base_collector(self):
        """ actually setup DockerCollector thread, and test, since this is multi-thread
        test case, maybe sensitive to the system load """
        ref = collector.make_collector("test_docker_collector2", 0.5,
                                       datetime.timedelta(seconds=10),
                                       collector.DockerCollector)
        metrics = None
        # Poll for up to ~2 seconds for the background thread to publish
        # its first result into the shared ref.
        for i in range(20):
            metrics = ref.get(datetime.datetime.now())
            if metrics is not None:
                break
            time.sleep(0.1)
        self.assert_metrics(metrics)
class TestZombieCollector(base.TestBase):
    """
    Test ZombieCollector in collector.py
    """
    def setUp(self):
        # Because prometheus forbid same metric name, and we generate metric
        # in from name, we need to differentiate name using time.
        t = str(time.time()).replace(".", "_")
        decay_time = datetime.timedelta(seconds=1)
        _, self.collector = collector.instantiate_collector(
            "test_zombie_collector" + t, 0.5, decay_time,
            collector.ZombieCollector, collector.AtomicRef(decay_time),
            collector.AtomicRef(decay_time))
    def test_update_zombie_count_type1(self):
        # A container is only reported as a type-1 zombie after it has
        # stayed a candidate for longer than decay_time; dropping it from
        # the candidate set removes it from the recorder.
        start = datetime.datetime.now()
        one_sec = datetime.timedelta(seconds=1)
        type1_recorder = self.collector.type1_zombies
        self.assertEqual(
            set(), self.collector.update_zombie_count_type1({"a", "b"}, start))
        self.assertEqual(2, len(type1_recorder))
        # Still within decay_time: nothing reported yet.
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type1(
                {"a", "b"}, start + type1_recorder.decay_time - one_sec))
        self.assertEqual(2, len(type1_recorder))
        # Past decay_time: both candidates become zombies.
        self.assertEqual({"a", "b"},
                         self.collector.update_zombie_count_type1(
                             {"a", "b"},
                             start + type1_recorder.decay_time + one_sec))
        self.assertEqual(2, len(type1_recorder))
        # "b" disappeared from candidates, so only "a" remains a zombie.
        self.assertEqual({"a"},
                         self.collector.update_zombie_count_type1(
                             {"a"},
                             start + type1_recorder.decay_time + 2 * one_sec))
        self.assertEqual(1, len(type1_recorder))
        # Empty candidate set clears the recorder entirely.
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type1(
                {}, start + type1_recorder.decay_time + 3 * one_sec))
        self.assertEqual(0, len(type1_recorder))
    def test_update_zombie_count_type2(self):
        # Type-2 zombies: a job container whose companion container has
        # exited and that itself persists for longer than decay_time.
        start = datetime.datetime.now()
        one_sec = datetime.timedelta(seconds=1)
        stats = {
            "43ffe701d883": {
                "name":
                "core-caffe2_resnet50_20181012040921.586-container_e03_1539312078880_0780_01_000002",
                "id":
                "43ffe701d883"
            },
            "8de2f53e64cb": {
                "name": "container_e03_1539312078880_0780_01_000002",
                "id": "8de2f53e64cb"
            }
        }
        type2_recorder = self.collector.type2_zombies
        self.assertEqual(set(),
                         self.collector.update_zombie_count_type2(stats, start))
        # The companion container exits; the remaining one is a candidate.
        stats.pop("8de2f53e64cb")
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(stats, start + one_sec))
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(
                stats, start + type2_recorder.decay_time))
        # Candidate outlived decay_time: reported as zombie.
        self.assertEqual({"43ffe701d883"},
                         self.collector.update_zombie_count_type2(
                             stats,
                             start + type2_recorder.decay_time + 2 * one_sec))
        # Once the container exits too, nothing is reported.
        stats.pop("43ffe701d883")
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(
                stats, start + type2_recorder.decay_time + 3 * one_sec))
class TestGpuCollector(base.TestBase):
    """
    Test GpuCollector in collector.py
    """
    def make_pid_to_cid_fn(self, mapping):
        # Build a fake pid -> container-id lookup: returns (found, cid).
        def fn(pid):
            if pid in mapping:
                return True, mapping[pid]
            return False, ""
        return fn
    def test_convert_to_metrics(self):
        # sample may not ordered, and can not assertEqual directly, so tear them apart
        # NOTE(review): the four scenarios below repeat the same
        # target-metric construction; a small assertion helper would
        # shrink this test considerably.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 37.0)
        ])
        zombie_info = {"abc", "def"}
        pid_to_cid_mapping = {33: "def", 22: "ghi"}  # only 33 is zombie
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["0", "GPU-uuid0"], 20)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["0", "GPU-uuid0"], 21)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_double"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        # pid 44 has no container mapping, so it counts as external.
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["0", "44"], 1)
        self.assertEqual(target_external_process, external_process)
        # pid 33 maps to container "def", which is in zombie_info.
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        target_zombie_container.add_metric(["0", "def"], 1)
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["0", "GPU-uuid0"], 37.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test minor 1: non-zero ECC error counts are propagated.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(
                30, 31, [55, 123],
                nvidia.EccError(volatile_single=2,
                                volatile_double=3,
                                aggregated_single=4,
                                aggregated_double=5), "1", "GPU-uuid1", 24.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["1", "GPU-uuid1"], 30)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["1", "GPU-uuid1"], 31)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_single"], 2)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_double"], 3)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_single"], 4)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_double"], 5)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        # Neither pid 55 nor 123 has a container mapping: both external.
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["1", "55"], 1)
        target_external_process.add_metric(["1", "123"], 1)
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["1", "GPU-uuid1"], 24.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test minor 2: idle GPU whose memory usage equals the threshold.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024, [], nvidia.EccError(),
                                   "2", "GPU-uuid2", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["2", "GPU-uuid2"], 40)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["2", "GPU-uuid2"], 20 * 1024 * 1024)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_double"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["2", "GPU-uuid2"], 30.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test memory leak: no processes, yet memory usage exceeds the
        # threshold -> reported as a leak.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024 + 1, [],
                                   nvidia.EccError(), "3", "GPU-uuid3", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        target_mem_leak.add_metric(["3", "GPU-uuid3"], 1)
        self.assertEqual(target_mem_leak, mem_leak)
    def test_convert_to_metrics_with_no_zombie_info_BUGFIX(self):
        # Regression test: external-process metrics must still be emitted
        # when there is no zombie info at all.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 40.0)
        ])
        # zombie_info is empty should also have external process metric
        zombie_info = []
        pid_to_cid_mapping = {
            33: "def",
            22: "ghi"
        }  # only 44 is external process
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
        # zombie_info is None should also have external process metric
        zombie_info = None
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
    def test_convert_to_metrics_with_real_id_BUGFIX(self):
        # Regression test: zombie_info holds short (12-char) ids while the
        # pid lookup returns full 64-char ids; matching must still work.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22], nvidia.EccError(), "0",
                                   "GPU-uuid0", 50.0)
        ])
        # zombie_info is empty should also have external process metric
        zombie_info = {"ce5de12d6275"}
        pid_to_cid_mapping = {
            22:
            "ce5de12d6275dc05c9ec5b7f58484f075f4775d8f54f6a4be3dc1439344df356"
        }
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(1, len(zombie_container.samples))
        self.assertEqual("0",
                         zombie_container.samples[0].labels["minor_number"])
        self.assertEqual("ce5de12d6275",
                         zombie_container.samples[0].labels["container_id"])
class TestAtomicRef(base.TestBase):
    """
    Test AtomicRef in collector.py
    """
    def test_expiration(self):
        """A stored value stays readable until decay_time past its set time.

        Uses assertEqual throughout: assertEquals is a deprecated alias
        (deprecated since Python 3.2, removed in 3.12).
        """
        ref = collector.AtomicRef(datetime.timedelta(seconds=10))
        now = datetime.datetime.now()
        delta = datetime.timedelta(seconds=1)
        ref.set(1, now)
        # Readable at, before, and up to decay_time after the set time.
        self.assertEqual(1, ref.get(now))
        self.assertEqual(1, ref.get(now - delta))
        self.assertEqual(1, ref.get(now + delta))
        self.assertEqual(1, ref.get(now + delta * 10))
        # Expired one second past decay_time...
        self.assertEqual(None, ref.get(now + delta * 11))
        # ...but expiry is evaluated per-query, not sticky.
        self.assertEqual(1, ref.get(now + delta * 10))
        # Setting a new value restarts the clock from its own timestamp.
        ref.set(2, now + delta)
        self.assertEqual(2, ref.get(now))
        self.assertEqual(2, ref.get(now + delta * 10))
        self.assertEqual(2, ref.get(now + delta * 11))
        self.assertEqual(None, ref.get(now + delta * 12))
if __name__ == '__main__':
    unittest.main()  # discover and run all TestCase classes in this module
| 38.718107 | 128 | 0.634905 |
import os
import sys
import unittest
import datetime
import time
import logging
import base
sys.path.append(os.path.abspath("../src/"))
import collector
import nvidia
import docker_inspect
from collector import ContainerCollector
from collector import GpuCollector
logger = logging.getLogger(__name__)
class TestContainerCollector(base.TestBase):
def test_parse_from_labels(self):
inspect_result = docker_inspect.InspectResult(
"openmindstudio",
"trialslot_nnimain_d65bc5ac",
"tuner",
"0",
"this_is_pod_name_val",
"0,1,",
12345,
"dixu@example.com",
"platform",
False,
)
gpu_ids, labels = ContainerCollector.parse_from_labels(
inspect_result, None)
self.assertEqual(["0", "1"], gpu_ids)
target_labels = {
"username": "openmindstudio",
"job_name": "trialslot_nnimain_d65bc5ac",
"role_name": "tuner",
"task_index": "0",
"pod_name": "this_is_pod_name_val",
"user_email": "dixu@example.com",
"vc_name": "platform",
}
self.assertEqual(target_labels, labels)
def test_infer_service_name(self):
self.assertIsNone(
ContainerCollector.infer_service_name(
"k8s_POD_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8"
))
self.assertEqual(
"alertmanager",
ContainerCollector.infer_service_name(
"k8s_alertmanager_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8-a62d-000d3ab25bb6_2"
))
self.assertIsNone(
ContainerCollector.infer_service_name(
"k8s_kube-scheduler_kube-scheduler-10.151.40.4_kube-system_f1164d931979939cf0601155df9c748a_6"
))
class TestDockerCollector(base.TestBase):
def assert_metrics(self, metrics):
self.assertEqual(1, len(metrics))
self.assertEqual(1, len(metrics[0].samples))
sample = metrics[0].samples[0]
self.assertEqual(1, len(sample[1]))
self.assertEqual(1, sample[2])
def test_impl(self):
_, c = collector.instantiate_collector("test_docker_collector1", 0.5,
datetime.timedelta(seconds=1),
collector.DockerCollector)
self.assert_metrics(c.collect_impl())
def test_base_collector(self):
ref = collector.make_collector("test_docker_collector2", 0.5,
datetime.timedelta(seconds=10),
collector.DockerCollector)
metrics = None
for i in range(20):
metrics = ref.get(datetime.datetime.now())
if metrics is not None:
break
time.sleep(0.1)
self.assert_metrics(metrics)
class TestZombieCollector(base.TestBase):
def setUp(self):
t = str(time.time()).replace(".", "_")
decay_time = datetime.timedelta(seconds=1)
_, self.collector = collector.instantiate_collector(
"test_zombie_collector" + t, 0.5, decay_time,
collector.ZombieCollector, collector.AtomicRef(decay_time),
collector.AtomicRef(decay_time))
def test_update_zombie_count_type1(self):
start = datetime.datetime.now()
one_sec = datetime.timedelta(seconds=1)
type1_recorder = self.collector.type1_zombies
self.assertEqual(
set(), self.collector.update_zombie_count_type1({"a", "b"}, start))
self.assertEqual(2, len(type1_recorder))
self.assertEqual(
set(),
self.collector.update_zombie_count_type1(
{"a", "b"}, start + type1_recorder.decay_time - one_sec))
self.assertEqual(2, len(type1_recorder))
self.assertEqual({"a", "b"},
self.collector.update_zombie_count_type1(
{"a", "b"},
start + type1_recorder.decay_time + one_sec))
self.assertEqual(2, len(type1_recorder))
self.assertEqual({"a"},
self.collector.update_zombie_count_type1(
{"a"},
start + type1_recorder.decay_time + 2 * one_sec))
self.assertEqual(1, len(type1_recorder))
self.assertEqual(
set(),
self.collector.update_zombie_count_type1(
{}, start + type1_recorder.decay_time + 3 * one_sec))
self.assertEqual(0, len(type1_recorder))
def test_update_zombie_count_type2(self):
start = datetime.datetime.now()
one_sec = datetime.timedelta(seconds=1)
stats = {
"43ffe701d883": {
"name":
"core-caffe2_resnet50_20181012040921.586-container_e03_1539312078880_0780_01_000002",
"id":
"43ffe701d883"
},
"8de2f53e64cb": {
"name": "container_e03_1539312078880_0780_01_000002",
"id": "8de2f53e64cb"
}
}
type2_recorder = self.collector.type2_zombies
self.assertEqual(set(),
self.collector.update_zombie_count_type2(stats, start))
stats.pop("8de2f53e64cb")
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(stats, start + one_sec))
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(
stats, start + type2_recorder.decay_time))
self.assertEqual({"43ffe701d883"},
self.collector.update_zombie_count_type2(
stats,
start + type2_recorder.decay_time + 2 * one_sec))
stats.pop("43ffe701d883")
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(
stats, start + type2_recorder.decay_time + 3 * one_sec))
class TestGpuCollector(base.TestBase):
def make_pid_to_cid_fn(self, mapping):
def fn(pid):
if pid in mapping:
return True, mapping[pid]
return False, ""
return fn
def test_convert_to_metrics(self):
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
"GPU-uuid0", 37.0)
])
zombie_info = {"abc", "def"}
pid_to_cid_mapping = {33: "def", 22: "ghi"}
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
target_core_utils = collector.gen_gpu_util_gauge()
target_core_utils.add_metric(["0", "GPU-uuid0"], 20)
self.assertEqual(target_core_utils, core_utils)
target_mem_utils = collector.gen_gpu_mem_util_gauge()
target_mem_utils.add_metric(["0", "GPU-uuid0"], 21)
self.assertEqual(target_mem_utils, mem_utils)
target_ecc_errors = collector.gen_gpu_ecc_counter()
target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_single"], 0)
target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_double"], 0)
target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_single"], 0)
target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_double"], 0)
self.assertEqual(target_ecc_errors, ecc_errors)
target_mem_leak = collector.gen_gpu_memory_leak_counter()
self.assertEqual(target_mem_leak, mem_leak)
target_external_process = collector.gen_gpu_used_by_external_process_counter(
)
target_external_process.add_metric(["0", "44"], 1)
self.assertEqual(target_external_process, external_process)
target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
)
target_zombie_container.add_metric(["0", "def"], 1)
self.assertEqual(target_zombie_container, zombie_container)
target_gpu_temp = collector.gen_gpu_temperature_gauge()
target_gpu_temp.add_metric(["0", "GPU-uuid0"], 37.0)
self.assertEqual(target_gpu_temp, gpu_temp)
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(
30, 31, [55, 123],
nvidia.EccError(volatile_single=2,
volatile_double=3,
aggregated_single=4,
aggregated_double=5), "1", "GPU-uuid1", 24.0)
])
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
target_core_utils = collector.gen_gpu_util_gauge()
target_core_utils.add_metric(["1", "GPU-uuid1"], 30)
self.assertEqual(target_core_utils, core_utils)
target_mem_utils = collector.gen_gpu_mem_util_gauge()
target_mem_utils.add_metric(["1", "GPU-uuid1"], 31)
self.assertEqual(target_mem_utils, mem_utils)
target_ecc_errors = collector.gen_gpu_ecc_counter()
target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_single"], 2)
target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_double"], 3)
target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_single"], 4)
target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_double"], 5)
self.assertEqual(target_ecc_errors, ecc_errors)
target_mem_leak = collector.gen_gpu_memory_leak_counter()
self.assertEqual(target_mem_leak, mem_leak)
target_external_process = collector.gen_gpu_used_by_external_process_counter(
)
target_external_process.add_metric(["1", "55"], 1)
target_external_process.add_metric(["1", "123"], 1)
self.assertEqual(target_external_process, external_process)
target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
)
self.assertEqual(target_zombie_container, zombie_container)
target_gpu_temp = collector.gen_gpu_temperature_gauge()
target_gpu_temp.add_metric(["1", "GPU-uuid1"], 24.0)
self.assertEqual(target_gpu_temp, gpu_temp)
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024, [], nvidia.EccError(),
"2", "GPU-uuid2", 30.0)
])
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
target_core_utils = collector.gen_gpu_util_gauge()
target_core_utils.add_metric(["2", "GPU-uuid2"], 40)
self.assertEqual(target_core_utils, core_utils)
target_mem_utils = collector.gen_gpu_mem_util_gauge()
target_mem_utils.add_metric(["2", "GPU-uuid2"], 20 * 1024 * 1024)
self.assertEqual(target_mem_utils, mem_utils)
target_ecc_errors = collector.gen_gpu_ecc_counter()
target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_single"], 0)
target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_double"], 0)
target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_single"], 0)
target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_double"], 0)
self.assertEqual(target_ecc_errors, ecc_errors)
target_mem_leak = collector.gen_gpu_memory_leak_counter()
self.assertEqual(target_mem_leak, mem_leak)
target_external_process = collector.gen_gpu_used_by_external_process_counter(
)
self.assertEqual(target_external_process, external_process)
target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
)
self.assertEqual(target_zombie_container, zombie_container)
target_gpu_temp = collector.gen_gpu_temperature_gauge()
target_gpu_temp.add_metric(["2", "GPU-uuid2"], 30.0)
self.assertEqual(target_gpu_temp, gpu_temp)
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024 + 1, [],
nvidia.EccError(), "3", "GPU-uuid3", 30.0)
])
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
target_mem_leak = collector.gen_gpu_memory_leak_counter()
target_mem_leak.add_metric(["3", "GPU-uuid3"], 1)
self.assertEqual(target_mem_leak, mem_leak)
def test_convert_to_metrics_with_no_zombie_info_BUGFIX(self):
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
"GPU-uuid0", 40.0)
])
zombie_info = []
pid_to_cid_mapping = {
33: "def",
22: "ghi"
}
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
self.assertEqual(0, len(zombie_container.samples))
self.assertEqual(1, len(external_process.samples))
self.assertEqual("0",
external_process.samples[0].labels["minor_number"])
self.assertEqual("44", external_process.samples[0].labels["pid"])
zombie_info = None
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
self.assertEqual(0, len(zombie_container.samples))
self.assertEqual(1, len(external_process.samples))
self.assertEqual("0",
external_process.samples[0].labels["minor_number"])
self.assertEqual("44", external_process.samples[0].labels["pid"])
def test_convert_to_metrics_with_real_id_BUGFIX(self):
gpu_info = nvidia.construct_gpu_info([
nvidia.NvidiaGpuStatus(20, 21, [22], nvidia.EccError(), "0",
"GPU-uuid0", 50.0)
])
zombie_info = {"ce5de12d6275"}
pid_to_cid_mapping = {
22:
"ce5de12d6275dc05c9ec5b7f58484f075f4775d8f54f6a4be3dc1439344df356"
}
metrics = GpuCollector.convert_to_metrics(
gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
20 * 1024)
core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
self.assertEqual(1, len(zombie_container.samples))
self.assertEqual("0",
zombie_container.samples[0].labels["minor_number"])
self.assertEqual("ce5de12d6275",
zombie_container.samples[0].labels["container_id"])
class TestAtomicRef(base.TestBase):
    """Tests for collector.AtomicRef (comment-stripped duplicate copy)."""
    def test_expiration(self):
        """Values are readable until decay_time after their set timestamp.

        assertEquals replaced with assertEqual: the former is a deprecated
        alias removed in Python 3.12.
        """
        ref = collector.AtomicRef(datetime.timedelta(seconds=10))
        now = datetime.datetime.now()
        delta = datetime.timedelta(seconds=1)
        ref.set(1, now)
        self.assertEqual(1, ref.get(now))
        self.assertEqual(1, ref.get(now - delta))
        self.assertEqual(1, ref.get(now + delta))
        self.assertEqual(1, ref.get(now + delta * 10))
        self.assertEqual(None, ref.get(now + delta * 11))
        self.assertEqual(1, ref.get(now + delta * 10))
        ref.set(2, now + delta)
        self.assertEqual(2, ref.get(now))
        self.assertEqual(2, ref.get(now + delta * 10))
        self.assertEqual(2, ref.get(now + delta * 11))
        self.assertEqual(None, ref.get(now + delta * 12))
if __name__ == '__main__':
    # Run this module's tests when executed directly (python <file>).
    unittest.main()
| true | true |
f72eae12827e90588b406e44397f79f94ffd2658 | 5,224 | py | Python | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | # coding: utf8
from __future__ import unicode_literals
from collections import OrderedDict
from .symbols import NOUN, VERB, ADJ, PUNCT, PROPN
from .errors import Errors
from .lookups import Lookups
from .parts_of_speech import NAMES as UPOS_NAMES
class Lemmatizer(object):
    """
    The Lemmatizer supports simple part-of-speech-sensitive suffix rules and
    lookup tables.

    DOCS: https://spacy.io/api/lemmatizer
    """

    @classmethod
    def load(cls, *args, **kwargs):
        # Loading via this classmethod is no longer supported; construct a
        # Lemmatizer directly from a Lookups object instead.
        raise NotImplementedError(Errors.E172)

    def __init__(self, lookups, *args, is_base_form=None, **kwargs):
        """Initialize a Lemmatizer.

        lookups (Lookups): The lookups object containing the (optional) tables
            "lemma_rules", "lemma_index", "lemma_exc" and "lemma_lookup".
        is_base_form (callable): Optional predicate (univ_pos, morphology) ->
            bool that short-circuits lemmatization when the surface form is
            already the lemma.
        RETURNS (Lemmatizer): The newly constructed object.
        """
        # Positional/keyword extras are rejected explicitly: the old API
        # accepted tables directly, and E173 points users to Lookups.
        if args or kwargs or not isinstance(lookups, Lookups):
            raise ValueError(Errors.E173)
        self.lookups = lookups
        self.is_base_form = is_base_form

    def __call__(self, string, univ_pos, morphology=None):
        """Lemmatize a string.

        string (unicode): The string to lemmatize, e.g. the token text.
        univ_pos (unicode / int): The token's universal part-of-speech tag.
        morphology (dict): The token's morphological features following the
            Universal Dependencies scheme.
        RETURNS (list): The available lemmas for the string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        # Without suffix rules, the best we can do is a plain table lookup.
        if "lemma_rules" not in self.lookups:
            return [lookup_table.get(string, string)]
        if isinstance(univ_pos, int):
            # Map integer POS ids to their string names; unknown ids -> "X".
            univ_pos = UPOS_NAMES.get(univ_pos, "X")
        univ_pos = univ_pos.lower()
        if univ_pos in ("", "eol", "space"):
            return [string.lower()]
        # See Issue #435 for example of where this logic is required.
        if callable(self.is_base_form) and self.is_base_form(univ_pos, morphology):
            return [string.lower()]
        index_table = self.lookups.get_table("lemma_index", {})
        exc_table = self.lookups.get_table("lemma_exc", {})
        rules_table = self.lookups.get_table("lemma_rules", {})
        if not any((index_table.get(univ_pos), exc_table.get(univ_pos), rules_table.get(univ_pos))):
            # No data at all for this POS: proper nouns keep their casing,
            # everything else is lowercased.
            if univ_pos == "propn":
                return [string]
            else:
                return [string.lower()]
        lemmas = self.lemmatize(
            string,
            index_table.get(univ_pos, {}),
            exc_table.get(univ_pos, {}),
            rules_table.get(univ_pos, []),
        )
        return lemmas

    def noun(self, string, morphology=None):
        """Lemmatize *string* as a noun."""
        return self(string, "noun", morphology)

    def verb(self, string, morphology=None):
        """Lemmatize *string* as a verb."""
        return self(string, "verb", morphology)

    def adj(self, string, morphology=None):
        """Lemmatize *string* as an adjective."""
        return self(string, "adj", morphology)

    def det(self, string, morphology=None):
        """Lemmatize *string* as a determiner."""
        return self(string, "det", morphology)

    def pron(self, string, morphology=None):
        """Lemmatize *string* as a pronoun."""
        return self(string, "pron", morphology)

    def adp(self, string, morphology=None):
        """Lemmatize *string* as an adposition."""
        return self(string, "adp", morphology)

    def num(self, string, morphology=None):
        """Lemmatize *string* as a numeral."""
        return self(string, "num", morphology)

    def punct(self, string, morphology=None):
        """Lemmatize *string* as punctuation."""
        return self(string, "punct", morphology)

    def lookup(self, string, orth=None):
        """Look up a lemma in the table, if available. If no lemma is found,
        the original string is returned.

        string (unicode): The original string.
        orth (int): Optional hash of the string to look up. If not set, the
            string will be used and hashed.
        RETURNS (unicode): The lemma if the string was found, otherwise the
            original string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        key = orth if orth is not None else string
        if key in lookup_table:
            return lookup_table[key]
        return string

    def lemmatize(self, string, index, exceptions, rules):
        """Apply suffix rules, exceptions and the index to produce candidate
        lemmas for *string*.

        string (unicode): The original string (casing is preserved only in
            the final fallback).
        index (dict / set): Known valid lemma forms for this POS.
        exceptions (dict): Mapping of form -> list of lemma overrides.
        rules (list): (old_suffix, new_suffix) replacement pairs.
        RETURNS (list): Candidate lemmas, best candidates first.
        """
        orig = string
        string = string.lower()
        forms = []
        oov_forms = []
        for old, new in rules:
            if string.endswith(old):
                form = string[: len(string) - len(old)] + new
                if not form:
                    pass
                elif form in index or not form.isalpha():
                    forms.append(form)
                else:
                    # Candidate not found in the index: keep only as a
                    # fallback in case no in-index form is produced.
                    oov_forms.append(form)
        # Remove duplicates but preserve the ordering of applied "rules"
        forms = list(OrderedDict.fromkeys(forms))
        # Put exceptions at the front of the list, so they get priority.
        # This is a dodgy heuristic -- but it's the best we can do until we get
        # frequencies on this. We can at least prune out problematic exceptions,
        # if they shadow more frequent analyses.
        for form in exceptions.get(string, []):
            if form not in forms:
                forms.insert(0, form)
        if not forms:
            forms.extend(oov_forms)
        if not forms:
            # Nothing matched at all: fall back to the original surface form.
            forms.append(orig)
        return forms
| 37.049645 | 100 | 0.612749 |
from __future__ import unicode_literals
from collections import OrderedDict
from .symbols import NOUN, VERB, ADJ, PUNCT, PROPN
from .errors import Errors
from .lookups import Lookups
from .parts_of_speech import NAMES as UPOS_NAMES
class Lemmatizer(object):
@classmethod
def load(cls, *args, **kwargs):
raise NotImplementedError(Errors.E172)
def __init__(self, lookups, *args, is_base_form=None, **kwargs):
if args or kwargs or not isinstance(lookups, Lookups):
raise ValueError(Errors.E173)
self.lookups = lookups
self.is_base_form = is_base_form
def __call__(self, string, univ_pos, morphology=None):
lookup_table = self.lookups.get_table("lemma_lookup", {})
if "lemma_rules" not in self.lookups:
return [lookup_table.get(string, string)]
if isinstance(univ_pos, int):
univ_pos = UPOS_NAMES.get(univ_pos, "X")
univ_pos = univ_pos.lower()
if univ_pos in ("", "eol", "space"):
return [string.lower()]
.is_base_form(univ_pos, morphology):
return [string.lower()]
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
if not any((index_table.get(univ_pos), exc_table.get(univ_pos), rules_table.get(univ_pos))):
if univ_pos == "propn":
return [string]
else:
return [string.lower()]
lemmas = self.lemmatize(
string,
index_table.get(univ_pos, {}),
exc_table.get(univ_pos, {}),
rules_table.get(univ_pos, []),
)
return lemmas
def noun(self, string, morphology=None):
return self(string, "noun", morphology)
def verb(self, string, morphology=None):
return self(string, "verb", morphology)
def adj(self, string, morphology=None):
return self(string, "adj", morphology)
def det(self, string, morphology=None):
return self(string, "det", morphology)
def pron(self, string, morphology=None):
return self(string, "pron", morphology)
def adp(self, string, morphology=None):
return self(string, "adp", morphology)
def num(self, string, morphology=None):
return self(string, "num", morphology)
def punct(self, string, morphology=None):
return self(string, "punct", morphology)
def lookup(self, string, orth=None):
lookup_table = self.lookups.get_table("lemma_lookup", {})
key = orth if orth is not None else string
if key in lookup_table:
return lookup_table[key]
return string
def lemmatize(self, string, index, exceptions, rules):
orig = string
string = string.lower()
forms = []
oov_forms = []
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
pass
elif form in index or not form.isalpha():
forms.append(form)
else:
oov_forms.append(form)
forms = list(OrderedDict.fromkeys(forms))
# frequencies on this. We can at least prune out problematic exceptions,
# if they shadow more frequent analyses.
for form in exceptions.get(string, []):
if form not in forms:
forms.insert(0, form)
if not forms:
forms.extend(oov_forms)
if not forms:
forms.append(orig)
return forms
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.