import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
from rhoas_service_accounts_mgmt_sdk.model.otp import Otp
globals()['Otp'] = Otp
class AuthenticationFactors(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'otp': (Otp,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'otp': 'otp', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AuthenticationFactors - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
otp (Otp): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AuthenticationFactors - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
otp (Otp): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/authentication_factors.py | 0.51562 | 0.213224 | authentication_factors.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
class Error(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'error': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'error': 'error', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, error, *args, **kwargs): # noqa: E501
"""Error - a model defined in OpenAPI
Args:
error (str): The cause of the Error.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, error, *args, **kwargs): # noqa: E501
"""Error - a model defined in OpenAPI
Args:
error (str): The cause of the Error.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.error = error
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/error.py | 0.561936 | 0.213029 | error.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
class AcsClientResponseData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'client_id': (str,), # noqa: E501
'secret': (str,), # noqa: E501
'name': (str,), # noqa: E501
'created_at': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'client_id': 'clientId', # noqa: E501
'secret': 'secret', # noqa: E501
'name': 'name', # noqa: E501
'created_at': 'createdAt', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AcsClientResponseData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
client_id (str): [optional] # noqa: E501
secret (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
created_at (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AcsClientResponseData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
client_id (str): [optional] # noqa: E501
secret (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
created_at (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/acs_client_response_data.py | 0.524638 | 0.207516 | acs_client_response_data.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
class RedHatErrorRepresentation(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('error',): {
'SERVICE_ACCOUNT_LIMIT_EXCEEDED': "service_account_limit_exceeded",
'SERVICE_ACCOUNT_NOT_FOUND': "service_account_not_found",
'SERVICE_ACCOUNT_USER_NOT_FOUND': "service_account_user_not_found",
'SERVICE_ACCOUNT_ACCESS_INVALID': "service_account_access_invalid",
'ACS_TENANT_LIMIT_EXCEEDED': "acs_tenant_limit_exceeded",
'ACS_TENANT_NOT_FOUND': "acs_tenant_not_found",
'ACS_ACCESS_INVALID': "acs_access_invalid",
'ACS_INVALID_REDIRECT_URI': "acs_invalid_redirect_uri",
'ACS_INVALID_CLIENT': "acs_invalid_client",
'ACS_DISABLED': "acs_disabled",
'SMOKETEST_ACCESS_INVALID': "smoketest_access_invalid",
'SMOKETEST_NOT_FOUND': "smoketest_not_found",
'GENERAL_FAILURE': "general_failure",
'ORGANIZATION_API_ACCESS_INVALID': "organization_api_access_invalid",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'error': (str,), # noqa: E501
'error_description': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'error': 'error', # noqa: E501
'error_description': 'error_description', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""RedHatErrorRepresentation - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
error (str): [optional] # noqa: E501
error_description (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""RedHatErrorRepresentation - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
error (str): [optional] # noqa: E501
error_description (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/red_hat_error_representation.py | 0.498779 | 0.171789 | red_hat_error_representation.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
class AcsClientRequestData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('redirect_uris',): {
},
('org_id',): {
'regex': {
'pattern': r'\d{1,10}', # noqa: E501
},
},
('name',): {
'max_length': 50,
'min_length': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'redirect_uris': ([str],), # noqa: E501
'org_id': (str,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'redirect_uris': 'redirectUris', # noqa: E501
'org_id': 'orgId', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, redirect_uris, org_id, *args, **kwargs): # noqa: E501
"""AcsClientRequestData - a model defined in OpenAPI
Args:
redirect_uris ([str]):
org_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.redirect_uris = redirect_uris
self.org_id = org_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, redirect_uris, org_id, *args, **kwargs): # noqa: E501
"""AcsClientRequestData - a model defined in OpenAPI
Args:
redirect_uris ([str]):
org_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.redirect_uris = redirect_uris
self.org_id = org_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/acs_client_request_data.py | 0.547222 | 0.183447 | acs_client_request_data.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_service_accounts_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_service_accounts_mgmt_sdk.exceptions import ApiAttributeError
class ValidationExceptionData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'fields': ({str: (str,)},), # noqa: E501
'error': (str,), # noqa: E501
'error_description': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fields': 'fields', # noqa: E501
'error': 'error', # noqa: E501
'error_description': 'error_description', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ValidationExceptionData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fields ({str: (str,)}): [optional] # noqa: E501
error (str): [optional] # noqa: E501
error_description (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ValidationExceptionData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fields ({str: (str,)}): [optional] # noqa: E501
error (str): [optional] # noqa: E501
error_description (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/service_accounts_mgmt_sdk/rhoas_service_accounts_mgmt_sdk/model/validation_exception_data.py | 0.467089 | 0.188697 | validation_exception_data.py | pypi |
<p align="center">
<a href="https://rhoknp.readthedocs.io/en/latest/" rel="noopener" target="_blank">
<img width="150" src="https://raw.githubusercontent.com/ku-nlp/rhoknp/develop/docs/_static/logo.png" alt="rhoknp logo">
</a>
</p>
<h1 align="center">rhoknp: Yet another Python binding for Juman++/KNP/KWJA</h1>
<p align="center">
<a href="https://github.com/ku-nlp/rhoknp/actions/workflows/test.yml"><img alt="Test" src="https://img.shields.io/github/actions/workflow/status/ku-nlp/rhoknp/test.yml?branch=main&logo=github&label=test&style=flat-square"></a>
<a href="https://codecov.io/gh/ku-nlp/rhoknp"><img alt="Codecov" src="https://img.shields.io/codecov/c/github/ku-nlp/rhoknp?logo=codecov&style=flat-square"></a>
<a href="https://www.codefactor.io/repository/github/ku-nlp/rhoknp"><img alt="CodeFactor" src="https://img.shields.io/codefactor/grade/github/ku-nlp/rhoknp?style=flat-square"></a>
<a href="https://pypi.org/project/rhoknp/"><img alt="PyPI" src="https://img.shields.io/pypi/v/rhoknp?style=flat-square"></a>
<a href="https://pypi.org/project/rhoknp/"><img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/rhoknp?style=flat-square">
<a href="https://rhoknp.readthedocs.io/en/latest/"><img alt="Documentation" src="https://img.shields.io/readthedocs/rhoknp?style=flat-square"></a>
<a href="https://github.com/psf/black"><img alt="Code style - black" src="https://img.shields.io/badge/code%20style-black-222222?style=flat-square"></a>
</p>

---

**Documentation**: [https://rhoknp.readthedocs.io/en/latest/](https://rhoknp.readthedocs.io/en/latest/)

**Source Code**: [https://github.com/ku-nlp/rhoknp](https://github.com/ku-nlp/rhoknp)

---
_rhoknp_ is a Python binding for [Juman++](https://github.com/ku-nlp/jumanpp), [KNP](https://github.com/ku-nlp/knp), and [KWJA](https://github.com/ku-nlp/kwja).[^1]

[^1]: The logo was generated by OpenAI DALL·E 2.
```python
import rhoknp
# Perform morphological analysis by Juman++
jumanpp = rhoknp.Jumanpp()
sentence = jumanpp.apply_to_sentence(
"電気抵抗率は電気の通しにくさを表す物性値である。"
)
# Access to the result
for morpheme in sentence.morphemes: # a.k.a. keitai-so
...
# Save the result
with open("result.jumanpp", "wt") as f:
f.write(sentence.to_jumanpp())
# Load the result
with open("result.jumanpp", "rt") as f:
sentence = rhoknp.Sentence.from_jumanpp(f.read())
```
## Requirements
- Python 3.7+
- (Optional) [Juman++](https://github.com/ku-nlp/jumanpp) v2.0.0-rc3+
- (Optional) [KNP](https://github.com/ku-nlp/knp) 5.0+
- (Optional) [KWJA](https://github.com/ku-nlp/kwja) 1.0.0+
## Installation
```shell
pip install rhoknp
```
## Quick tour
Let's begin by using Juman++ with rhoknp.
Here, we present a simple example demonstrating how Juman++ can be used to analyze a sentence.
```python
# Perform morphological analysis by Juman++
jumanpp = rhoknp.Jumanpp()
sentence = jumanpp.apply_to_sentence("電気抵抗率は電気の通しにくさを表す物性値である。")
```
You can easily access the individual morphemes that make up the sentence.
```python
for morpheme in sentence.morphemes: # a.k.a. keitai-so
...
```
Sentence objects can be saved in the JUMAN format.
```python
# Save the sentence in the JUMAN format
with open("sentence.jumanpp", "wt") as f:
f.write(sentence.to_jumanpp())
# Load the sentence
with open("sentence.jumanpp", "rt") as f:
sentence = rhoknp.Sentence.from_jumanpp(f.read())
```
Almost the same APIs are available for KNP.
```python
# Perform language analysis by KNP
knp = rhoknp.KNP()
sentence = knp.apply_to_sentence("電気抵抗率は電気の通しにくさを表す物性値である。")
```
KNP performs language analysis at multiple levels.
```python
for clause in sentence.clauses: # a.k.a., setsu
...
for phrase in sentence.phrases: # a.k.a. bunsetsu
...
for base_phrase in sentence.base_phrases: # a.k.a. kihon-ku
...
for morpheme in sentence.morphemes: # a.k.a. keitai-so
...
```
Sentence objects can be saved in the KNP format.
```python
# Save the sentence in the KNP format
with open("sentence.knp", "wt") as f:
f.write(sentence.to_knp())
# Load the sentence
with open("sentence.knp", "rt") as f:
sentence = rhoknp.Sentence.from_knp(f.read())
```
Furthermore, rhoknp provides convenient APIs for document-level language analysis.
```python
document = rhoknp.Document.from_raw_text(
"電気抵抗率は電気の通しにくさを表す物性値である。単に抵抗率とも呼ばれる。"
)
# If you know sentence boundaries, you can use `Document.from_sentences` instead.
document = rhoknp.Document.from_sentences(
[
"電気抵抗率は電気の通しにくさを表す物性値である。",
"単に抵抗率とも呼ばれる。",
]
)
```
Document objects can be handled in a similar manner as Sentence objects.
```python
# Perform morphological analysis by Juman++
document = jumanpp.apply_to_document(document)
# Access language units in the document
for sentence in document.sentences:
...
for morpheme in document.morphemes:
...
# Save language analysis by Juman++
with open("document.jumanpp", "wt") as f:
f.write(document.to_jumanpp())
# Load language analysis by Juman++
with open("document.jumanpp", "rt") as f:
document = rhoknp.Document.from_jumanpp(f.read())
```
For more information, please refer to the [examples](./examples) and [documentation](https://rhoknp.readthedocs.io/en/latest/).
## Main differences from [pyknp](https://github.com/ku-nlp/pyknp/)
[_pyknp_](https://pypi.org/project/pyknp/) serves as the official Python binding for Juman++ and KNP.
In the development of rhoknp, we redesigned the API, considering the current use cases of pyknp.
The key differences between the two are as follows:
- **Support for document-level language analysis**: rhoknp allows you to load and instantiate the results of document-level language analysis, including cohesion analysis and discourse relation analysis (see the loading sketch below this list).
- **Strict type-awareness**: rhoknp has been thoroughly annotated with type annotations, ensuring strict type checking and improved code clarity.
- **Comprehensive test suite**: rhoknp is extensively tested with a comprehensive test suite. You can view the code coverage report on [Codecov](https://app.codecov.io/gh/ku-nlp/rhoknp).
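
A document-level analysis saved in the KNP format can be loaded back as a `Document`. The snippet below is a minimal sketch: it assumes a `document.knp` file produced beforehand (e.g., with `document.to_knp()`), and only uses accessors already shown above.

```python
import rhoknp

# Load a document-level KNP analysis saved earlier
with open("document.knp", "rt") as f:
    document = rhoknp.Document.from_knp(f.read())

# Access language units across the whole document
for sentence in document.sentences:
    for base_phrase in sentence.base_phrases:  # a.k.a. kihon-ku
        ...
```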
## License
MIT
## Contributing
We warmly welcome contributions to rhoknp.
You can get started by reading the [contribution guide](https://rhoknp.readthedocs.io/en/latest/contributing/index.html).
## Reference
- [KNP FORMAT](http://cr.fvcrc.i.nagoya-u.ac.jp/~sasano/knp/format.html)
- [KNP - KUROHASHI-CHU-MURAWAKI LAB](https://nlp.ist.i.kyoto-u.ac.jp/?KNP)
| /rhoknp-1.3.2.tar.gz/rhoknp-1.3.2/README.md | 0.547706 | 0.947624 | README.md | pypi |
import asyncio
import redis.asyncio as redis
import re
def parse_http_message(message: str):
parsed = message.split("\r\n\r\n")
header_lines = parsed[0].split("\r\n")
body = parsed[1] if len(parsed) > 1 else ""
    # Try to parse a status line first (i.e. the message is a response)
match = re.match(r"(?P<version>HTTP/\d\.\d) (?P<status>\d{3}) (?P<message>.+)$", header_lines[0])
if match:
return {
"status": int(match.group("status")),
"message": match.group("message"),
"headers": parse_headers(header_lines[1:]),
"body": body
}
else:
# Parse the request line
method, path, query = parse_request_line(header_lines[0])
return {
"method": method,
"path": path,
"query": query,
"headers": parse_headers(header_lines[1:]),
"body": body
}
def parse_request_line(request_line: str):
match = re.match(r"(?P<method>\w+) (?P<path>[^\s\?]+)(\?(?P<query>.+))?\sHTTP/\d\.\d", request_line)
return match.group("method"), match.group("path"), match.group("query")
def parse_headers(header_lines):
headers = {}
for line in header_lines:
key, value = line.split(": ")
headers[key] = value
return headers
def serialize_http_message(message):
if "status" in message:
# Serialize a response
status_line = f"HTTP/1.1 {message['status']} {message['message']}\r\n"
else:
# Serialize a request
status_line = f"{message['method']} {message['path']}"
if message["query"]:
status_line += f"?{message['query']}"
status_line += " HTTP/1.1\r\n"
headers = "".join([f"{key}: {value}\r\n" for key, value in message["headers"].items()])
body = message["body"]
return f"{status_line}{headers}\r\n{body}"
def random_name():
import random
return "".join(
[random.choice("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") for _ in range(0, 20)])
def http_status_code_to_message(status_code):
messages = {
100: "Continue",
101: "Switching Protocols",
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
206: "Partial Content",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
307: "Temporary Redirect",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict",
410: "Gone",
411: "Length Required",
412: "Precondition Failed",
413: "Request Entity Too Large",
414: "Request-URI Too Long",
415: "Unsupported Media Type",
416: "Requested Range Not Satisfiable",
417: "Expectation Failed",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Timeout",
505: "HTTP Version Not Supported"
}
return messages.get(status_code, "Unknown")
class Response:
def __init__(self, req):
self.request = req
        self.content_type_ = "text/plain"
self.body = ""
self.status_code = 200
self.message = http_status_code_to_message(self.status_code)
self.headers = {}
def status(self, code: int):
self.status_code = code
self.message = http_status_code_to_message(self.status_code)
return self
def header(self, headers):
self.headers.update(headers)
def content_type(self, content_type: str):
self.content_type_ = content_type
return self
def send(self, body: str) -> str:
self.body = body
return serialize_http_message({
"message": self.message,
"status": self.status_code,
"headers": {
"Content-Type": self.content_type_,
"Content-Length": len(self.body),
"X-Socket-ID": self.request["headers"]["X-Socket-ID"],
**self.headers
},
"body": self.body,
})
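# Illustrative use of Response. The request dict is hypothetical; only the
# X-Socket-ID header is required by send():
#
#     res = Response({"headers": {"X-Socket-ID": "abc123"}})
#     payload = res.status(404).content_type("application/json").send('{"error": "not found"}')
#     # payload is a serialized HTTP/1.1 response, ready to publish on RESPONSE_PIPE.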
async def reader(channel: redis.client.PubSub, redis_context: redis.Redis, name: str, desc: str, endpoints):
while True:
message = await channel.get_message(ignore_subscribe_messages=True)
if message is not None and message['type'] == "message":
if message["channel"].decode('utf-8') == "HEARTBEAT":
await redis_context.publish("ACKNOWLEDGE_PIPE", f"{name}{chr(14)}{desc}")
elif message["channel"].decode('utf-8') == "REQUEST_PIPE":
data = message['data'].decode('utf-8')
req = parse_http_message(data)
found = False
for ep in endpoints:
if ep["path"] == req["path"] and ep["method"] == req["method"]:
res_pre = Response(req)
res_pre.headers.update({"X-RES-SERVER": name})
res = ep["callback"](req, res_pre)
await redis_context.publish("RESPONSE_PIPE", res)
found = True
break
if not found:
await redis_context.publish("REJECT_PIPE", req["headers"]["X-Socket-ID"])
class RHTTPServer:
    def __init__(self, host: str, port: int, name: str = "", desc: str = "PYTHON"):
        self.host, self.port = host, port
        # Generate the random name per instance; a call in the default argument would
        # only run once, when the class is defined.
        self.name, self.desc = name or random_name(), desc
self.endpoints = []
def __add_ep(self, path, method, callback):
method = method.upper()
self.endpoints.append({
"path": path,
"method": method,
"callback": callback,
})
def route(self, path, method):
def route_func(func):
self.__add_ep(path, method, func)
return func
return route_func
def get(self, path, callback):
self.__add_ep(path, "GET", callback)
def post(self, path, callback):
self.__add_ep(path, "POST", callback)
def put(self, path, callback):
self.__add_ep(path, "PUT", callback)
def patch(self, path, callback):
self.__add_ep(path, "PATCH", callback)
def delete(self, path, callback):
self.__add_ep(path, "DELETE", callback)
def head(self, path, callback):
self.__add_ep(path, "HEAD", callback)
def options(self, path, callback):
self.__add_ep(path, "OPTIONS", callback)
def connect(self, path, callback):
self.__add_ep(path, "CONNECT", callback)
def trace(self, path, callback):
self.__add_ep(path, "TRACE", callback)
async def __listen_asyncio(self):
print("Server is listening to Redis")
r = await redis.from_url(f"redis://{self.host}:{self.port}")
self.redis_context = r
async with self.redis_context.pubsub() as pubsub:
await pubsub.subscribe("REQUEST_PIPE", "HEARTBEAT")
future = asyncio.create_task(reader(pubsub, self.redis_context, self.name, self.desc, self.endpoints))
await future
def listen(self):
asyncio.run(self.__listen_asyncio()) | /rhttp_python-0.1.0.tar.gz/rhttp_python-0.1.0/rhttp_python/RHTTPServer.py | 0.551574 | 0.229686 | RHTTPServer.py | pypi |
import dataclasses
import datetime
import decimal
import inspect
import re
import time
import uuid
from functools import total_ordering
from typing import TypeVar, Protocol, ClassVar, Union, NewType
import phonenumbers
from psycopg import AsyncConnection
from strawberry import scalar
from strawberry.scalars import JSON, Base16, Base32, Base64
from strawberry.types import Info
from strawberry.types.types import TypeDefinition
from rhubarb.errors import RhubarbException, RhubarbValidationError
Elem = TypeVar("Elem")
Binary = scalar(
NewType("Binary", bytes),
serialize=lambda v: v,
parse_value=lambda v: v,
)
Serial = scalar(
NewType("Serial", int),
serialize=lambda v: v,
parse_value=lambda v: v,
)
@total_ordering
class RhubarbPhoneNumber(phonenumbers.PhoneNumber):
"""
Borrowed from django-phonenumber-field.
    An extended version of phonenumbers.PhoneNumber that provides
    some neat, more Pythonic, easy-to-access methods. This makes using a
    PhoneNumber instance much easier, especially in templates and such.
"""
format_map = {
"E164": phonenumbers.PhoneNumberFormat.E164,
"INTERNATIONAL": phonenumbers.PhoneNumberFormat.INTERNATIONAL,
"NATIONAL": phonenumbers.PhoneNumberFormat.NATIONAL,
"RFC3966": phonenumbers.PhoneNumberFormat.RFC3966,
}
@classmethod
def from_string(cls, phone_number, region=None):
"""
:arg str phone_number: parse this :class:`str` as a phone number.
        :keyword str region: 2-letter country code as defined in ISO 3166-1.
            When not supplied, the number must already include the country code
            (international / E.164 format).
        """
        phone_number_obj = cls()
phonenumbers.parse(
number=phone_number,
region=region,
keep_raw_input=True,
numobj=phone_number_obj,
)
return phone_number_obj
def __str__(self):
if self.is_valid():
format_string = "E164"
fmt = self.format_map[format_string]
return self.format_as(fmt)
else:
return self.raw_input
def __repr__(self):
if not self.is_valid():
return f"Invalid{type(self).__name__}(raw_input={self.raw_input})"
return super().__repr__()
def is_valid(self):
"""
Whether the number supplied is actually valid.
:return: ``True`` when the phone number is valid.
:rtype: bool
"""
return phonenumbers.is_valid_number(self)
def format_as(self, format):
return phonenumbers.format_number(self, format)
@property
def as_international(self):
return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL)
@property
def as_e164(self):
return self.format_as(phonenumbers.PhoneNumberFormat.E164)
@property
def as_national(self):
return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL)
@property
def as_rfc3966(self):
return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966)
def __len__(self):
return len(str(self))
def __eq__(self, other):
"""
        Override the parent's equality because we store only the string
        representation of the phone number, so we compare those strings.
        """
        if other is None or other == "":
            return False
        elif isinstance(other, str):
            try:
                other = type(self).from_string(other)
            except phonenumbers.NumberParseException:
                return False
elif isinstance(other, type(self)):
# Nothing to do. Good to compare.
pass
elif isinstance(other, phonenumbers.PhoneNumber):
# The parent class of PhoneNumber does not have .is_valid().
# We need to make it match ours.
old_other = other
other = type(self)()
other.merge_from(old_other)
else:
return False
format_string = "E164"
fmt = self.format_map[format_string]
self_str = self.format_as(fmt) if self.is_valid() else self.raw_input
other_str = other.format_as(fmt) if other.is_valid() else other.raw_input
return self_str == other_str
def __lt__(self, other):
if isinstance(other, phonenumbers.PhoneNumber):
old_other = other
other = type(self)()
other.merge_from(old_other)
elif not isinstance(other, type(self)):
raise TypeError(
"'<' not supported between instances of "
"'%s' and '%s'" % (type(self).__name__, type(other).__name__)
)
invalid = None
if not self.is_valid():
invalid = self
elif not other.is_valid():
invalid = other
if invalid is not None:
raise ValueError("Invalid phone number: %r" % invalid)
format_string = "E164"
fmt = self.format_map[format_string]
return self.format_as(fmt) < other.format_as(fmt)
def __hash__(self):
return hash(str(self))
def parse_phone(v):
    v = str(v)
    return RhubarbPhoneNumber.from_string(v)
PhoneNumber = scalar(
NewType("PhoneNumber", RhubarbPhoneNumber),
serialize=lambda v: str(v),
parse_value=parse_phone,
)
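# Illustrative use of the underlying parser. The number is just a sample E.164
# string, not anything meaningful to rhubarb:
#
#     number = RhubarbPhoneNumber.from_string("+16502530000")
#     if number.is_valid():
#         assert number.as_e164 == "+16502530000"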
EMAIL_REGEX = re.compile(
r"([A-Za-z0-9]+[.-_])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\.[A-Z|a-z]{2,})+"
)
def parse_email(v):
v = str(v)
if not EMAIL_REGEX.fullmatch(v):
raise RhubarbValidationError("Invalid Email {v}")
return v
Email = scalar(
NewType("Email", str),
serialize=lambda v: v,
parse_value=parse_email,
)
def parse_small_int(v):
v = int(v)
if v >= 256:
raise RhubarbValidationError("Invalid {v}")
return SmallIntType(v)
SmallIntType = NewType("SmallInt", int)
SmallInt = scalar(
SmallIntType,
serialize=lambda v: v,
parse_value=parse_small_int,
)
def new_ref_id() -> str:
return str(time.monotonic_ns())[-5:]
def get_conn(info: Info) -> AsyncConnection:
return info.context["conn"]
class SqlModel(Protocol):
_type_definition: ClassVar[TypeDefinition]
__schema__: str
__table__: str
__pk__: str | tuple[str]
ScalarSQLValue = Union[
None,
str,
bytes,
datetime.datetime,
datetime.date,
bool,
int,
float,
dict,
list,
decimal.Decimal,
SqlModel,
JSON,
Binary,
Base16,
Base32,
Base64,
]
SQLValue = Union[ScalarSQLValue, list[ScalarSQLValue], dict[str, ScalarSQLValue]]
T = TypeVar("T", bound=SqlModel)
J = TypeVar("J", bound=SqlModel)
V = TypeVar("V", bound=SQLValue)
@dataclasses.dataclass(frozen=True)
class Unset:
"""Values that aren't loaded from the database"""
def __sql__(self, builder):
builder.write("DEFAULT")
UNSET = Unset()
def call_with_maybe_info(f, obj, info):
sig = inspect.signature(f)
if len(sig.parameters) == 1:
return f(obj)
else:
return f(obj, info) | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/core.py | 0.785309 | 0.195114 | core.py | pypi |
import copy
from typing import Type, Callable, Optional
import psycopg.errors
from psycopg import AsyncConnection, Rollback
from strawberry.types import Info
from rhubarb.core import (
T,
V,
call_with_maybe_info,
Unset,
SQLValue,
)
from rhubarb.object_set import (
InsertSet,
columns,
ModelSelector,
Selector,
ObjectSet,
pk_column_names,
ColumnField,
pk_concrete,
UpdateSet,
DeleteSet,
ModelUpdater,
pk_selection,
default_function_to_python
)
def query(
conn: AsyncConnection, m: Type[T], info: Info = None, one=False
) -> ObjectSet[T, ModelSelector[T]]:
return ObjectSet(m, conn=conn, info=info, one=one)
def reload(
conn: AsyncConnection, m: T, info: Info = None
) -> ObjectSet[T, ModelSelector[T]]:
return by_pk(conn, m.__class__, pk_concrete(m), info)
def by_pk(
conn: AsyncConnection,
m: Type[T],
pk: SQLValue | tuple[SQLValue, ...],
info: Info = None,
) -> ObjectSet[T, ModelSelector[T]]:
return ObjectSet(m, conn=conn, info=info, one=True).where(
lambda x: pk_selection(x) == pk
)
def by_kw(
conn: AsyncConnection,
m: Type[T],
info: Info = None,
**kwargs
) -> ObjectSet[T, ModelSelector[T]]:
return ObjectSet(m, conn=conn, info=info, one=True).kw_where(
**kwargs
)
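# Illustrative usage of the query helpers above. ``conn`` is an AsyncConnection and
# ``User`` is a hypothetical table-decorated model, not something defined here:
#
#     user = await by_pk(conn, User, user_id).one()
#     staff = await query(conn, User).where(lambda u: u.is_staff).as_list()
#     by_email = await by_kw(conn, User, email="someone@example.com").one()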
def delete(
conn: AsyncConnection,
model: Type[T],
where: Callable[[ModelSelector], Selector[bool] | bool],
info: Info | None = None,
one: bool = False,
returning: Callable[[ModelSelector], Selector[bool] | bool] | None | bool = None,
):
    selected_fields = info.selected_fields if info is not None else None
object_set = ObjectSet(model, conn=conn, fields=selected_fields)
model_reference = object_set.model_reference
model_selector = object_set.model_selector
where_selector, returning_selector = build_where_and_returning(
model, model_selector, info, where, returning
)
return DeleteSet(
model_reference,
conn=conn,
where=where_selector,
returning=returning_selector,
one=one,
)
def build_where_and_returning(model, model_selector, info, where, returning):
where_selector = where(model_selector)
if hasattr(model, "__where__"):
where_selector = call_with_maybe_info(model.__where__, model_selector, info)
returning_selector = None
if returning is not None:
if isinstance(returning, bool) and returning:
returning_selector = model_selector
else:
returning_selector = returning(model_selector)
return where_selector, returning_selector
def build_update_set(
info, conn, model, model_reference, setters, where_selector, returning_selector, one
):
for default_update in columns(model, update_default=True):
if default_update.column_name not in setters:
setters[default_update.column_name] = default_function_to_python(
default_update.update_default
)()
return UpdateSet(
model_reference,
conn=conn,
setters=setters,
where=where_selector,
one=one,
returning=returning_selector,
)
def update(
conn: AsyncConnection,
model: Type[T],
set_fn: Callable[[ModelUpdater], ModelUpdater | None],
where: Callable[[ModelSelector], Selector[bool] | bool],
info: Info | None = None,
one=False,
returning: Callable[[ModelSelector], Selector[bool] | bool] | None | bool = None,
):
object_set = ObjectSet(model, conn=conn, info=info)
model_reference = object_set.model_reference
model_selector = object_set.model_selector
model_updater = ModelUpdater(model_selector)
set_fn(model_updater)
where_selector, returning_selector = build_where_and_returning(
model, model_selector, info, where, returning
)
setters = model_updater._setters
return build_update_set(
info,
conn,
model,
model_reference,
setters,
where_selector,
returning_selector,
one,
)
async def find_or_create(
conn: AsyncConnection, obj: T, info: Info = None, **kwargs
) -> T:
model = obj.__class__
    if existing := await by_kw(conn, model, info=info, **kwargs).one():
        return existing
    async with conn.transaction() as txn:
        try:
            return await save(conn, obj, info=info).execute()
        except psycopg.errors.UniqueViolation:
            raise Rollback(txn)
    return await by_kw(conn, model, info=info, **kwargs).one()
def empty_pk(obj: T):
pk = pk_concrete(obj)
return (
pk is None
or isinstance(pk, Unset)
        or isinstance(pk, tuple)
and all(p is None or isinstance(p, Unset) for p in pk)
)
def save(conn: AsyncConnection, obj: T, info: Info | None = None, insert_with_pk=False):
model = obj.__class__
if empty_pk(obj) or insert_with_pk:
return insert_objs(
conn, model, [obj], skip_pks=not insert_with_pk, one=True, returning=True
)
object_set = ObjectSet(model, conn=conn, info=info)
model_reference = object_set.model_reference
model_selector = object_set.model_selector
setters = {
col_name: v
for col_name, v in (
(col.column_name, getattr(obj, col.name))
for col in columns(model)
if not col.virtual
)
if not isinstance(v, Unset)
}
pk_selectors = object_set.pk_selector
pk_real = pk_concrete(obj)
where_selector = Selector.__eq__(pk_selectors, pk_real)
if hasattr(model, "__where__"):
where_selector = call_with_maybe_info(model.__where__, model_selector, info)
return build_update_set(
info,
conn,
model,
model_reference,
setters,
where_selector,
model_selector,
one=True,
)
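# Illustrative behaviour of save(): it builds an INSERT while the primary key is
# unset and an UPDATE once it is populated. ``conn`` and ``User`` are hypothetical:
#
#     user = await save(conn, User(username="alice")).execute()  # INSERT
#     user.username = "alice2"
#     user = await save(conn, user).execute()  # UPDATE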
def insert(
conn: AsyncConnection,
model: Type[T],
cols_fn: Callable[[ModelSelector], list[ColumnField, ...]],
values_fn: Callable[[ModelSelector], list[tuple[V, ...]]],
info: Info | None = None,
returning: Callable[[ModelSelector], Selector[bool] | bool] | None | bool = None,
):
object_set = ObjectSet(model, conn=conn, info=info)
model_reference = object_set.model_reference
model_selector = object_set.model_selector
insert_columns = cols_fn(model_selector)
values = values_fn(model_selector)
return build_insert_set(
info,
conn,
model,
model_reference,
model_selector,
insert_columns,
values,
returning,
one=True,
)
def build_insert_set(
info,
conn,
model,
model_reference,
model_selector,
insert_columns,
values,
returning,
one,
):
returning_selector = None
if returning is not None:
if isinstance(returning, bool) and returning:
returning_selector = model_selector
else:
returning_selector = returning(model_selector)
for default_insert in columns(model, insert_default=True):
if default_insert not in insert_columns:
insert_columns.append(default_insert)
val = default_function_to_python(default_insert.insert_default)()
new_values = copy.copy(values)
for xx, value in enumerate(new_values):
                values[xx] = value + (val,)
return InsertSet(
model_reference,
conn=conn,
columns=insert_columns,
values=values,
one=one,
returning=returning_selector,
)
def insert_objs(
conn: AsyncConnection,
model: Type[T],
values: list[T],
skip_pks=True,
exclude_columns: set[str] = None,
info: Info | None = None,
one=False,
returning: Callable[[ModelSelector], Selector[bool] | bool] | None | bool = None,
):
object_set = ObjectSet(model, conn=conn, info=info)
model_reference = object_set.model_reference
model_selector = object_set.model_selector
exclude = exclude_columns or set()
if skip_pks:
exclude |= pk_column_names(model)
insert_columns = [
col for col in columns(model, virtual=False) if col.name not in exclude
]
insert_values = []
for row in values:
insert_values.append(tuple(getattr(row, f.name) for f in insert_columns))
return build_insert_set(
info,
conn,
model,
model_reference,
model_selector,
insert_columns,
insert_values,
returning,
one,
) | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/crud.py | 0.674372 | 0.253537 | crud.py | pypi |
from typing import Any, Callable
from rhubarb.core import V
from rhubarb.object_set import (
ModelSelector,
Aggregate,
Selector,
SqlType,
Computed,
Case,
Value,
RawSQL,
UseSelector,
func,
)
func = func
def sum_agg(model_selector: ModelSelector, sel: Selector):
return Aggregate(model_selector, args=[sel], op="SUM", infixed=False)
def count_agg(model_selector: ModelSelector, sel: Selector):
return Aggregate(model_selector, args=[sel], op="COUNT", infixed=False)
def avg_agg(model_selector: ModelSelector, sel: Selector):
return Aggregate(model_selector, args=[sel], op="AVG", infixed=False)
def max_agg(model_selector: ModelSelector, sel: Selector):
return Aggregate(model_selector, args=[sel], op="MAX", infixed=False)
def min_agg(model_selector: ModelSelector, sel: Selector):
return Aggregate(model_selector, args=[sel], op="MIN", infixed=False)
def string_agg(
model_selector: ModelSelector, column: Selector, delimeter: Selector[str]
):
return Aggregate(
model_selector, args=[column, delimeter], op="STRING_AGG", infixed=False
)
def array_agg(model_selector: ModelSelector, column: Selector):
return Aggregate(model_selector, args=[column], op="ARRAY_AGG", infixed=False)
def json_agg(model_selector: ModelSelector, column: Selector):
return Aggregate(model_selector, args=[column], op="JSON_AGG", infixed=False)
def concat(*args: Selector[str] | str):
return Computed(args=list(args), op="CONCAT", infixed=False)
def coalesce(*args: Selector[str] | str):
return Computed(args=list(args), op="COALESCE", infixed=False)
def cast(o: Selector, t: SqlType):
return Computed(args=[o, t], op="CAST", infixed=False, sep="AS")
def is_null(o: Selector):
return Computed(args=[o], op="IS NULL", infixed=True)
def is_not_null(o: Selector):
return Computed(args=[o], op="IS NOT NULL", infixed=True)
def case(*whens: tuple[Selector[bool], Selector[V]], default: Selector[V] = None):
return Case(list(whens), default=default)
def val(v: Any):
return Value(v)
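# Illustrative composition of the helpers in this module. ``m`` stands for a
# hypothetical ModelSelector with first_name / last_name / deleted_at columns:
#
#     full_name = concat(m.first_name, " ", m.last_name)
#     status = case((is_null(m.deleted_at), val("active")), default=val("deleted"))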
def raw(v: Any):
return RawSQL(v)
def agg(ms: ModelSelector, fn: str, *args: Selector, infixed=False):
return Aggregate(ms, args=list(args), op=fn, infixed=infixed)
def use(
fn: Callable[..., V], *depends_on: Selector, **kw_depends_on: Selector
) -> UseSelector[V]:
return UseSelector(fn, list(depends_on), kw_depends_on) | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/functions.py | 0.832849 | 0.351144 | functions.py | pypi |
import copy
import inspect
import pprint
from typing import Callable, Type, Any, Awaitable, Optional, Self
from rhubarb.core import SqlModel, T, UNSET, Unset
from rhubarb.errors import RhubarbException
from rhubarb.object_set import (
SqlBuilder,
SqlType,
DEFAULT_REGISTRY,
ColumnField,
column,
columns,
pk_column_names,
Index,
Constraint,
ObjectSet,
References,
ON_DELETE,
DEFAULT_SQL_FUNCTION
)
from rhubarb.object_set import table as table_decorator
import dataclasses
from psycopg import AsyncConnection
@dataclasses.dataclass(frozen=True)
class FrozenReference:
table_name: str | None
constraint_name: str | None = None
on_delete: ON_DELETE | None = None
@classmethod
def from_reference(cls, reference: References) -> Optional[Self]:
if reference:
return cls(
table_name=reference.real_table_name,
constraint_name=reference.constraint_name,
on_delete=reference.on_delete,
)
def as_reference(self) -> References:
return References(
self.table_name,
constraint_name=self.constraint_name,
on_delete=self.on_delete,
)
def compute_constraint_name(self, column_name: str):
if self.constraint_name:
return self.constraint_name
return f"{column_name}_fk"
@dataclasses.dataclass
class MigrationStateColumn:
name: str
python_name: str
type: SqlType
default: DEFAULT_SQL_FUNCTION | None = None
references: FrozenReference | None = None
def as_column_field(self) -> ColumnField:
return column(
virtual=False,
column_name=self.name,
python_name=self.python_name,
references=self.references and self.references.as_reference(),
)
@dataclasses.dataclass(frozen=True)
class MigrationStateTable:
schema: str
name: str
class_name: str
primary_key: tuple[str, ...]
columns: dict[str, MigrationStateColumn]
constraints: dict[str, "MigrationConstraint"] = dataclasses.field(
default_factory=dict
)
indexes: dict[str, "MigrationIndex"] = dataclasses.field(default_factory=dict)
@dataclasses.dataclass(frozen=True)
class MigrationStateDatabase:
tables: dict[(str, str), MigrationStateTable] = dataclasses.field(
default_factory=dict
)
meta: dict[str, Any] = dataclasses.field(default_factory=dict)
@classmethod
def from_registry(cls, registry=None):
registry = registry or DEFAULT_REGISTRY
tables = {}
for model in registry.values(set()):
if getattr(model, "__managed__", True):
state_m = state_from_table(model)
tables[(state_m.schema, state_m.name)] = state_m
return cls(tables=tables)
ALL_OPERATIONS = {}
def register_operation(t):
ALL_OPERATIONS[t.__name__] = t
return t
def state_from_table(m: Type[T]):
cols = {}
for column_field in columns(m, virtual=False):
default = (
UNSET
if column_field.sql_default == dataclasses.MISSING
else column_field.sql_default
)
cols[column_field.column_name] = MigrationStateColumn(
name=column_field.column_name,
type=column_field.column_type,
default=default,
python_name=column_field.python_name,
references=FrozenReference.from_reference(column_field.references),
)
pk = tuple(pk_column_names(m))
schema = m.__schema__
name = m.__table__
pks = ", ".join(pk)
constraints = {f"{name}_pk": MigrationConstraint(check=f"{pks}", primary_key=True)}
if hasattr(m, "__constraints__"):
os = ObjectSet(m, None)
additional_constraints = m.__constraints__(os.model_selector)
for k, v in additional_constraints.items():
if not isinstance(v, Constraint):
raise RhubarbException(
f"__constraints__ should return a dict[str, Constraint]. Got {v}"
)
constraints.setdefault(k, MigrationConstraint.from_constraint(v))
indexes = {}
if hasattr(m, "__indexes__"):
os = ObjectSet(m, None)
additional_indexes = m.__indexes__(os.model_selector)
for k, v in additional_indexes.items():
if not isinstance(v, Index):
raise RhubarbException(
f"__indexes__ should return a dict[str, Index]. Got {v}"
)
indexes.setdefault(k, MigrationIndex.from_index(v))
return MigrationStateTable(
schema=schema,
name=name,
primary_key=pk,
columns=cols,
class_name=m.__name__,
indexes=indexes,
constraints=constraints,
)
@register_operation
@dataclasses.dataclass(frozen=True)
class MigrationOperation:
def __as_py__(self) -> str:
param_str = "\n ".join(pprint.pformat(self).split("\n"))
return f" {param_str}"
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
raise NotImplementedError
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
raise NotImplementedError
@dataclasses.dataclass(frozen=True)
class AlterOperation:
    def __sql__(self, builder: SqlBuilder):
        pass
    def alter(self, table: MigrationStateTable) -> MigrationStateTable:
        pass
@dataclasses.dataclass(frozen=True)
class CreateColumn(AlterOperation):
name: str
python_name: str
type: SqlType
default: DEFAULT_SQL_FUNCTION | None = None
references: FrozenReference | None = None
def __sql__(self, builder: SqlBuilder):
builder.write(f"ADD COLUMN {self.name} {self.type.sql}")
if self.type.optional:
builder.write(" NULL")
else:
builder.write(" NOT NULL")
if not isinstance(self.default, Unset):
default = self.default
builder.write(f" DEFAULT ")
builder.write_value(default, self.type)
if self.references:
builder.write(f" REFERENCES {self.references.table_name}")
if self.references.on_delete:
builder.write(f" ON DELETE {self.references.on_delete}")
def alter(self, table: MigrationStateTable) -> MigrationStateTable:
columns = copy.copy(table.columns)
columns[self.name] = MigrationStateColumn(
name=self.name,
type=self.type,
default=self.default,
python_name=self.python_name,
references=self.references,
)
return dataclasses.replace(table, columns=columns)
@dataclasses.dataclass(frozen=True)
class DropColumn:
name: str
def __sql__(self, builder: SqlBuilder):
builder.write(f"DROP COLUMN {self.name}")
def alter(self, table):
columns = copy.copy(table.columns)
columns.pop(self.name)
return dataclasses.replace(table, columns=columns)
@dataclasses.dataclass(frozen=True)
class AlterTypeUsing:
name: str
new_type: SqlType
using: str | None = None
def __sql__(self, builder: SqlBuilder):
builder.write(f"ALTER {self.name} TYPE ")
self.new_type.__sql__(builder)
if self.using is None:
using = f"{self.name}::TEXT::{self.new_type.sql}"
else:
using = self.using
builder.write(f" USING {using}")
def alter(self, table):
columns = copy.copy(table.columns)
col = columns.pop(self.name)
new_col = dataclasses.replace(col, type=self.new_type)
columns[self.name] = new_col
return dataclasses.replace(table, columns=columns)
@dataclasses.dataclass(frozen=True)
class SetDefault:
name: str
type: SqlType
default: DEFAULT_SQL_FUNCTION | None = None
def __sql__(self, builder: SqlBuilder):
builder.write(f"ALTER {self.name} SET ")
default = self.default
builder.write(f" DEFAULT ")
builder.write_value(default, self.type)
def alter(self, table):
columns = copy.copy(table.columns)
col = columns.pop(self.name)
new_col = dataclasses.replace(col, default=self.default)
columns[self.name] = new_col
return dataclasses.replace(table, columns=columns)
@dataclasses.dataclass(frozen=True)
class DropDefault:
name: str
def __sql__(self, builder: SqlBuilder):
builder.write(f"ALTER {self.name} DROP DEFAULT")
def alter(self, table):
columns = copy.copy(table.columns)
col = columns.pop(self.name)
new_col = dataclasses.replace(col, default=UNSET)
columns[self.name] = new_col
return dataclasses.replace(table, columns=columns)
@dataclasses.dataclass(frozen=True)
class AddConstraint:
constraint_name: str
constraint: "MigrationConstraint"
def __sql__(self, builder: SqlBuilder):
builder.write(
f"ADD CONSTRAINT {self.constraint_name} {self.constraint.modifier} ({self.constraint.check})"
)
def alter(self, table: MigrationStateTable):
new_constraints = copy.copy(table.constraints)
new_constraints[self.constraint_name] = self.constraint
return dataclasses.replace(table, constraints=new_constraints)
@dataclasses.dataclass(frozen=True)
class DropConstraint:
constraint_name: str
def __sql__(self, builder: SqlBuilder):
builder.write(f"DROP CONSTRAINT {self.constraint_name}")
def alter(self, table: MigrationStateTable):
new_constraints = copy.copy(table.constraints)
new_constraints.pop(self.constraint_name)
return dataclasses.replace(table, constraints=new_constraints)
@dataclasses.dataclass(frozen=True)
class AddIndex:
table_name: (str, str)
index_name: str
index: "MigrationIndex"
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder(dml_mode=True)
unique = ""
if self.index.unique:
unique = "UNIQUE "
concurrently = ""
if self.index.concurrently:
concurrently = "CONCURRENTLY "
builder.write(
f"CREATE {unique}INDEX {concurrently}{self.index_name} ON {self.table_name[1]} {self.index.on}"
)
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
table = state.tables[self.table_name]
new_indexes = copy.copy(table.indexes)
new_indexes[self.index_name] = self.index
new_table = dataclasses.replace(table, indexes=new_indexes)
tables[self.table_name] = new_table
return dataclasses.replace(state, tables=tables)
@dataclasses.dataclass(frozen=True)
class RenameIndex:
old_index_name: str
index_name: str
table_name: (str, str)
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder()
builder.write(f"ALTER INDEX {self.old_index_name} RENAME TO {self.index_name}")
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
table = state.tables[self.table_name]
new_indexes = copy.copy(table.indexes)
new_indexes[self.index_name] = new_indexes.pop(self.old_index_name)
new_table = dataclasses.replace(table, indexes=new_indexes)
tables[self.table_name] = new_table
return dataclasses.replace(state, tables=tables)
@dataclasses.dataclass(frozen=True)
class DropIndex:
table_name: (str, str)
index_name: str
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder()
builder.write(f"DROP INDEX {self.index_name}")
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
table = state.tables[self.table_name]
new_indexes = copy.copy(table.indexes)
new_indexes.pop(self.index_name)
tables[self.table_name] = dataclasses.replace(table, indexes=new_indexes)
return dataclasses.replace(state, tables=tables)
@dataclasses.dataclass(frozen=True)
class AddReferencesConstraint:
name: str
constraint_name: str
references: FrozenReference
def __sql__(self, builder: SqlBuilder):
builder.write(
f"ADD CONSTRAINT {self.constraint_name} FOREIGN KEY ({self.name}) REFERENCES {self.references.table_name}"
)
if self.references.on_delete:
builder.write(f" ON DELETE {self.references.on_delete}")
def alter(self, table: MigrationStateTable):
columns = copy.copy(table.columns)
col = columns.pop(self.name)
new_col = dataclasses.replace(col, references=self.references)
columns[self.name] = new_col
return dataclasses.replace(table, columns=columns)
AlterOperations = (
DropColumn
| CreateColumn
| SetDefault
| DropDefault
| AddReferencesConstraint
| DropConstraint
| AddConstraint
| AlterTypeUsing
)
@dataclasses.dataclass(kw_only=True)
class MigrationIndex:
on: str
unique: bool = True
concurrently: bool = True
@classmethod
def from_index(cls, idx: Index):
builder = SqlBuilder(dml_mode=True)
builder.write("(")
if isinstance(idx.on, tuple):
on_cols = idx.on
else:
on_cols = [idx.on]
wrote_val = False
for on in on_cols:
if wrote_val:
builder.write(", ")
wrote_val = True
on.__sql__(builder)
builder.write(")")
if builder.vars:
raise RhubarbException(
f"Cannot use variables when defining Index {builder.q} {builder.vars}"
)
on_str = builder.q
return cls(
on=on_str,
unique=idx.unique,
concurrently=idx.concurrently,
)
@dataclasses.dataclass(kw_only=True)
class MigrationConstraint:
check: str
unique: bool = False
primary_key: bool = False
@property
def modifier(self):
if self.primary_key:
return "PRIMARY KEY"
elif self.unique:
return "UNIQUE"
else:
return "CHECK"
@classmethod
def from_constraint(cls, cst: Constraint):
builder = SqlBuilder(dml_mode=True)
cst.check.__sql__(builder)
if builder.vars:
raise RhubarbException(
f"Cannot use variables when defining Constraint {builder.q} {builder.vars}"
)
check_str = builder.q
return cls(check=check_str, unique=cst.unique)
@register_operation
@dataclasses.dataclass(frozen=True)
class CreateTable(MigrationOperation):
schema: str
name: str
class_name: str
primary_key: tuple[str, ...]
columns: list[CreateColumn]
constraints: dict[str, MigrationConstraint] = dataclasses.field(
default_factory=dict
)
indexes: dict[str, MigrationIndex] = dataclasses.field(default_factory=dict)
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder(dml_mode=True)
builder.write(f"CREATE TABLE {self.name} (")
wrote_val = False
for column in self.columns:
if wrote_val:
builder.write(", ")
wrote_val = True
builder.write(f"{column.name} {column.type.sql}")
if column.type.optional:
builder.write(" NULL")
else:
builder.write(" NOT NULL")
if not isinstance(column.default, Unset):
default = column.default
builder.write(f" DEFAULT ")
builder.write_value(default, column.type)
if column.references:
constraint_name = column.references.compute_constraint_name(column.name)
builder.write(
f" CONSTRAINT {constraint_name} REFERENCES {column.references.table_name}"
)
if column.references.on_delete:
builder.write(f" ON DELETE {column.references.on_delete}")
for constraint_name, constraint in self.constraints.items():
if wrote_val:
builder.write(", ")
wrote_val = True
builder.write(
f"CONSTRAINT {constraint_name} {constraint.modifier} ({constraint.check})"
)
builder.write(f")")
await conn.execute(builder.q)
for index_name, index in self.indexes.items():
builder = SqlBuilder()
unique = ""
if index.unique:
unique = "UNIQUE "
concurrently = ""
if index.concurrently and conn.autocommit:
concurrently = "CONCURRENTLY "
builder.write(
f"CREATE {unique}INDEX {concurrently}{index_name} ON {self.name} {index.on}"
)
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
columns = {}
for column in self.columns:
columns[column.name] = MigrationStateColumn(
name=column.name,
type=column.type,
default=column.default,
python_name=column.python_name,
references=column.references,
)
new_table = MigrationStateTable(
schema=self.schema,
name=self.name,
class_name=self.class_name,
primary_key=self.primary_key,
columns=columns,
constraints=self.constraints,
indexes=self.indexes,
)
tables = copy.copy(state.tables)
tables[(self.schema, self.name)] = new_table
return dataclasses.replace(state, tables=tables)
@register_operation
@dataclasses.dataclass(frozen=True)
class DropTable(MigrationOperation):
schema: str
name: str
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
tables.pop((self.schema, self.name))
return dataclasses.replace(state, tables=tables)
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
await conn.execute(f"DROP TABLE {self.name}")
@register_operation
@dataclasses.dataclass(frozen=True)
class RenameTable(MigrationOperation):
schema: str
old_name: str
new_name: str
new_class_name: str
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder()
        builder.write(f'ALTER TABLE "{self.old_name}" RENAME TO {self.new_name}')
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
old_table = tables.pop((self.schema, self.old_name))
new_table = dataclasses.replace(
old_table, name=self.new_name, class_name=self.new_class_name
)
tables[(self.schema, new_table.name)] = new_table
return dataclasses.replace(state, tables=tables)
@register_operation
@dataclasses.dataclass(frozen=True)
class RenameColumn(MigrationOperation):
schema: str
name: str
old_column_name: str
new_column_name: str
new_python_name: str
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder()
builder.write(
f'ALTER TABLE "{self.name}" RENAME COLUMN {self.old_column_name} TO {self.new_column_name}'
)
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
tables = copy.copy(state.tables)
        old_table = tables.pop((self.schema, self.name))
columns = copy.deepcopy(old_table.columns)
col = columns.pop(self.old_column_name)
new_col = dataclasses.replace(
col, name=self.new_column_name, python_name=self.new_python_name
)
columns[self.new_column_name] = new_col
new_table = dataclasses.replace(old_table, columns=columns)
tables[(self.schema, new_table.name)] = new_table
return dataclasses.replace(state, tables=tables)
@register_operation
@dataclasses.dataclass(frozen=True)
class AlterTable(MigrationOperation):
schema: str
name: str
alter_operations: list[AlterOperations]
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
builder = SqlBuilder()
builder.write(f'ALTER TABLE "{self.name}" ')
wrote_val = False
for op in self.alter_operations:
if wrote_val:
builder.write(", ")
wrote_val = True
op.__sql__(builder)
await conn.execute(builder.q)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
new_tables = copy.copy(state.tables)
old_table = new_tables.pop((self.schema, self.name))
for op in self.alter_operations:
old_table = op.alter(old_table)
new_tables[(self.schema, old_table.name)] = old_table
return dataclasses.replace(state, tables=new_tables)
@register_operation
@dataclasses.dataclass(frozen=True)
class SetMeta(MigrationOperation):
new_meta_kvs: dict[str, Any]
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
pass
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
new_meta = copy.copy(state.meta)
new_meta.update(self.new_meta_kvs)
return dataclasses.replace(state, meta=new_meta)
@dataclasses.dataclass
class MigrationInfo:
state: MigrationStateDatabase
conn: AsyncConnection
_model_cache: dict[(str, str), Type[SqlModel]] = dataclasses.field(
default_factory=dict
)
def get_model(self, table_name: str, schema="public") -> Type[T]:
if (schema, table_name) not in self._model_cache:
table = self.state.tables[(schema, table_name)]
data_class = dataclasses.make_dataclass(
table.class_name,
[
(c.python_name, c.type.to_python(), c.as_column_field())
for c in table.columns.values()
],
kw_only=True,
)
data_class = type(table.class_name, (data_class,), {})
if len(table.primary_key) == 1:
data_class.__pk__ = table.primary_key[0]
else:
data_class.__pk__ = table.primary_key
data_class.__schema__ = table.schema
data_class.__table__ = table.name
self._model_cache[(schema, table_name)] = table_decorator(data_class)
return self._model_cache[(schema, table_name)]
@register_operation
@dataclasses.dataclass(frozen=True)
class RunPython(MigrationOperation):
python_function: Callable[[MigrationInfo], Optional[Awaitable[None]]]
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
result = self.python_function(MigrationInfo(state=state, conn=conn))
if inspect.isawaitable(result):
result = await result
return result
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
return state
@register_operation
@dataclasses.dataclass(frozen=True)
class RawSQL(MigrationOperation):
sql: str
async def run(self, state: MigrationStateDatabase, conn: AsyncConnection):
await conn.execute(self.sql)
def forward(self, state: MigrationStateDatabase) -> MigrationStateDatabase:
return state
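# Illustrative data migration built from the operations above. The table name and
# the dependency id are hypothetical; see the Migration dataclass below:
#
#     async def backfill(info: MigrationInfo):
#         Model = info.get_model("my_table")
#         ...  # query/save rows with info.conn
#
#     migration = Migration(
#         depends_on=["migration_0001"],
#         operations=[RunPython(backfill)],
#     )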
@dataclasses.dataclass(frozen=True)
class Migration:
depends_on: list[str]
operations: list[MigrationOperation]
atomic: bool = True | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/migrations/data.py | 0.853745 | 0.152473 | data.py | pypi |
import dataclasses
import datetime
import secrets
import string
import uuid
import random
from typing import Optional, TypeVar, Type
from psycopg import AsyncConnection
from starlette.authentication import BaseUser
from rhubarb import (
PhoneNumber,
Email,
Constraint,
ModelSelector,
column,
BaseModel,
query,
references,
save,
Registry,
table,
)
from rhubarb.config import config
from rhubarb.functions import is_null
from rhubarb.password import Password, PasswordHash
from rhubarb.model import BaseUpdatedAtModel
from rhubarb.permission_classes import IsSuperUser
user_registry = Registry(prefix="users_")
@dataclasses.dataclass
class User(BaseUser, BaseUpdatedAtModel):
username: str = column()
first_name: Optional[str] = column(sql_default=None)
last_name: Optional[str] = column(sql_default=None)
password: Optional[Password] = column(
sql_default=None, permission_classes=[IsSuperUser]
)
email: Optional[Email] = column(sql_default=None)
phone_number: Optional[PhoneNumber] = column(sql_default=None)
activated: Optional[datetime.datetime] = column(sql_default=None)
opt_in_communication_email: Optional[datetime.datetime] = column(sql_default=None)
opt_in_communication_sms: Optional[datetime.datetime] = column(sql_default=None)
last_login: Optional[datetime.datetime] = column(sql_default=None)
is_staff: bool = column(sql_default=False)
is_superuser: bool = column(sql_default=False)
def __constraints__(self: ModelSelector):
return {
"unique_username": Constraint(check=self.username, unique=True),
"unique_phone_number": Constraint(check=self.phone_number, unique=True),
"unique_email": Constraint(check=self.email, unique=True),
}
@property
def is_authenticated(self) -> bool:
return True
@property
def display_name(self) -> str:
return self.username
@property
def identity(self) -> str:
return str(self.id)
U = TypeVar("U", bound=User)
@dataclasses.dataclass
class VerificationMixin(BaseModel):
sent: Optional[datetime.datetime] = column(sql_default=None)
user_id: uuid.UUID = references(
lambda: config().users.user_model.__table__, on_delete="CASCADE"
)
verified: Optional[datetime.datetime] = column()
canceled: Optional[datetime.datetime] = column()
def random_digits():
return "".join(random.choices(string.digits, k=6))
def random_token():
return secrets.token_urlsafe(24)
@table(registry=user_registry)
class PhoneVerification(VerificationMixin):
phone_number: PhoneNumber = column()
code: str = column(default_factory=random_digits)
@table(registry=user_registry)
class EmailVerification(VerificationMixin):
email: Email = column()
code: str = column(default_factory=random_token)
@table(registry=user_registry)
class ResetPasswordVerification(VerificationMixin):
code: str = column(default_factory=random_token)
async def get_user(conn, user_id=None, /, **kwargs) -> U:
UserModel = config().users.user_model
if user_id is not None:
return await query(conn, UserModel).where(lambda x: x.id == user_id).one()
else:
return await query(conn, UserModel).kw_where(**kwargs).one()
Verif = TypeVar("Verif", bound=VerificationMixin)
async def get_and_complete_verification(
conn, cls: Type[Verif], verification_id, code
) -> Verif:
time_delta = config().users.verification_timeout
last_valid_time = datetime.datetime.utcnow() - time_delta
def set_fn(m):
m.verified = datetime.datetime.utcnow()
return (
await query(conn, cls)
.where(
lambda x: x.id == verification_id
and x.code == code
and x.sent > last_valid_time
and is_null(x.canceled)
and is_null(x.verified)
)
.update(set_fn)
.execute(one=True)
)
@dataclasses.dataclass
class RegistrationResult:
user: User
phone_verification: Optional[PhoneVerification]
email_verification: Optional[EmailVerification]
async def register(conn: AsyncConnection, **kwargs) -> RegistrationResult:
UserModel = config().users.user_model
password = kwargs.pop("password", None)
if isinstance(password, str):
kwargs["password"] = PasswordHash.new(password)
elif isinstance(password, PasswordHash):
kwargs["password"] = password
new_user: User = await save(conn, UserModel(**kwargs)).execute()
email_verification = None
phone_verification = None
if new_user.email:
email_verification = await set_email(conn, new_user, new_user.email)
if new_user.phone_number:
phone_verification = await set_phone_number(
conn, new_user, new_user.phone_number
)
return RegistrationResult(
user=new_user,
email_verification=email_verification,
phone_verification=phone_verification,
)
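# Illustrative registration flow. The connection, credentials, and delivery of the
# verification code are hypothetical / application-specific:
#
#     result = await register(
#         conn, username="alice", email="alice@example.com", password="correct horse battery staple"
#     )
#     # result.email_verification.code would then be emailed to the new user.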
async def set_email(
conn: AsyncConnection, user: User, new_email: str, mark_sent=True
) -> EmailVerification:
verif = EmailVerification(user_id=user.id, email=new_email)
if mark_sent:
verif.sent = datetime.datetime.utcnow()
await query(conn, EmailVerification).kw_where(user_id=user.id).kw_update(
canceled=datetime.datetime.utcnow()
).execute()
return await save(conn, verif).execute()
async def set_phone_number(
conn: AsyncConnection, user: User, phone_number: PhoneNumber, mark_sent=True
) -> PhoneVerification:
verif = PhoneVerification(user_id=user.id, phone_number=phone_number)
if mark_sent:
verif.sent = datetime.datetime.utcnow()
return await save(conn, verif).execute()
async def reset_password(
conn: AsyncConnection, user: User, mark_sent=True
) -> ResetPasswordVerification:
verif = ResetPasswordVerification(user_id=user.id)
if mark_sent:
verif.sent = datetime.datetime.utcnow()
return await save(conn, verif).execute()
async def verify_email(
conn: AsyncConnection, verification_id: uuid.UUID, code: str, update_user=False
) -> Optional[U]:
if verification := await get_and_complete_verification(
conn, EmailVerification, verification_id, code
):
user = await get_user(conn, verification.user_id)
if update_user:
user.email = verification.email
return await save(conn, user).execute()
return user
async def verify_phone(
conn: AsyncConnection, verification_id: uuid.UUID, code: str, update_user=False
) -> Optional[U]:
if verification := await get_and_complete_verification(
        conn, PhoneVerification, verification_id, code
):
user = await get_user(conn, verification.user_id)
if update_user:
user.phone_number = verification.phone_number
return await save(conn, user).execute()
return user
async def verify_password_reset(
conn: AsyncConnection, verification_id: uuid.UUID, code: str
) -> bool:
    if await get_and_complete_verification(
        conn, ResetPasswordVerification, verification_id, code
    ):
        return True
    return False
async def set_password(conn: AsyncConnection, user: User, new_password: str) -> U:
user.password = PasswordHash.new(new_password)
return await save(conn, user).execute() | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/pkg/users/models.py | 0.73914 | 0.170404 | models.py | pypi |
import hashlib
import uuid
import datetime
from contextlib import asynccontextmanager
from typing import Optional
from starlette.requests import HTTPConnection
from strawberry.types.graphql import OperationType
from rhubarb import (
BaseModel,
column,
table,
Index,
save,
Registry,
relation,
)
from rhubarb.config import config
from rhubarb.pkg.postgres.connection import connection
from rhubarb.pkg.redis.cache import local_only_cache
from rhubarb.core import SqlModel
from rhubarb.crud import by_pk
from rhubarb.object_set import BUILTINS
@asynccontextmanager
async def audit_connection(timeout=30):
conf = config()
if conf.audit.reuse_conn:
async with connection() as conn:
yield conn
else:
pool = await config().audit.postgres.get_pool()
async with pool.connection(timeout=timeout) as conn:
yield conn
audit_registry = Registry(prefix="auditing_")
@table(registry=audit_registry)
class GqlQuery(SqlModel):
__pk__ = "sha_hash"
sha_hash: bytes = column()
raw_query: str = column()
@table(registry=audit_registry)
class AuditEvent(BaseModel):
timestamp: datetime.datetime = column(sql_default=BUILTINS.NOW)
gql_query_sha_hash: Optional[bytes] = column(sql_default=None)
variables: Optional[dict] = column(sql_default=None)
meta: Optional[dict] = column(sql_default=None)
ip: Optional[str] = column(sql_default=None)
session_id: Optional[str] = column(sql_default=None)
user_id: Optional[uuid.UUID] = column(sql_default=None)
impersonator_id: Optional[uuid.UUID] = column(sql_default=None)
resource_url: Optional[str] = column(sql_default=None)
operation_type: Optional[str] = column(sql_default=None)
event_name: Optional[str] = column(sql_default=None)
duration_ns: Optional[int] = column(sql_default=None)
def __indexes__(self):
return {
"user_by_ts": Index(on=(self.user_id, self.timestamp)),
"user_by_query": Index(on=(self.user_id, self.event_name, self.timestamp)),
"by_query": Index(on=(self.event_name, self.timestamp)),
}
@relation
def graphql_query(self, gql_query: GqlQuery):
return self.gql_query_sha_hash == gql_query.sha_hash
@local_only_cache(key_arg="hash_digest")
async def do_get_or_create_gql_query(
conn, raw_query: str, hash_digest: bytes = ...
) -> GqlQuery:
gql_query = await by_pk(conn, GqlQuery, hash_digest).one()
if not gql_query:
gql_query = await save(
conn,
GqlQuery(sha_hash=hash_digest, raw_query=raw_query),
insert_with_pk=True,
).execute()
return gql_query
async def get_or_create_gql_query(conn, raw_query: str) -> GqlQuery:
hash_digest = hashlib.sha1(raw_query.encode()).digest()
return await do_get_or_create_gql_query(conn, raw_query, hash_digest=hash_digest)
async def log_gql_event(raw_query: str, operation_type: OperationType, **kwargs):
if conn := kwargs.pop("conn", None):
return await do_log_gql_event(conn, raw_query, operation_type, **kwargs)
async with audit_connection(timeout=1) as conn:
return await do_log_gql_event(conn, raw_query, operation_type, **kwargs)
async def do_log_gql_event(
conn, raw_query: str, operation_type: OperationType, **kwargs
):
conf = config()
if operation_type == OperationType.MUTATION and not conf.audit.audit_mutations:
return
elif operation_type == OperationType.QUERY and not conf.audit.audit_queries:
return
elif (
operation_type == OperationType.SUBSCRIPTION
and not conf.audit.audit_subscriptions
):
return
gql_query = await get_or_create_gql_query(conn, raw_query)
kwargs["gql_query_sha_hash"] = gql_query.sha_hash
await log_event(conn, **kwargs)
async def log_event(conn=None, request: HTTPConnection = None, **kwargs):
if conn is None:
async with audit_connection() as conn:
return await do_log_event(conn=conn, request=request, **kwargs)
else:
return await do_log_event(conn=conn, request=request, **kwargs)
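# Illustrative call from a request handler. The event name and meta payload are
# hypothetical; any AuditEvent column can be passed as a keyword argument:
#
#     await log_event(request=request, event_name="user.login", meta={"method": "password"})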
async def do_log_event(conn, request: HTTPConnection = None, **kwargs):
if request:
kwargs.setdefault("resource_url", str(request.url))
kwargs.setdefault("ip", request.client.host)
kwargs.setdefault(
"user_id", request.user.id if request.user.is_authenticated else None
)
kwargs.setdefault(
"impersonator_id", request.session.get("impersonator_id", None)
)
kwargs.setdefault("session_id", request.scope.get("__session_key", None))
await save(conn, AuditEvent(**kwargs)).execute() | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/pkg/audit/models.py | 0.724675 | 0.191214 | models.py | pypi |
import base64
import uuid
import webauthn
from psycopg import AsyncConnection
from pydantic import validator
from rhubarb import save, RhubarbException
from rhubarb.config import config
from rhubarb.pkg.redis.rate_limit import rate_limit
from rhubarb.pkg.users.models import U
from rhubarb.pkg.webauthn.models import UserAuthnKey
from rhubarb.crud import by_pk, query
from starlette.requests import Request
from webauthn.helpers.structs import (
PublicKeyCredentialCreationOptions,
RegistrationCredential,
PublicKeyCredentialDescriptor,
PublicKeyCredentialRequestOptions,
AuthenticationCredential,
)
def generate_registration_options(
user: U, request: Request
) -> PublicKeyCredentialCreationOptions:
user_id = user.identity
user_name = user.username
user_display_name = user.email
conf = config().webauthn
public_key = webauthn.generate_registration_options(
rp_id=conf.rp_id,
rp_name=conf.rp_name,
user_id=user_id,
user_name=user_name,
user_display_name=user_display_name,
authenticator_selection=conf.selection_criteria,
)
request.session["webauthn_register_challenge"] = base64.b64encode(
public_key.challenge
).decode()
if referer := request.headers.get("referer"):
request.session["webauthn_register_origin"] = referer
return public_key
def b64decode(s: str) -> bytes:
    # base64url payloads are often sent without padding; re-add it so the decoder
    # accepts either padded or unpadded input.
    return base64.urlsafe_b64decode(s + "=" * (-len(s) % 4))
class CustomRegistrationCredential(RegistrationCredential):
@validator("raw_id", pre=True)
def convert_raw_id(cls, v: str):
assert isinstance(v, str), "raw_id is not a string"
return b64decode(v)
@validator("response", pre=True)
def convert_response(cls, data: dict):
assert isinstance(data, dict), "response is not a dictionary"
return {k: b64decode(v) for k, v in data.items()}
async def register_complete(
conn: AsyncConnection,
request: Request,
user_id: uuid.UUID,
credential: CustomRegistrationCredential,
) -> UserAuthnKey:
cors = config().cors
conf = config().webauthn
with rate_limit(key=f"authn-{request.client.host}", max_times=5, ttl_seconds=60):
challenge = request.session.pop("webauthn_register_challenge", None)
if not challenge:
raise RhubarbException(
f"User {user_id} tried to finish registering webauthn without a challenge session"
)
expected_challenge = base64.b64decode(challenge.encode())
expected_origin = request.session.pop("webauthn_register_origin", None)
registration = webauthn.verify_registration_response(
credential=credential,
expected_challenge=expected_challenge,
expected_rp_id=conf.rp_id,
expected_origin=expected_origin or cors.origins,
require_user_verification=True,
)
return await save(
conn,
UserAuthnKey(
user_id=user_id,
public_key=registration.credential_public_key,
sign_count=registration.sign_count,
credential_id=registration.credential_id,
),
).execute(one=True)
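# Illustrative two-step registration flow. The endpoints and the shape of the
# browser payload are application-specific assumptions, not part of this module:
#
#     options = generate_registration_options(request.user, request)  # send to the browser
#     # ... later, with the credential returned by navigator.credentials.create():
#     credential = CustomRegistrationCredential.parse_obj(payload_from_browser)
#     key = await register_complete(conn, request, request.user.id, credential)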
async def auth_options(
conn: AsyncConnection, request: Request, user_id: uuid.UUID
) -> PublicKeyCredentialRequestOptions:
conf = config().webauthn
all_keys = await query(conn, UserAuthnKey).kw_where(user_id=user_id).as_list()
public_key = webauthn.generate_authentication_options(
rp_id=conf.rp_id,
allow_credentials=[
PublicKeyCredentialDescriptor(
id=key.credential_id, transports=conf.transports
)
for key in all_keys
],
user_verification=conf.authentication_user_verification,
)
request.session["webauthn_auth_challenge"] = base64.b64encode(
public_key.challenge
).decode()
if origin := request.headers.get("referer", None):
request.session["webauthn_auth_origin"] = origin
return public_key
class CustomAuthenticationCredential(AuthenticationCredential):
@validator("raw_id", pre=True)
def convert_raw_id(cls, v: str):
assert isinstance(v, str), "raw_id is not a string"
return b64decode(v)
@validator("response", pre=True)
def convert_response(cls, data: dict):
assert isinstance(data, dict), "response is not a dictionary"
return {k: b64decode(v) for k, v in data.items()}
async def auth_complete(
conn: AsyncConnection,
request: Request,
credential_id: uuid.UUID,
credential: CustomAuthenticationCredential,
):
expected_challenge = request.session.pop("webauthn_auth_challenge", None)
if not expected_challenge:
raise RhubarbException(
f"Credential {credential_id} tried to finish completing webauthn without a challenge session"
)
expected_challenge = base64.b64decode(expected_challenge.encode())
origin = request.session.pop("webauthn_auth_origin", None)
auth = config().users
cors = config().cors
conf = config().webauthn
with rate_limit(key=f"authn-{request.client.host}", max_times=auth.auth_rate_limit_max_attempts, ttl_seconds=auth.auth_rate_limit_timeout_seconds):
db_credential: UserAuthnKey = await by_pk(
conn, UserAuthnKey, credential_id
).one()
auth = webauthn.verify_authentication_response(
credential=credential,
expected_challenge=expected_challenge,
expected_rp_id=conf.rp_id,
expected_origin=origin or cors.origins,
credential_public_key=db_credential.public_key,
credential_current_sign_count=db_credential.sign_count,
)
return (
by_pk(conn, UserAuthnKey, credential_id)
.kw_update(sign_count=auth.new_sign_count)
.execute()
) | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/pkg/webauthn/client.py | 0.521715 | 0.170957 | client.py | pypi |
import dataclasses
import logging
from collections import deque
from contextlib import contextmanager
from typing import Optional, Protocol, ContextManager
import phonenumbers
from psycopg import AsyncConnection, AsyncCursor, postgres
from psycopg.abc import Query, Params
from psycopg.pq import Format
from psycopg.rows import Row
import time
from psycopg.types.string import StrBinaryDumper, BytesBinaryDumper
from rhubarb.password import PasswordHash
class QueryListener(Protocol):
    def new_query(self, query: Query, params: Optional[Params], duration_ns: int):
pass
@dataclasses.dataclass(slots=True)
class TrackedQuery:
query: Query
params: Optional[Params]
duration_ns: int
class QueryTracker(QueryListener):
def __init__(self):
self.queries: deque[TrackedQuery] = deque(maxlen=500)
def new_query(self, query: Query, params: Optional[Params], duration_ns):
self.queries.append(TrackedQuery(query, params, duration_ns))
class LocalQueryListeners:
listeners: dict[int, QueryListener]
def __init__(self):
self.listeners: dict[int, QueryListener] = {}
def register(self, listener_id: int, listener: QueryListener):
self.listeners[listener_id] = listener
def unregister(self, listener_id: int):
del self.listeners[listener_id]
def new_query(self, query, params, duration_ns):
logging.debug(f"[QUERY] {query}")
for listener in self.listeners.values():
listener.new_query(query, params, duration_ns)
local_queries = LocalQueryListeners()
@contextmanager
def track_queries() -> ContextManager[QueryTracker]:
tracker = QueryTracker()
tracker_id = time.monotonic_ns()
local_queries.register(tracker_id, tracker)
try:
yield tracker
finally:
local_queries.unregister(tracker_id)
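# Example (illustrative sketch, assuming `conn` is an AsyncConnectionWithStats
# instance obtained elsewhere in the application):
#
#   with track_queries() as tracker:
#       await conn.execute("SELECT 1")
#   last = tracker.queries[-1]
#   print(last.query, last.duration_ns)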
class AsyncConnectionWithStats(AsyncConnection):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cursor_factory = AsyncCursorWithStats
self.adapters.register_dumper(phonenumbers.PhoneNumber, PhoneNumberDumper)
self.adapters.register_dumper(PasswordHash, PasswordHashDumper)
class AsyncCursorWithStats(AsyncCursor):
async def execute(
self,
query: Query,
params: Optional[Params] = None,
*,
prepare: Optional[bool] = None,
binary: bool = False,
) -> AsyncCursor[Row]:
start_ns = time.perf_counter_ns()
result = await super().execute(query, params, prepare=prepare, binary=binary)
end_ns = time.perf_counter_ns()
local_queries.new_query(query, params, end_ns - start_ns)
return result
class PhoneNumberDumper(StrBinaryDumper):
format = Format.TEXT
oid = postgres.types["text"].oid
def dump(self, obj):
return super().dump(str(obj))
class PasswordHashDumper(BytesBinaryDumper):
def dump(self, obj: PasswordHash):
return super().dump(obj.hash) | /rhubarb_graphql-0.1.0-py3-none-any.whl/rhubarb/pkg/postgres/connection_base.py | 0.787768 | 0.166879 | connection_base.py | pypi |
from typing import Any, Union
import asyncio
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator
from rhubarb.event import Event
from .exceptions import HistoryError
class BaseBackend(ABC):
@abstractmethod
def __init__(self, url: str) -> None:
"""
:param url: URL of the backend service
:type url: str
"""
@abstractmethod
async def connect(self) -> None:
"""Connect to queue service"""
@abstractmethod
async def disconnect(self) -> None:
"""Disconnect from queue service"""
@abstractmethod
async def subscribe(self, channel: str) -> None:
"""Subscribe to a channel
:param channel: name of the channel in the queue to subscribe to
:type channel: str
"""
@abstractmethod
async def unsubscribe(self, channel: str) -> None:
"""Unsubscribe from a channel
:param channel: name of the channel in the queue to subscribe to
:type channel: str
"""
async def group_subscribe(
self,
channel: str,
group_name: str,
consumer_name: str,
queue: asyncio.Queue[Union[Event, None]],
) -> None:
"""Called to subscribe to a channel as part of a consumer (``consumer_name``) within a group (``groupd_name``)
:param channel: name of the channel in the queue to subscribe to
:type channel: str
:param group_name: the name of the group this subscriber will join
:type group_name: str
:param consumer_name: the unique name in the group for this subscriber
:type consumer_name: str
"""
raise NotImplementedError()
async def group_unsubscribe(
self, channel: str, group_name: str, consumer_name: str
) -> None:
"""Called to unsubscribe from a channel based on the group name and consumer name
:param channel: name of the channel in the queue to subscribe to
:type channel: str
:param group_name: the name of the group this subscriber will join
:type group_name: str
:param consumer_name: the unique name in the group for this subscriber
:type consumer_name: str
"""
raise NotImplementedError()
@abstractmethod
async def publish(self, channel: str, message: Any) -> None:
"""Publish a message to a given channel
:param channel: name of the channel in the queue to publish the message to
:type channel: str
:param message: the message to publish to the channel
        :type message: Any
"""
@abstractmethod
async def next_event(self) -> Event:
"""Get the next published Event from the queue
:return: The next event read
:rtype: Event
"""
async def history(
self, channel: str, count: int = 0
) -> AsyncIterator[Union[Event, None]]:
"""Optionally get a history of the last `n` events
:return: A list of the last events
:type: List
"""
yield None
raise HistoryError("History not supported for backend") | /rhubarb_py-1.8.6-py3-none-any.whl/rhubarb/backends/base.py | 0.914561 | 0.305076 | base.py | pypi |
from logging import getLogger
from logging import StreamHandler
from logging import WARNING
import re
from poetry.data_constructor.data_constructor import DataConstructor
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(WARNING)
logger.setLevel(WARNING)
logger.addHandler(handler)
class WordsDataConstructor(DataConstructor):
FIVE_STR = 'five'
SEVEN_STR = 'seven'
LENGTH = 'length'
def _extract_data(self, csv_data_list):
"""
        Overrides the parent class method.
        Returns a data structure of the following shape:
{
'five': [
[
{
"word": "あ",
"vowel": "ア",
"length": 1,
"part": "名詞"
},
...,
{
"word": "い",
"vowel": "イ",
"length": 1,
"part": "名詞"
}
],
[
{
"word": "か",
"vowel": "カ",
"length": 1,
"part": "名詞"
},
{
"word": "き",
"vowel": "キ",
"length": 1,
"part": "名詞"
}
,
...
]
],
'seven': [
...
]
}
"""
extracted_data_list = [[self._construct_words_dict(csv_data) for csv_data in sentence] for sentence in csv_data_list]
        # Discard sentences that are not exactly 5 or 7 characters long
sentence_five = self._extract_n_char_sentence(5, extracted_data_list)
sentence_seven = self._extract_n_char_sentence(7, extracted_data_list)
return {self.FIVE_STR: sentence_five,
self.SEVEN_STR: sentence_seven}
def _construct_words_dict(self, csv_data):
"""
        Return a dict with the word, its part of speech, the vowels of its reading, and the reading length.
"""
if not isinstance(csv_data, list):
logger.warning("Input value is not List: %s." % csv_data)
self.yomi = csv_data[self.PRONOUNCIATION]
self._substitute_vowel()
return {
"word": csv_data[self.WORD],
"part": csv_data[self.PART],
"vowel": self.vowel_pronounciation,
"length": len(self.vowel_pronounciation)
}
def _substitute_vowel(self):
"""
        Replace the consonants in the reading (yomi) with their vowels.
"""
        # Handle contracted sounds (youon) before straight syllables
try:
substituted_yomi = self._substitute_diphthong()
self.vowel_pronounciation = self._substitute_straight_syllables(substituted_yomi)
except SubstitutionError as e:
            print('Substitution was incomplete.', e.origin, e.value)
def _substitute_diphthong(self):
"""
        Replace contracted sounds (youon) with their vowels.
        Special sounds not strictly defined as youon are also replaced.
        Based on the rules of romaji transliteration.
"""
a = 'ア'
i = 'イ'
u = 'ウ'
e = 'エ'
o = 'オ'
a_diphthong = (
'キャ', 'シャ', 'チャ', 'ニャ', 'ヒャ', 'ミャ', 'リャ', 'ギャ', 'ジャ', 'ヂャ', 'ビャ', 'ピャ',
'クァ', 'クヮ', 'グヮ', 'ツァ', 'テャ', 'デャ', 'ファ', 'フャ', 'ブャ', 'ウァ', 'ヴァ', 'ブァ'
)
i_diphthong = (
'クィ', 'グィ', 'ジィ', 'チィ', 'ヂィ', 'ツィ', 'ティ', 'ディ', 'フィ', 'ウィ', 'ヴィ', 'ブィ'
)
u_diphthong = (
'キュ', 'シュ', 'チュ', 'ニュ', 'ヒュ', 'ミュ', 'リュ', 'ギュ', 'ジュ', 'ヂュ', 'ビュ', 'ピュ',
'クゥ', 'グゥ', 'テュ', 'デュ', 'フュ', 'ブュ'
)
e_diphthong = (
'クェ', 'グェ', 'シェ', 'ジェ', 'チェ', 'ヂェ', 'ツェ', 'テェ', 'デェ', 'フェ', 'ウェ', 'ヴェ', 'ブェ'
)
o_diphthong = (
'キョ', 'ショ', 'チョ', 'ニョ', 'ヒョ', 'ミョ', 'リョ', 'ギョ', 'ジョ', 'ヂョ', 'ビョ', 'ピョ',
'クォ', 'グォ', 'ツォ', 'テョ', 'デョ', 'フォ', 'フョ', 'ブョ', 'ウォ', 'ヴォ', 'ブォ'
)
vowel_dict = {
a: a_diphthong,
i: i_diphthong,
u: u_diphthong,
e: e_diphthong,
o: o_diphthong
}
substituted_yomi = self.yomi
for vowel, consonants in vowel_dict.items():
for c in consonants:
substituted_yomi = re.sub(c, vowel, substituted_yomi)
return substituted_yomi
def _substitute_straight_syllables(self, yomi):
"""
        Replace straight syllables (chokuon) with their vowels.
"""
a = 'ア'
i = 'イ'
u = 'ウ'
e = 'エ'
o = 'オ'
a_consonants = ('カ', 'サ', 'タ', 'ナ', 'ハ', 'マ', 'ヤ', 'ラ', 'ワ', 'ガ', 'ザ', 'ダ', 'バ', 'パ', "ャ")
i_consonants = ('キ', 'シ', 'チ', 'ニ', 'ヒ', 'ミ', 'リ', 'ギ', 'ジ', 'ヂ', 'ビ', 'ピ')
u_consonants = ('ク', 'ス', 'ツ', 'ヌ', 'フ', 'ム', 'ユ', 'ル', 'グ', 'ズ', 'ヅ', 'ブ', 'プ', "ュ")
e_consonants = ('ケ', 'セ', 'テ', 'ネ', 'ヘ', 'メ', 'レ', 'ゲ', 'ゼ', 'デ', 'ベ', 'ペ')
o_consonants = ('コ', 'ソ', 'ト', 'ノ', 'ホ', 'モ', 'ヨ', 'ロ', 'ヲ', 'ゴ', 'ゾ', 'ド', 'ボ', 'ポ', "ョ")
vowel_dict = {
a: a_consonants,
i: i_consonants,
u: u_consonants,
e: e_consonants,
o: o_consonants
}
substituted_yomi = yomi
for vowel, consonants in vowel_dict.items():
for c in consonants:
substituted_yomi = re.sub(c, vowel, substituted_yomi)
if self._completed_substitution(substituted_yomi):
return substituted_yomi
        # If the substitution could not be completed
raise SubstitutionError(yomi, substituted_yomi)
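    # Worked example (illustrative): for the reading 'キャク' (kyaku),
    # _substitute_diphthong turns 'キャ' into 'ア', and
    # _substitute_straight_syllables then turns 'ク' into 'ウ',
    # producing the vowel string 'アウ' with length 2.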
def _completed_substitution(self, current_yomi):
"""
        Precondition: all contracted sounds (youon) have already been substituted.
        Returns True when the consonant-to-vowel substitution is complete.
"""
if not isinstance(current_yomi, str):
logger.warning("Input value is not String: %s." % current_yomi)
        # List of characters that can still be substituted
substitutable_list = (
'カ', 'サ', 'タ', 'ナ', 'ハ', 'マ', 'ヤ', 'ラ', 'ワ', 'ガ', 'ザ', 'ダ', 'バ', 'パ', "ャ",
'キ', 'シ', 'チ', 'ニ', 'ヒ', 'ミ', 'リ', 'ギ', 'ジ', 'ヂ', 'ビ', 'ピ',
'ク', 'ス', 'ツ', 'ヌ', 'フ', 'ム', 'ユ', 'ル', 'グ', 'ズ', 'ヅ', 'ブ', 'プ', "ュ",
'ケ', 'セ', 'テ', 'ネ', 'ヘ', 'メ', 'レ', 'ゲ', 'ゼ', 'デ', 'ベ', 'ペ',
'コ', 'ソ', 'ト', 'ノ', 'ホ', 'モ', 'ヨ', 'ロ', 'ヲ', 'ゴ', 'ゾ', 'ド', 'ボ', 'ポ', "ョ"
)
        # Substitution is complete only when no substitutable character remains.
        for ch in current_yomi:
            if ch in substitutable_list:
                return False
        return True
def _extract_n_char_sentence(self, n, sentence_list):
"""
        Return only the sentences that are exactly n characters long.
"""
n_char_sentence_list = list()
for sentence in sentence_list:
len_sum = 0
for word in sentence:
len_sum += word[self.LENGTH]
if len_sum > n:
break
if len_sum == n:
n_char_sentence_list.append(sentence)
return n_char_sentence_list
class SubstitutionError(Exception):
def __init__(self, origin, value):
self.origin = origin
self.value = value
def __str__(self):
        return repr((self.origin, self.value)) | /rhyme-haiker-2.0.6.tar.gz/rhyme-haiker-2.0.6/poetry/data_constructor/words_data_constructor.py | 0.449876 | 0.173989 | words_data_constructor.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
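    # Example (illustrative): for Gaussian(0, 2) the density at the mean is
    # 1 / (2 * sqrt(2 * pi)), roughly 0.1995.
    #
    #   g = Gaussian(0, 2)
    #   g.pdf(0)   # ~0.1995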
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /ri_distributions-1.0.tar.gz/ri_distributions-1.0/ri_distributions/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
mean = p * n
standard deviation = sqrt(n * p * (1 - p))
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
"""
def __init__(self, prob=.5, size=20):
self.p = prob
self.n = size
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.n * self.p
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
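    # Example (illustrative): for Binomial(prob=0.4, size=20) the mean is
    # 20 * 0.4 = 8 and the standard deviation is sqrt(20 * 0.4 * 0.6) ~= 2.19.
    #
    #   b = Binomial(0.4, 20)
    #   b.mean    # 8.0
    #   b.stdev   # ~2.19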
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set. The function updates the p and n variables of the object.
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
return self.p, self.n
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the binomial distribution.
Args:
k (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# B(k| p, n) = (n over k) * p^k (1-p)^(n-k)
a = math.factorial(self.n) / (math.factorial(k) * math.factorial(self.n - k)) # (n over k)
b = (self.p ** k) * (1 - self.p) ** (self.n - k) # p^k (1-p)^(n-k)
return a * b
def plot_bar_pdf(self):
# write a method to plot the probability density function of the binomial distribution
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate x values
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
#plot pdf
plt.bar(x,y)
plt.title('Distribution of Outcomes')
plt.xlabel('Outcome')
plt.ylabel('Probability')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
        assert self.p == other.p, 'p values are not equal'
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Binomial object
"""
return 'mean {}, standard deviation {}, p {}, n {}'.\
format(self.mean, self.stdev, self.p, self.n) | /ri_distributions-1.0.tar.gz/ri_distributions-1.0/ri_distributions/Binomialdistribution.py | 0.901131 | 0.786623 | Binomialdistribution.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /ria_probability-0.1.tar.gz/ria_probability-0.1/ria_probability/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
"""Export an archive of a local annex object store, suitable for RIA"""
__docformat__ = 'restructuredtext'
import logging
import os
import os.path as op
from hashlib import md5
import subprocess
from argparse import REMAINDER
from datalad.utils import (
rmtree,
)
from datalad.interface.base import (
Interface,
build_doc,
)
from datalad.interface.results import (
get_status_dict,
)
from datalad.interface.utils import eval_results
from datalad.support.param import Parameter
from datalad.support.constraints import (
EnsureNone,
EnsureStr,
)
from datalad.distribution.dataset import (
EnsureDataset,
datasetmethod,
require_dataset,
resolve_path,
)
from datalad.log import log_progress
from datalad.dochelpers import (
exc_str,
)
lgr = logging.getLogger('ria_remote.export_archive')
@build_doc
class ExportArchive(Interface):
"""Export an archive of a local annex object store for the RIA remote.
Keys in the local annex object store are reorganized in a temporary
directory (using links to avoid storage duplication) to use the
'hashdirlower' setup used by git-annex for bare repositories and
the directory-type special remote. This alternative object store is
then moved into a 7zip archive that is suitable for use in a
RIA remote dataset store. Placing such an archive into::
<dataset location>/archives/archive.7z
    enables the RIA special remote to locate and retrieve all keys contained
    in the archive.
"""
_params_ = dict(
dataset=Parameter(
args=("-d", "--dataset"),
doc="""specify the dataset to process. If
no dataset is given, an attempt is made to identify the dataset
based on the current working directory""",
constraints=EnsureDataset() | EnsureNone()),
target=Parameter(
args=("target",),
metavar="TARGET",
doc="""if an existing directory, an 'archive.7z' is placed into
it, otherwise this is the path to the target archive""",
constraints=EnsureStr() | EnsureNone()),
opts=Parameter(
args=("opts",),
nargs=REMAINDER,
metavar="...",
doc="""list of options for 7z to replace the default '-mx0' to
generate an uncompressed archive"""),
)
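    # Example (hypothetical sketch): calling the exporter from Python on an
    # installed dataset; the paths below are illustrative only.
    #
    #   from datalad.distribution.dataset import Dataset
    #   ds = Dataset('/path/to/dataset')
    #   for res in ds.ria_export_archive('/tmp/archives'):
    #       print(res['status'], res.get('path'))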
@staticmethod
@datasetmethod(name='ria_export_archive')
@eval_results
def __call__(
target,
dataset=None,
opts=None):
# only non-bare repos have hashdirmixed, so require one
ds = require_dataset(
dataset, check_installed=True, purpose='RIA archive export')
ds_repo = ds.repo
# TODO remove once datalad 0.12rc7 or later is released
if not hasattr(ds_repo, 'dot_git'):
from datalad.support.gitrepo import GitRepo
ds_repo.dot_git = ds_repo.pathobj / GitRepo.get_git_dir(ds_repo)
annex_objs = ds_repo.dot_git / 'annex' / 'objects'
archive = resolve_path(target, dataset)
if archive.is_dir():
archive = archive / 'archive.7z'
else:
archive.parent.mkdir(exist_ok=True, parents=True)
if not opts:
# uncompressed by default
opts = ['-mx0']
res_kwargs = dict(
action="export-ria-archive",
logger=lgr,
)
if not annex_objs.is_dir():
yield get_status_dict(
ds=ds,
status='notneeded',
message='no annex keys present',
**res_kwargs,
)
return
exportdir = ds_repo.dot_git / 'datalad' / 'tmp' / 'ria_archive'
if exportdir.exists():
yield get_status_dict(
ds=ds,
status='error',
message=(
'export directory already exists, please remove first: %s',
str(exportdir)),
**res_kwargs,
)
return
keypaths = [
k for k in annex_objs.glob(op.join('**', '*'))
if k.is_file()
]
log_progress(
lgr.info,
'riaarchiveexport',
'Start RIA archive export %s', ds,
total=len(keypaths),
label='RIA archive export',
unit=' Keys',
)
for keypath in keypaths:
key = keypath.name
hashdir = op.join(keypath.parts[-4], keypath.parts[-3])
log_progress(
lgr.info,
'riaarchiveexport',
'Export key %s to %s', key, hashdir,
update=1,
increment=True)
keydir = exportdir / hashdir / key
keydir.mkdir(parents=True, exist_ok=True)
os.link(str(keypath), str(keydir / key))
log_progress(
lgr.info,
'riaarchiveexport',
'Finished RIA archive export from %s', ds
)
try:
            subprocess.run(
                ['7z', 'u', str(archive), '.'] + opts,
                cwd=str(exportdir),
                # raise on a non-zero exit status so the except branch below
                # reports the failure as an error result
                check=True,
            )
yield get_status_dict(
path=str(archive),
type='file',
status='ok',
**res_kwargs)
except Exception as e:
yield get_status_dict(
path=str(archive),
type='file',
status='error',
message=('7z failed: %s', exc_str(e)),
**res_kwargs)
return
finally:
rmtree(str(exportdir)) | /ria_remote-0.7.tar.gz/ria_remote-0.7/ria_remote/export_archive.py | 0.418459 | 0.175962 | export_archive.py | pypi |
from blinker import signal
import copy
from riak import RiakHttpTransport, RiakClient
from riakdoc.keygen import simple_hex_key
from riakdoc.indexes import BaseIndex, GetterShell
__author__ = 'Dan Ostrowski <dan.ostrowski@gmail.com>'
__all__ = ['BaseDocument']
class BaseDocumentMeta(type):
def __new__(mcs, name, bases, d):
"""
Sets up a document class.
2i Indexes:
        This metaclass takes instances of `riakdoc.indexes.BaseIndex` subclasses, wraps each in a getter so that
        attribute access returns the value the index would have if saved, and keeps the original index object in `_indexes`.
Example: if you have my_index = TimestampIndex(), then the new object will have:
new_object.my_index # will be an int
new_object._indexes['my_index'] # which will contain the actual BaseIndex subclass.
"""
d['_indexes'] = {}
for k in d:
if isinstance(d[k], BaseIndex):
d[k].set_name(k)
d['_indexes'][k] = d[k]
d[k] = GetterShell(d[k])
keygen_func = None
if 'keygen_func' in d:
keygen_func = d.pop('keygen_func')
r = super(BaseDocumentMeta, mcs).__new__(mcs, name, bases, d)
if keygen_func:
r.keygen_func = staticmethod(keygen_func)
return r
class BaseDocument(object):
"""
Subclass-able Riak Document class.
Default Signals:
pre-document save - Called right before document storage in Riak, sender is class, document kwarg is instance.
post-document save - Called right after document save in Riak, sender is class, document kwarg is instance.
data initialized - Called right after initialize_data, sender is class, document kwarg is instance.
"""
clients = {}
using = None
bucket_name = None
enable_2i = None
keygen_func = simple_hex_key
__metaclass__ = BaseDocumentMeta
def __init__(self, key, obj=None, d=None, using=None, noclobber=True, indexes=None, *args, **kwargs):
"""
Create a basic Riak model.
@param key: The key to store/retrieve from Riak.
@type key: str
@param obj: A seed object to use for this model.
@type obj: riak.riak_object.RiakObject
@param d: A seed dictionary to use.
@type d: dict
@param using: Name of the Riak connection to use.
@type using: str
        @param noclobber: Whether or not to accept both a `d` and an `obj` with data.
@type noclobber: bool
"""
self.key = str(key)
if not obj:
obj = self.get_bucket(using=using).get(self.key)
obj.set_content_type('application/json')
self._obj = obj
self.indexes = {}
if self.using_2i(using=using):
for ri in self._obj.get_indexes():
self.indexes[ri.get_field()] = ri.get_value()
if indexes:
self.indexes.update(indexes)
if d:
if self._obj.exists() and self._obj.get_data() and noclobber:
raise Exception('No clobber set but data and Riak object passed.')
self.data = d
self._obj.set_data(d)
else:
if self._obj.exists():
self.data = obj.get_data()
else:
self.data = self.initialize_data(*args, **kwargs)
signal('data initialized').send(self.__class__, document=self)
def __unicode__(self):
return u'{0} document "{1}": {2}'.format(self.__class__.__name__, self.key, str(self.data)[:100])
def __repr__(self):
return "{klass}('{key}', d={data})".format(klass=self.__class__.__name__, key=self.key, data=repr(self.data))
def __str__(self):
return str(self.__unicode__())
def using_2i(self, using=None):
"""
Figure out if this object is supposed to use 2i or not. (2i is enabled as a backend option.)
@param using: The name of the connection to use.
@type using: str
@rtype: bool
"""
if self.enable_2i is not None:
return self.enable_2i
return self.get_config_for(using=using).get('ENABLE_2I', False)
@classmethod
def create(cls, keygen_func=None, *args, **kwargs):
"""
Shortcut for creating an object without providing a key.
Uses a keygen func to generate it, which can be specified per class or passed in.
"""
key = keygen_func(cls, *args, **kwargs) if keygen_func else cls.keygen_func(cls, *args, **kwargs)
return cls(key, *args, **kwargs)
@classmethod
def get_config(cls):
"""
Return the config dict.
@rtype: dict
"""
from riakdoc.settings import config
return config.get()
@classmethod
def get_config_for(cls, using=None):
"""
Get the configuration for a given connection name.
@param using: The name of the client to use.
@type using: str
@rtype: dict
"""
using = using or cls.using or 'DEFAULT'
try:
return cls.get_config()['DATABASES'][using]
except KeyError:
raise Exception('Improperly configured riakdoc database configuration for {0}'.format(using))
@classmethod
def get_or_create_client(cls, using=None):
"""
Returns a new instance of a riak.RiakClient based on the name and the project settings.
"""
using = using or cls.using or 'DEFAULT'
        if using not in cls.clients:
settings = cls.get_config_for(using=using)
cls.clients[using] = RiakClient(
host=settings.get('HOST', 'localhost'),
port=settings.get('PORT', 8098),
prefix=settings.get('PREFIX', 'riak'),
mapred_prefix=settings.get('MAPRED_PREFIX', 'mapred'),
transport_class=settings.get('TRANSPORT', RiakHttpTransport),
solr_transport_class=settings.get('SOLR_TRANSPORT', None)
)
return cls.clients[using]
@classmethod
def get_if_exists(cls, key, using=None):
"""
@todo: Should this be in a Manager style object?
"""
riak_obj = cls.get_or_create_client(using=using).bucket(cls.get_bucket_name()).get(key)
if riak_obj.exists():
return cls(key, obj=riak_obj)
else:
return None
@classmethod
def get_bucket(cls, using=None):
"""
Returns the Riak bucket to use.
@param using: The name of the connection to use.
@type using: str
@rtype: riak.RiakBucket
"""
try:
return cls._bucket
except AttributeError:
cls._bucket = cls.get_or_create_client(using=using).bucket(cls.get_bucket_name())
return cls._bucket
@classmethod
def get_bucket_name(cls):
"""
Returns the bucket name for this document (cls.bucket_name or cls.__name__)
TODO: Does this actually have to be a class method anymore? Probably not, since it's only used in __init__ of
this class.
You could override this if you wanted to do something fancy with get_bucket_name, but pretty much nothing of
value is passed in due to this being a class method.
@rtype: str
"""
return cls.bucket_name or cls.__name__
def initialize_data(self, *args, **kwargs):
"""
Called when the object is created without existing in Riak, *must return the data this object should have.*
@param args: Any extra arguments to the constructor end up here.
@param kwargs: Any extra kwargs from the constructor end up here.
"""
return {}
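    # Example (hypothetical sketch): assumes a reachable Riak node, a
    # configured 'DEFAULT' connection, and a key that does not already hold
    # data; all names below are illustrative.
    #
    #   class UserDocument(BaseDocument):
    #       bucket_name = 'users'
    #
    #       def initialize_data(self, *args, **kwargs):
    #           return {'name': '', 'email': ''}
    #
    #   doc = UserDocument('user-1', d={'name': 'Ann', 'email': 'ann@example.com'})
    #   doc.save()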
def save(self, using=None, *args, **kwargs):
"""
Save the object in Riak.
"""
self._obj.set_data(self.data)
if self.using_2i(using=using):
indexes = copy.deepcopy(self.indexes)
for i in self._indexes.itervalues():
i.update(self.data, indexes)
indexes = self.update_indexes(indexes)
self._obj.set_indexes([(k, indexes[k]) for k in indexes])
self.indexes = indexes
signal('pre-document save').send(self.__class__, document=self)
self._obj.store()
signal('post-document save').send(self.__class__, document=self)
def update_indexes(self, indexes):
"""
        Override this to add custom index handling to a document class.
        You must RETURN the updated indexes; mutating the passed-in dict alone will not work.
@type indexes: dict
@rtype: dict
"""
return indexes
def delete(self):
"""
Delete this object's contents on the server.
"""
self._obj.delete()
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
# I don't want to write out everything like .append() :)
def __getattr__(self, item):
return getattr(self.data, item) | /riak-docs-0.1.8.zip/riak-docs-0.1.8/riakdoc/documents.py | 0.698638 | 0.194024 | documents.py | pypi |
from time import time
__author__ = 'Dan Ostrowski <dan.ostrowski@gmail.com>'
__all__ = ['BaseIndex', 'TimestampIndex', 'StrFieldsIndex']
class GetterShell(object):
def __init__(self, index):
self.index = index
def __get__(self, instance, klass):
"""
This getter could be changed to pass instance, but I like get_value() not having to check if the instance is None.
"""
return self.index.get_value(instance.get_data() if instance else {}, instance.indexes or {})
class BaseIndex(object):
def __init__(self, required=False, *args, **kwargs):
"""
Create a named index on a document.
"""
self.required = required
self.name = None
def set_name(self, name):
self.name = name
def update(self, data, indexes):
"""
Called just before indexes are saved on a document.
This method should just update indexes, nothing else.
@param data: The data for the document.
@type data: dict (or list)
@param indexes: The indexes on the document as a dictionary.
@type indexes: dict
@rtype: None
"""
value = self.get_value(data, indexes)
if value is None:
if self.required:
raise ValueError('Index {0} is missing, cannot save.'.format(self.name))
            else:
                # the index may not have been set yet; only remove it if present
                indexes.pop(self.name, None)
else:
indexes[self.name] = value
def get_value(self, data, indexes):
"""
Gets the value that will be set for this index based on the state of the data in the Riak document.
@param data: Data in the Riak doc.
@type data: dict
"""
raise NotImplementedError()
class TimestampIndex(BaseIndex):
"""
Update a timestamp index with every save or just initially. (See: on_every_save)
"""
def __init__(self, required=False, on_every_save=False, *args, **kwargs):
"""
@type required: bool
@param on_every_save: If true, update the timestamp every time the object is saved, otherwise just when
initially saved.
@type on_every_save: bool
"""
self.on_every_save = on_every_save
super(TimestampIndex, self).__init__(required=required, *args, **kwargs)
def get_value(self, data, indexes):
if self.on_every_save or not self.name in indexes:
return int(time())
else:
return indexes[self.name]
class StrFieldsIndex(BaseIndex):
def __init__(self, fields, required=False, separator='_', *args, **kwargs):
"""
Use one (or more) fields as the value of an index.
        @param fields: one field name or a list of field names (dotted paths allowed)
        @type fields: str or list
@type required: bool
@param separator: When joining multiple fields together, which string separator to use.
@type separator: str
"""
if isinstance(fields, str):
fields = [fields]
self.fields = list(fields)
        self.separator = separator
        # pass `required` through so BaseIndex does not reset it to the default
        super(StrFieldsIndex, self).__init__(required=required, *args, **kwargs)
def get_value(self, data, indexes):
values = []
for field in self.fields:
value = self.get_field_value(field, data)
if not value:
return None
values.append(value)
return self.separator.join(values)
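    # Example (illustrative): an index built from two fields, one of them a
    # dotted path into nested data; the index name is hypothetical.
    #
    #   idx = StrFieldsIndex(['user.name', 'status'])
    #   idx.set_name('user_status_bin')
    #   idx.get_value({'user': {'name': 'ann'}, 'status': 'active'}, {})
    #   # -> 'ann_active'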
def get_field_value(self, field, data):
bits = field.split('.')
value = None
d = data
try:
for bit in bits:
d = d[bit]
value = str(d)
except (TypeError, KeyError, IndexError):
pass
return value | /riak-docs-0.1.8.zip/riak-docs-0.1.8/riakdoc/indexes.py | 0.677901 | 0.314182 | indexes.py | pypi |
from __future__ import print_function
from os.path import dirname, isdir, join
import re
from subprocess import CalledProcessError, Popen, PIPE
try:
from subprocess import check_output
except ImportError:
def check_output(*popenargs, **kwargs):
"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> import sys
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=sys.stdout)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be '
'overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
version_re = re.compile('^Version: (.+)$', re.M)
__all__ = ['get_version']
def get_version():
d = dirname(__file__)
if isdir(join(d, '.git')):
# Get the version using "git describe".
cmd = 'git describe --tags --match [0-9]*'.split()
try:
version = check_output(cmd).decode().strip()
except CalledProcessError:
print('Unable to get version number from git tags')
exit(1)
# PEP 386 compatibility
if '-' in version:
version = '.post'.join(version.split('-')[:2])
else:
# Extract the version from the PKG-INFO file.
import codecs
with codecs.open(join(d, 'PKG-INFO'), 'r', 'utf-8') as f:
version = version_re.search(f.read()).group(1)
return version
if __name__ == '__main__':
print(get_version()) | /riak-2.7.0.tar.gz/riak-2.7.0/version.py | 0.505615 | 0.197367 | version.py | pypi |
.. _datatypes:
.. currentmodule:: riak.datatypes
==========
Data Types
==========
Traditionally all data stored in Riak was an opaque binary type. Then
in version 1.4 came the introduction of a :ref:`counter
<legacy_counters>`, the first Convergent Data Type supported in Riak.
In Riak 2.0, several additional Data Types were introduced. Riak
"knows" about these data types, and conflicting writes to them will
converge automatically without presenting :ref:`sibling values
<siblings>` to the user.
Here is the list of current Data Types:
* :py:class:`~riak.datatypes.Counter` increments or decrements
integer values
* :py:class:`~riak.datatypes.Set` allows you to store multiple
distinct opaque binary values against a key
* :py:class:`~riak.datatypes.Map` is a nested, recursive
struct, or associative array. Think of it as a container for
composing ad hoc data structures from multiple Data Types.
Inside a map you may store sets, counters, flags,
registers, and even other maps
* :py:class:`~riak.datatypes.Register` stores binaries
  according to last-write-wins logic within
:py:class:`~riak.datatypes.Map`
* :py:class:`~riak.datatypes.Flag` is similar to a boolean
and also must be within :py:class:`~riak.datatypes.Map`
All Data Types must be stored in buckets bearing a
:class:`~riak.bucket.BucketType` that sets the
:attr:`~riak.bucket.BucketType.datatype` property to one of
``"counter"``, ``"set"``, or ``"map"``. Note that the bucket must have
the ``allow_mult`` property set to ``true``.
These Data Types are stored just like :class:`RiakObjects
<riak.riak_object.RiakObject>`, so size constraints that apply to
normal Riak values apply to Riak Data Types too.
An in-depth discussion of Data Types, also known as CRDTs,
can be found at `Data Types
<http://docs.basho.com/riak/2.0.0/theory/concepts/crdts/>`_.
Examples of using Data Types can be found at
`Using Data Types
<http://docs.basho.com/riak/2.0.0/dev/using/data-types/>`_.
------------------
Sending Operations
------------------
Riak Data Types provide a further departure from Riak's usual operation,
in that the API is operation-based. Rather than fetching the data structure,
reconciling conflicts, mutating the result, and writing it back, you instead
tell Riak what operations to perform on the Data Type. Here are some example
operations:
* increment a :class:`Counter` by ``10``
* add ``'joe'`` to a :class:`Set`
* remove the :class:`Set` field called ``'friends'`` from a :class:`Map`
* enable the prepay :class:`Flag` in a :class:`Map`
Datatypes can be fetched and created just like
:class:`~riak.riak_object.RiakObject` instances, using
:meth:`RiakBucket.get <riak.bucket.RiakBucket.get>` and
:meth:`RiakBucket.new <riak.bucket.RiakBucket.new>`, except that the
bucket must belong to a bucket-type that has a valid datatype
property. If we have a bucket-type named "social-graph" that has the
datatype `"set"`, we would fetch a :class:`Set` like so::
graph = client.bucket_type('social-graph')
graph.datatype # => 'set'
myfollowers = graph.bucket('followers').get('seancribbs')
# => a Set datatype
Once we have a datatype, we can stage operations against it and then
send those operations to Riak::
myfollowers.add('javajolt')
myfollowers.discard('roach')
myfollowers.update()
While this looks in code very similar to manipulating
:class:`~riak.riak_object.RiakObject` instances, only mutations are
enqueued locally, not the new value.
---------------------------
Context and Observed-Remove
---------------------------
In order for Riak Data Types to behave well, you must have an opaque
context received from a read when you:
* :meth:`disable <Flag.disable>` a :class:`Flag`
(set it to ``false``)
* remove a field from a :class:`Map`
* :meth:`remove <Set.discard>` an element from a :py:class:`Set`
The basic rule is "you cannot remove something you haven't seen", and
the context tells Riak what you've actually seen, similar to the
:ref:`vclock` on :class:`~riak.riak_object.RiakObject`. The Python
client handles opaque contexts for you transparently as long as you
fetch before performing one of these actions.
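For example (reusing the bucket and key from above), removing an element
works because the preceding fetch supplied the necessary context::

    myfollowers = graph.bucket('followers').get('seancribbs')
    myfollowers.discard('javajolt')
    myfollowers.update()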
------------------------
Datatype abstract class
------------------------
.. autoclass:: Datatype
.. autoattribute:: value
.. autoattribute:: context
.. autoattribute:: modified
^^^^^^^^^^^^^^^^^^^
Persistence methods
^^^^^^^^^^^^^^^^^^^
.. automethod:: Datatype.reload
.. automethod:: Datatype.update
.. function:: Datatype.store(**params)
This is an alias for :meth:`~riak.datatypes.Datatype.update`.
.. automethod:: Datatype.delete
.. automethod:: Datatype.clear
-------
Counter
-------
.. autoclass:: Counter
.. attribute:: Counter.value
The current value of the counter.
:rtype: int
.. automethod:: Counter.increment
.. automethod:: Counter.decrement
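A short example (the bucket-type and bucket names here are illustrative only)::

    counters = client.bucket_type('counters').bucket('page-views')
    views = counters.get('homepage')
    views.increment(10)
    views.update()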
---
Set
---
.. autoclass:: Set
.. attribute:: Set.value
An immutable copy of the current value of the set.
:rtype: frozenset
.. automethod:: Set.add
.. automethod:: Set.discard
---
Map
---
.. autoclass:: Map
.. autoattribute:: Map.value
.. attribute:: Map.counters
Filters keys in the map to only those of counter types. Example::
map.counters['views'].increment()
del map.counters['points']
.. attribute:: Map.flags
Filters keys in the map to only those of flag types. Example::
map.flags['confirmed'].enable()
del map.flags['attending']
.. attribute:: Map.maps
Filters keys in the map to only those of map types. Example::
map.maps['emails'].registers['home'].set("user@example.com")
del map.maps['spam']
.. attribute:: Map.registers
Filters keys in the map to only those of register types. Example::
map.registers['username'].set_value("riak-user")
del map.registers['access_key']
.. attribute:: Map.sets
Filters keys in the map to only those of set types. Example::
map.sets['friends'].add("brett")
del map.sets['favorites']
------------------
Map-only datatypes
------------------
Two of the new Data Types may only be embedded in
:py:class:`Map <riak.datatypes.Map>` objects (in addition to
:py:class:`Map <riak.datatypes.Map>` itself):
--------
Register
--------
.. autoclass:: Register
.. autoattribute:: Register.value
.. automethod:: Register.assign
----
Flag
----
.. autoclass:: Flag
.. attribute:: Flag.value
The current value of the flag.
:rtype: bool, None
.. automethod:: Flag.enable
.. automethod:: Flag.disable
| /riak-2.7.0.tar.gz/riak-2.7.0/docs/datatypes.rst | 0.906288 | 0.679271 | datatypes.rst | pypi |
from __future__ import print_function
__all__ = ('get_version',)
from os.path import dirname, isdir, join
import re
from subprocess import CalledProcessError, Popen, PIPE
try:
from subprocess import check_output
except ImportError:
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
version_re = re.compile('^Version: (.+)$', re.M)
def get_version():
d = dirname(__file__)
if isdir(join(d, '.git')):
# Get the version using "git describe".
cmd = 'git describe --tags --match [0-9]*'.split()
try:
version = check_output(cmd).decode().strip()
except CalledProcessError:
print('Unable to get version number from git tags')
exit(1)
# PEP 386 compatibility
if '-' in version:
version = '.post'.join(version.split('-')[:2])
else:
# Extract the version from the PKG-INFO file.
with open(join(d, 'PKG-INFO')) as f:
version = version_re.search(f.read()).group(1)
return version
if __name__ == '__main__':
print(get_version()) | /riak_pb-2.1.1.0.tar.gz/riak_pb-2.1.1.0/version.py | 0.53048 | 0.173849 | version.py | pypi |
import abc
from enum import Enum
from fhir.resources.claim import Claim, ClaimItem
from pydantic.types import PositiveInt
from enginelib.errors import ClaimError
class CompareResult(Enum):
EQ = "equal"
NEQ = "not equal"
class _BaseComparator(abc.ABC):
    # Class of the items being compared.
_restricted_class = None
    @classmethod
    @abc.abstractmethod
    def compare(cls, first: object, second: object) -> CompareResult:
        pass
@classmethod
def _check_types(cls, first: object, second: object) -> None:
"""Check that objects have the same class.
Raises:
TypeError: if classes are different
"""
if cls._restricted_class is not None and (
not isinstance(first, cls._restricted_class)
or not isinstance(second, cls._restricted_class)
):
raise TypeError(
f"{cls._restricted_class} object is comparable only "
f"with another {cls._restricted_class} object. "
f"Found {type(first)} and {type(second)}."
)
class ClaimComparator(_BaseComparator):
_restricted_class = Claim
@classmethod
def compare(cls, first: Claim, second: Claim) -> CompareResult:
"""
Compare 2 `Claim`-s.
Args:
first: claim
second: another claim
"""
cls._check_types(first, second)
# Check claimNum-s.
try:
claim_num_equal = cls._get_claim_num(first) == cls._get_claim_num(
second
)
except ClaimError:
claim_num_equal = None
# Check id-s.
try:
id1 = first.id
id2 = second.id
if id1 is None or id2 is None:
raise AttributeError("No id in claim")
claim_id_equal = id1 == id2
except AttributeError:
claim_id_equal = None
# Return an answer.
is_equal = False
if claim_num_equal is None and claim_id_equal is None:
raise ClaimError("Not enough information to compare")
elif claim_num_equal is None:
# Compare only by id.
is_equal = claim_id_equal
elif claim_id_equal is None:
# Compare only by claimNum.
is_equal = claim_num_equal
else:
# Compare by both claimNum and id since they are available.
is_equal = claim_num_equal and claim_id_equal
return CompareResult.EQ if is_equal else CompareResult.NEQ
# TODO: Refactor - duplicated method as in ClaimFocus class.
@staticmethod
def _get_claim_num(claim: Claim) -> str:
try:
return claim.identifier[0].value
except (IndexError, AttributeError, TypeError) as exc:
raise ClaimError("Could not find claimNum") from exc
class ClaimItemComparator(_BaseComparator):
"""Class to compare ClaimItem-s from the *same* Claim."""
_restricted_class = ClaimItem
@classmethod
def compare(cls, first: ClaimItem, second: ClaimItem) -> CompareResult:
"""
Compare 2 `ClaimItem`-s.
Args:
first: ClaimItem from the *same* Claim as second
second: ClaimItem from the *same* Claim as first
"""
cls._check_types(first, second)
try:
is_equal = cls._get_sequence(first) == cls._get_sequence(second)
except ClaimError as exc:
raise ClaimError("Not enough information to compare") from exc
return CompareResult.EQ if is_equal else CompareResult.NEQ
# TODO: Refactor - duplicated method as in ClaimLineFocus class.
@staticmethod
def _get_sequence(claim_item: ClaimItem) -> PositiveInt:
try:
return claim_item.sequence
except (AttributeError, ClaimError) as exc:
raise ClaimError(
"No sequence value found for this claim."
) from exc | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/comparator.py | 0.641422 | 0.273756 | comparator.py | pypi |
from typing import TypeVar
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.tree_node import ResultTreeNode
from fhir.resources.claim import ClaimItem
from enginelib.simple_insight import SimpleInsight
from schema.insight_engine_response import InsightType
A = TypeVar('A')
ClaimLine = ClaimItem
ClaimLineInsight = ResultTreeNode[ClaimLineFocus, SimpleInsight]
def stub(id: str) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.Error, "TODO: " + id)
)
def insight_line_valid(s: str) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.ClaimLineValid, s)
)
def insight_claim_not_payable(s: str, **kwargs) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.ClaimNotPayable, s, defense=kwargs.get('defense', None),
defenseuuid=kwargs.get('defenseuuid', None))
)
def insight_not_applicable(s: str) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.NotApplicable, s)
)
def insight_line_not_payable(s: str, **kwargs) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.ClaimLineNotPayable, s, defense=kwargs.get('defense', None),
defenseuuid=kwargs.get('defenseuuid', None))
)
def insight_recode_line(s: str, **kwargs) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.RecodeClaimLine, s, defense=kwargs.get('defense', None),
defenseuuid=kwargs.get('defenseuuid', None))
)
def insight_line_require_manual_review(s: str, **kwargs) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.ManualReview, s, defense=kwargs.get('defense', None),
defenseuuid=kwargs.get('defenseuuid', None))
)
def insight_line_error(s: str) -> ResultTreeNode[A, SimpleInsight]:
return ResultTreeNode[A, SimpleInsight](
lambda *request: SimpleInsight(InsightType.Error, s)
) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/insights.py | 0.640748 | 0.311309 | insights.py | pypi |
import datetime
from enum import Enum, unique
from typing import cast, List, Tuple, Union
from fhir.resources.claim import Claim, ClaimItem
from schema.insight_engine_request import InsightEngineRequest
from enginelib.claim_focus import ClaimFocus, ClaimTypeFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.errors import ClaimError
from enginelib.types import Period
from fhir.resources.period import Period as FHIRPeriod
@unique
class SameDateResult(str, Enum):
NotApplicable100N = 'This classification tree is looking only at same date relationships for professional claims.'
NotApplicable200N = 'No other historical claim have the same start dates for its claim lines.'
Partial = 'CUE and OC have the same dateFrom, in all claim lines, but dateTo differs for at least one claim line.'
Same400Y = 'CUE and OC take place on the same day, for all claim lines.'
Same400N = 'CUE and OC take place during the exact same date range, for all claim lines.'
Error = 'Error'
@unique
class LineSameDateResult(str, Enum):
NotApplicable200N = 'No other claim lines in history that have the same start date, therefore cannot be "same date"'
    Partial300N = 'CLUE and OCL have same Date From but Different Date To'
Same400Y = 'CLUE and OCL take place on the same day'
Same400N = 'CLUE and OCL take place during the exact same date range'
Error = 'Error'
ClaimOrItem = Union[Claim, ClaimItem]
def same_date(a: ClaimOrItem, b: ClaimOrItem) -> Union[SameDateResult, LineSameDateResult]:
if isinstance(a, Claim):
return claim_same_date(a, b)
elif isinstance(a, ClaimItem):
return claim_line_same_date(a, b)
def claim_same_date(cue: Claim, oc: Claim) -> SameDateResult:
"""Verify if two claims are for the same date or the same date range.
Args:
cue: the claim under investigation
oc: the claim we want to compare with the cue
"""
# noinspection PyBroadException
try:
return SameDatePolicy(ClaimFocus(cue)).evaluate(ClaimFocus(oc))
except Exception:
return SameDateResult.Error
def claim_line_same_date(clue: ClaimItem, ocl: ClaimItem) -> LineSameDateResult:
"""Verify if two claim lines are for the same date or the same date range.
"""
try:
return LineSameDatePolicy(clue).evaluate(ocl)
except Exception:
return LineSameDateResult.Error
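# Example (illustrative sketch): `cue` and `oc` are assumed to be
# fhir.resources Claim objects for the claim under evaluation and a
# historical claim, respectively.
#
#   result = same_date(cue, oc)
#   if result in (SameDateResult.Same400Y, SameDateResult.Same400N):
#       ...  # the two claims cover the same date(s) for all claim lines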
class SameDatePolicy:
"""Class that implements same date verification policy for claim level."""
# [(dateFrom, dateTo), (dateFrom, dateTo), ...], one pair of dates for each claim line.
_oc_date_tuples: List[Tuple[datetime.date, datetime.date]]
def __init__(self, cue: ClaimFocus):
self.cue = cue
self.cue_date_tuples = self._extract_date_fields(cue)
def evaluate(self, oc: ClaimFocus) -> SameDateResult:
"""Check whether a given claim oc has same date (or same date range) as the cue."""
# 100
if ClaimTypeFocus.from_string(self.cue.claim_type) != ClaimTypeFocus.PROFESSIONAL:
return SameDateResult.NotApplicable100N
self._oc_date_tuples = self._extract_date_fields(oc)
# 200
if not self._verify_all_start_dates_match():
return SameDateResult.NotApplicable200N
# 300
if not self._verify_all_end_dates_match():
return SameDateResult.Partial
# 400
if self._verify_all_on_the_same_day():
return SameDateResult.Same400Y
return SameDateResult.Same400N
@staticmethod
def _extract_date_fields(cf: ClaimFocus) -> List[Tuple[datetime.date, datetime.date]]:
"""Extracts list of pairs of the form (datrFrom, dateTo) for each claim line in the given ClaimFocus.
Args:
cf: an instance of ClaimFocus.
Returns:
A list of pairs (start_date, end_date), one for each claim line in cf.
"""
date_fields = list()
for claim_line in cf.claim.item:
clue = ClaimLineFocus(
claim_line=cast(ClaimItem, claim_line),
request=InsightEngineRequest.construct(claim=cf.claim)
)
date_from, date_to = clue.service_period
date_fields.append((date_from, date_to))
# sort tuples by 1. increasing order of lineServicedDateFrom, and in case of a draw,
# sort by 2. increasing order of lineServicedDateTo:
date_fields.sort()
return date_fields
def _verify_all_start_dates_match(self) -> bool:
cue_date_from = [date_from for date_from, date_to in self.cue_date_tuples]
oc_date_form = [date_from for date_from, date_to in self._oc_date_tuples]
return cue_date_from == oc_date_form
def _verify_all_end_dates_match(self) -> bool:
cue_date_to = [date_to for date_from, date_to in self.cue_date_tuples]
oc_date_to = [date_to for date_from, date_to in self._oc_date_tuples]
return cue_date_to == oc_date_to
def _verify_all_on_the_same_day(self) -> bool:
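        # The tuples are sorted, so if the first and last pairs are equal and that
        # pair collapses to a single day, every claim line in the CUE was serviced
        # on that one day.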
first_date = self.cue_date_tuples[0]
last_date = self.cue_date_tuples[-1]
return first_date == last_date and first_date[0] == first_date[1]
class LineSameDatePolicy:
"""Class that implements same date verification policy for line level."""
def __init__(self, clue: ClaimItem):
self.clue = clue
self.start, self.end = self.service_period(clue)
def evaluate(self, ocl: ClaimItem) -> LineSameDateResult:
ocl_start, ocl_end = self.service_period(ocl)
# 200
if self.start != ocl_start:
return LineSameDateResult.NotApplicable200N
# 300
elif self.end != ocl_end:
return LineSameDateResult.Partial300N
# 400
elif self.start == self.end:
return LineSameDateResult.Same400Y
else:
return LineSameDateResult.Same400N
# This should use ClaimLineFocus,
# but the request parameter makes that tricky right now
# so I am reimplementing the method we need here
@staticmethod
def service_period(clue: ClaimItem) -> Period:
serv_date = clue.servicedDate
if serv_date:
return Period(serv_date, serv_date)
else:
period = cast(FHIRPeriod, clue.servicedPeriod)
if not (period.start and period.end):
raise ClaimError('Service period not found for this claim line')
return Period(period.start, period.end) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/matchers/same_date.py | 0.873741 | 0.28113 | same_date.py | pypi |
from re import split
from deprecation import deprecated
from typing import Generator, List, Optional, cast
from fhir.resources.address import Address
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.coverage import Coverage
from fhir.resources.reference import Reference
from enginelib.claim_focus import ClaimFocus, ClaimInsurance
__US_STATE_ABBRV = {
"AL": "alabama",
"AK": "alaska",
"AR": "arkansas",
"AZ": "arizona",
"CA": "california",
"CO": "colorado",
"CT": "connecticut",
"DE": "delaware",
"DC": "district of columbia",
"FL": "florida",
"GA": "georgia",
"HI": "hawaii",
"ID": "idaho",
"IL": "illinois",
"IN": "indiana",
"IA": "iowa",
"KS": "kansas",
"KY": "kentucky",
"LA": "louisiana",
"ME": "maine",
"MD": "maryland",
"MA": "massachusetts",
"MI": "michigan",
"MN": "minnesota",
"MS": "mississippi",
"MO": "missouri",
"MT": "montana",
"NE": "nebraska",
"NV": "nevada",
"NH": "new hampshire",
"NJ": "new jersey",
"NY": "new york",
"NC": "north carolina",
"ND": "north dakota",
"OH": "ohio",
"OK": "oklahoma",
"OR": "oregon",
"PA": "pennsylvania",
"PR": "puerto rico",
"RI": "rhode island",
"SC": "south carolina",
"SD": "south dakota",
"TN": "tennessee",
"TX": "texas",
"UT": "utah",
"VT": "vermont",
"VA": "virginia",
"WV": "west virginia",
"WI": "wisconsin",
"WY": "wyoming",
}
@deprecated(
deprecated_in="0.0.9",
removed_in="1.0.0",
details="Use `ClaimFocus.subscriber_id` instead."
)
def subscriber_ids(cue: ClaimFocus) -> Generator[str, None, None]:
"""DEPRECATED.
Use `ClaimFocus.subscriber_id` instead.
TODO(plyq): Remove after versioning implementation.
Not used in `same_patient` matcher.
"""
for ins in cue.claim.insurance:
coverage: Coverage = cue.contained[ins.coverage.reference]
yield coverage.subscriberId
def normalize(txt: Optional[str]) -> Optional[str]:
return txt.lower().strip().strip(",").strip(".") \
if txt is not None else None
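# Hedged example of the normalization above (surrounding whitespace plus trailing
# commas and periods are stripped, and the text is lower-cased):
#   normalize(' New York, ')  ->  'new york'
#   normalize(None)           ->  None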
def same_address(address1: Address, address2: Address) -> bool:
# both have a text description
if (
address1.text is not None
and address2.text is not None
and normalize(address2.text) ==
normalize(address1.text)
):
return True
are_same = [
_same_address_by_field(address1, address2),
_same_address_field2txt(address1, address2),
_same_address_field2txt(address2, address1),
]
return any(are_same)
def _same_address_field2txt(
field_address: Address, text_address: Address
) -> bool:
"""
Compare whether the address given in the attributes of one address
match the address in the text of another address
"""
if text_address.text is None or field_address.text is not None:
return False
return all(
elt is None or elt in normalize(text_address.text)
for elt in [normalize(field_address.line[0])]
+ [
normalize(field_address.city),
normalize(field_address.state),
normalize(field_address.district),
normalize(field_address.postalCode),
normalize(field_address.country),
]
)
def _get_state(state_name: Optional[str]) -> Optional[str]:
if state_name is None:
return None
elif state_name in __US_STATE_ABBRV:
return __US_STATE_ABBRV[state_name]
else:
return state_name.lower()
def _same_address_by_field(address1: Address, address2: Address) -> bool:
"""
Compare whether the fields of two Addresses describe the same address
"""
if address1.line is None or address2.line is None:
return False
def equal_or_null(a: Optional[str], b: Optional[str]):
return a is None or b is None or a == b
# fields: line, country, city, state, district, postal code
return (
len({normalize(address1.line[0])} &
{normalize(address2.line[0])}) > 0
and equal_or_null(normalize(address1.country),
normalize(address2.country))
and equal_or_null(normalize(address1.city),
normalize(address2.city))
and equal_or_null(
_get_state(address1.state), _get_state(address2.state)
)
and equal_or_null(normalize(address1.district),
normalize(address2.district))
and equal_or_null(normalize(address1.postalCode),
normalize(address2.postalCode))
)
def different_gender(gender1: Optional[str], gender2: Optional[str]) -> bool:
"""Are two gender codes determinative of different genders"""
if gender1 is None or gender2 is None:
return True
if gender1 == "unknown" or gender2 == "unknown":
return True
else:
return gender1 != gender2
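# Sketch of the semantics above: a missing or 'unknown' gender is treated as
# potentially different, so it can never help establish a "same patient" match.
#   different_gender('male', 'male')     -> False
#   different_gender('male', 'female')   -> True
#   different_gender(None, 'female')     -> True
#   different_gender('unknown', 'male')  -> True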
@deprecated(
deprecated_in="0.0.9",
removed_in="1.0.0",
details="Use `ClaimFocus.relation_to_insured` instead."
)
def same_relation_to_insured(
claim1: ClaimFocus, claim2: ClaimFocus
) -> Generator[bool, None, None]:
"""DEPRECATED.
Use `ClaimFocus.relation_to_insured` instead.
TODO(plyq): Remove after versioning implementation.
Not used in `same_patient` matcher.
"""
for ins1 in claim1.claim.insurance:
for ins2 in claim2.claim.insurance:
r1 = _get_relations_to_insured(claim1, ins1)
r2 = _get_relations_to_insured(claim2, ins2)
yield any(
relation in r1 and relation != "other" for relation in r2
)
def _get_relations_to_insured(
claim_focus: ClaimFocus, ins: ClaimInsurance
) -> List[str]:
claim_ins = cast(ClaimInsurance, ins).coverage
try:
ref = cast(Reference, claim_ins).reference
cov = cast(Coverage, claim_focus.contained[ref])
except KeyError:
return []
relation = cast(CodeableConcept, cov.relationship)
if relation is not None:
return [coding.code for coding in relation.coding]
else:
return [] | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/matchers/same_patient/predicates.py | 0.805096 | 0.404331 | predicates.py | pypi |
from fhir.resources.claim import Claim
from fhir.resources.humanname import HumanName
from fhir.resources.address import Address
from enginelib.matchers.same_patient.result import SamePatientResult
from enginelib.claim_focus import ClaimFocus, ClaimError
from .predicates import same_address, different_gender, normalize
from typing import cast
from enginelib.errors import MissingFieldError
def same_patient(cue: Claim, oc: Claim) -> SamePatientResult:
mapping = {
'100N': SamePatientResult.Different,
'200Y': SamePatientResult.Same,
'300N': SamePatientResult.Different,
'400N': SamePatientResult.Different,
'600Y': SamePatientResult.Suspected600Y,
'700N': SamePatientResult.Different,
'800Y': SamePatientResult.Suspected800Y,
'800N': SamePatientResult.Different,
'900Y': SamePatientResult.Suspected900Y,
'900N': SamePatientResult.Different
}
# noinspection PyBroadException
try:
label = same_patient_label(cue, oc)
except ClaimError as err:
raise err
except Exception:
return SamePatientResult.Error
return mapping.get(label, SamePatientResult.Error)
def same_patient_label(cue: Claim, oc: Claim) -> str:
cue_focus = ClaimFocus(cue)
oc_focus = ClaimFocus(oc)
if not cue.insurance:
raise ClaimError("No insurance found for CUE")
if not oc.insurance:
raise ClaimError("No insurance found for OC")
try:
ids_equal = cue_focus.subscriber_id == oc_focus.subscriber_id
except MissingFieldError:
ids_equal = False
# node 100
# true -> node 200
# false -> SamePatientResult.Different
if not ids_equal:
return '100N'
try:
relations_equal = (
cue_focus.relation_to_insured in ['18', 'self'] and
oc_focus.relation_to_insured in ['18', 'self']
)
except MissingFieldError:
relations_equal = False
# node 100: insurance.subscriberID == oc.insurance.subscriberID
# node 200: cue.insurance.relationToInsured == oc.insurance.relationToInsured
# true -> SamePatient.Same
# false -> node 300
if relations_equal:
return '200Y'
# node 300: cue.patient.BirthDate == oc.patient.BirthDate
# true -> node 400
# false -> SamePatientResult.Different
if cue_focus.patient.birthDate != oc_focus.patient.birthDate:
return '300N'
# node 400: cue.patient.gender =? oc.patient.gender
# true -> node 500
# false -> SamePatientResult.Different
if different_gender(normalize(cue_focus.patient.gender),
normalize(oc_focus.patient.gender)):
return '400N'
# Names: could probably also think a bit more about the
# valid relations between names the same person can have.
# Kind of sceptical of the heuristic
# "nicknames start with the same first 3 letters as the
# given name they riff on".
# But until that's resolved:
# patients are guaranteed to only have 1 full name
cueName = cast(HumanName, cue_focus.patient.name[0])
ocName = cast(HumanName, oc_focus.patient.name[0])
# node 500: cue.patient.name.given === oc.patient.name.given
# true -> node 600
# false -> node 700
if normalize(' '.join(sorted(cueName.given))) == \
normalize(' '.join(sorted(ocName.given))):
# node 600: cue.patient.name.family == oc.patient.name.family
# true -> Suspected600Y
# false -> node 800
if normalize(cueName.family) == \
normalize(ocName.family):
return '600Y'
# node 800: cue and oc have the same address
# Note; while FHIR allows patients to have multiple addresses,
# Rialtic claims will only have 1 address per patient
# true -> Suspected800Y
# false -> Different
elif same_address(cast(Address, cue_focus.patient.address[0]),
cast(Address, oc_focus.patient.address[0]),
):
return '800Y'
else:
return '800N'
else:
        # node 700: cue.patient.name.given[0][:3] == oc.patient.name.given[0][:3]
# true -> node 900
# false -> Different
if normalize(cueName.given[0][:3]) == \
normalize(ocName.given[0][:3]):
# node 900: cue.patient.name.family == oc.patient.name.family
# true -> Suspected900Y
# false -> Different
if normalize(cueName.family) == \
normalize(ocName.family):
return '900Y'
else:
return '900N'
else:
return '700N' | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/matchers/same_patient/policy.py | 0.717012 | 0.24901 | policy.py | pypi |
from typing import Optional
from fhir.resources.claim import Claim, ClaimItem
from schema.insight_engine_request import InsightEngineRequest
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.errors import ClaimError, MissingFieldError
from .predicates import valid_taxonomy_code, match_taxonomy_codes
from .result import SameProviderResult
# TODO: Remove `apikey` once versions will be implemented.
def same_provider(
cue: Claim,
clue: ClaimItem,
oc: Claim,
ocl: ClaimItem,
apikey: Optional[str] = None) -> SameProviderResult:
"""
Args:
cue: the claim under evaluation
clue: the claim line under evaluation
oc: other claim
ocl: other claim line
apikey: just a placeholder for backward-compatibility
    Returns:
        SameProviderResult.Error if the following conditions are satisfied:
        1. rendProvNPI exists for both the CLUE and the OCL and the values differ; and...
        1.1 subject to 1, billProvNPI is missing in the CUE or in the OC; or
        1.2 subject to 1, rendProvTaxonomy is missing in the CLUE or in the OCL; or
        1.3 subject to 1, billProvNPI exists for both the CUE and the OC; and...
        1.3.1 subject to 1 and 1.3, provTaxID is missing for either the CUE or the OC
"""
# Wrap clue and ocl inside ClaimLineFocus
cue_request = InsightEngineRequest.construct(claim=cue)
clue = ClaimLineFocus(claim_line=clue, request=cue_request)
oc_request = InsightEngineRequest.construct(claim=oc)
ocl = ClaimLineFocus(claim_line=ocl, request=oc_request)
# Wrap cue and oc inside ClaimFocus
cue = ClaimFocus(claim=cue)
oc = ClaimFocus(claim=oc)
# START -> Node 100: Are CLUE and OCL rendProvNPI populated?
try:
clue_npi = clue.rend_prov_npi
ocl_npi = ocl.rend_prov_npi
except MissingFieldError:
return SameProviderResult.Facility # 100N
try:
# 100Y -> Node 200: Do CLUE and OCL have the same rendProvNPI?
if clue_npi == ocl_npi:
return SameProviderResult.SameRendering # 200Y
# 200N -> Node 300: Do OCL and CLUE have the same billProvNPI?
if cue.bill_prov_npi == oc.bill_prov_npi:
# 300Y -> Node 400: Are OCL and CLUE rendProvTaxonomy values
# both valid codes (as listed in Taxonomy crosswalk)?
if valid_taxonomy_code(clue.rend_prov_taxonomy) and \
valid_taxonomy_code(ocl.rend_prov_taxonomy):
# 400Y -> Node 600: Do CLUE and OCL have the same "MEDICARE
# SPECIALTY CODE" based on rendProvTaxonomy match to
# "PROVIDER TAXONOMY CODE" in Taxonomy Crosswalk?
if match_taxonomy_codes(clue.rend_prov_taxonomy, ocl.rend_prov_taxonomy):
return SameProviderResult.Partial600Y
# 600N
return SameProviderResult.Partial600N
# 400N
return SameProviderResult.Partial400N
# 300N -> Node 500: CLUE and OCL have the same provTaxID?
# [new FHIR mapping]: assuming each billing provider has a unique provTaxID
if cue.prov_tax_id == oc.prov_tax_id:
# 500Y -> Node 700: Are OCL and CLUE rendProvTaxonomy values
# both valid codes (as listed in Taxonomy crosswalk)?
if valid_taxonomy_code(clue.rend_prov_taxonomy) and \
valid_taxonomy_code(ocl.rend_prov_taxonomy):
# 700Y -> Node 800: Do CLUE and OCL have the same "MEDICARE
# SPECIALTY CODE" based on rendProvTaxonomy match to
# "PROVIDER TAXONOMY CODE" in Taxonomy Crosswalk?
if match_taxonomy_codes(clue.rend_prov_taxonomy,
ocl.rend_prov_taxonomy):
return SameProviderResult.Partial800Y
# 800N
return SameProviderResult.Partial800N
# 700N
return SameProviderResult.Partial700N
# 500N
return SameProviderResult.Different
except (ClaimError, MissingFieldError):
return SameProviderResult.Error | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/matchers/same_provider/policy.py | 0.569494 | 0.268692 | policy.py | pypi |
from typing import List, Dict
from schema.insight_engine_request import InsightEngineRequest
from schema.insight_engine_response import InsightEngineResponse, Insight
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.policy import Policy
from enginelib.decor.flow import Flow
from enginelib.mpe.interface import Prerequisite
class MultiPolicy:
def __init__(self, flow: Flow, engine_id: str):
self.flow = flow
self.engine_id = engine_id
def evaluate(self, request: InsightEngineRequest) -> InsightEngineResponse:
response = InsightEngineResponse()
response.engine_name = self.engine_id
response.insights = list()
cue = ClaimFocus(claim=request.claim, request=request)
for clue in cue.lines:
response.insights.extend(self._assess(clue))
return response
def _assess(self, clue: ClaimLineFocus) -> List[Insight]:
prerequisite = Prerequisite(clue)
if not prerequisite.execute():
return [prerequisite.insight]
insights: List[Insight] = list()
policies: Dict[str, List[dict]] = dict()
for row in prerequisite.rows:
policy_name = row['policy_name']
policies.setdefault(policy_name, list()).append(row)
for policy_name, rows in policies.items():
if len(rows) != 1:
self._log(f'Error: not able to narrow master table down to one row '
f'for policy {policy_name} when procedureCode is {clue.procedure_code}.')
continue
row = rows[0]
response = Policy(
request=clue.request,
decision_tree=self.flow,
historical_claims=list(),
data=row,
engine_id=self.engine_id
).evaluate()
for insight in response.insights:
insight.policy_name = policy_name
if 'policy_excerpt' in row and row['policy_excerpt']:
# Overwrites any previous defense for this insight with the policy_excerpt:
insight.defense = Policy.create_defense(text=row['policy_excerpt'])
insights.append(insight)
return insights
@staticmethod
def _log(*args, **kwargs):
print(*args, **kwargs) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/mpe/multi_policy.py | 0.665084 | 0.180359 | multi_policy.py | pypi |
from copy import copy
from typing import Optional
from schema.insight_engine_response import Insight
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.policy import Policy
from enginelib.decor.registry import Registry
from enginelib.decor.traversal import TreeTraversal
from enginelib.mpe.subtrees.prerequisite import tree, PrerequisiteResult
from enginelib.utils import unique_identifier
class Prerequisite:
"""To control the data source (file, database table name, etc), you can
set the following environment variables:
MASTER_TABLE_ENV
MASTER_TABLE_FILENAME
MASTER_TABLE_NAME
Please, refer to the docstring of functions in file `enginelib.rds.multipolicy`
for more information.
"""
def __init__(self, clue: ClaimLineFocus):
self.clue = clue
self.cue = ClaimFocus(claim=clue.request.claim)
self.registry = Registry(cue=self.cue, clue=clue, ocs=list(), data=dict())
self.label: Optional[str] = None
self.rows = list()
self.insight: Optional[Insight] = None
def execute(self) -> bool:
""" Executes the prerequisite subtree for the clue.
Returns
True if there were rows in the master table satisfying all the predicates
in the prerequisite tree. The selected rows are returned in the form
of a list of dictionaries, each containing a column_name: value
for each column in the master table.
In this case, the `rows` attribute holds the list of selected rows.
False if no row in the master table satisfied all requirements. An insight
is generated according to which end-node in the prerequisite tree was
reached.
            In this case, the `insight` attribute holds the corresponding insight.
"""
pre_req_tree = copy(tree)
pre_req_tree.assemble()
self.label = TreeTraversal(pre_req_tree, self.registry).execute()
if self.label == '300Y':
self.rows = self.registry.data['reference_data_table']
return True
simple_insight = PrerequisiteResult.simple_insight(self.label)
self.insight = Insight(
id=unique_identifier(self.clue.request.claim.id),
type=simple_insight.insight_type,
description=simple_insight.text,
claim_line_sequence_num=self.clue.sequence,
defense=Policy.create_defense()
)
return False | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/mpe/interface.py | 0.869991 | 0.262734 | interface.py | pypi |
from typing import Tuple
def _init_age_bounds(relation: str, age_min_str: str, age_max_str: str) -> Tuple[int, int]:
age_min = age_max = 0
if relation[-1] in ['O', 'X'] or relation[0] == relation[-1] == '—':
age_max = int(float(age_max_str))
if relation[0] in ['O', 'X'] or relation[0] == relation[-1] == '—':
age_min = int(float(age_min_str))
return age_min, age_max
def get_age_rationale(relation: str, age_min_str: str, age_max_str: str, age_uom: str):
age_min, age_max = _init_age_bounds(relation, age_min_str, age_max_str)
relation_rationale = {
'—O': f'less than {age_max} {age_uom}(s)',
'—X': f'less than or equal to {age_max} {age_uom}(s)',
'X—': f'greater than or equal to {age_min} {age_uom}(s)',
'O—': f'greater than {age_min} {age_uom}(s)',
'X—O': f'greater than or equal to {age_min} AND less than {age_max} {age_uom}(s)',
'X—X': f'greater than or equal to {age_min} and less than or equal to {age_max} {age_uom}(s)',
        'O—X': f'greater than {age_min} and less than or equal to {age_max} {age_uom}(s)',
'O—O': f'greater than {age_min} and less than {age_max} {age_uom}(s)',
'—OO—': f'less than {age_min} or greater than {age_max} {age_uom}(s)',
'—XX—': f'less than or equal to {age_min} or greater than or equal to {age_max} {age_uom}(s)',
'—OX—': f'less than {age_min} or greater than or equal to {age_max} {age_uom}(s)',
'—XO—': f'less than or equal to {age_min} or greater than {age_max} {age_uom}(s)'
}
return relation_rationale.get(relation, relation)
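# Illustrative example (values are hypothetical): for the relation 'X—O' the bounds
# are read as min-inclusive / max-exclusive, so
#   get_age_rationale('X—O', '18', '65', 'year')
# returns 'greater than or equal to 18 AND less than 65 year(s)', and
# is_patient_age_relation_satisfied(18, 'X—O', '18', '65') (defined below) is True.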
def is_patient_age_relation_satisfied(
patient_age_in_uom: int,
relation: str,
age_min_str: str,
age_max_str: str) -> bool:
age_min, age_max = _init_age_bounds(relation, age_min_str, age_max_str)
relation_condition = {
'—O': patient_age_in_uom < age_max,
# less than or equal to {age_exception_max_value} {age_exception_uom}(s)
'—X': patient_age_in_uom <= age_max,
# greater than or equal to {age_exception_min_value} {age_exception_uom}(s)
'X—': age_min <= patient_age_in_uom,
# greater than {age_exception_min_value} {age_exception_uom}(s)
'O—': age_min < patient_age_in_uom,
# greater than or equal to {age_exception_min_value} AND less than
# {age_exception_max_value} {age_exception_uom}(s)
'X—O': age_min <= patient_age_in_uom < age_max,
# greater than or equal to {age_exception_min_value} AND less than or
# equal to {age_exception_max_value} {age_exception_uom}(s)
'X—X': age_min <= patient_age_in_uom <= age_max,
# greater than {age_exception_min_value} AND less than or equal to
# {age_exception_max_value} {age_exception_uom}(s)
'O—X': age_min < patient_age_in_uom <= age_max,
# greater than {age_exception_min_value} AND less than
# {age_exception_max_value} {age_exception_uom}(s)
'O—O': age_min < patient_age_in_uom < age_max,
# less than {age_exception_min_value} or greater than
# {age_exception_max_value} {age_exception_uom}(s)
'—OO—': patient_age_in_uom < age_min or age_max < patient_age_in_uom,
# less than or equal to {age_exception_min_value} or greater than
# or equal to {age_exception_max_value} {age_exception_uom}(s)
'—XX—': patient_age_in_uom <= age_min or age_max <= patient_age_in_uom,
# less than {age_exception_min_value} or greater than or equal to
# {age_exception_max_value} {age_exception_uom}(s)
'—OX—': patient_age_in_uom < age_min or age_max <= patient_age_in_uom,
# less than or equal to {age_exception_min_value} or greater than
# {age_exception_max_value} {age_exception_uom}(s)
'—XO—': patient_age_in_uom <= age_min or age_max < patient_age_in_uom
}
return relation_condition[relation] | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/mpe/utils/age_relation.py | 0.875068 | 0.466359 | age_relation.py | pypi |
from typing import Any, Dict
from schema.insight_engine_response import InsightType
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.claim_focus import ClaimFocus
from enginelib.decor.result import AbstractResult
from enginelib.decor.tree import Tree
from enginelib.mpe.utils.age_relation import is_patient_age_relation_satisfied
from enginelib.mpe.utils.age_in_uom import get_patient_age_in_days, days_to_uom, yyyymmdd_to_date
from enginelib.rds.multipolicy import rows_for_cpt
class PrerequisiteResult(AbstractResult):
insight_text = {
'050N': 'The claim under evaluation is a voided claim. '
'Voided claims are considered in a different insight engine.',
'100N': 'There is no coverage policy associated with this procedure code.',
'150N': 'There is no procedure code with the applicable age requirement '
'associated with the claim under evaluation.',
'200N': 'There is no coverage policy associated with this procedure code '
'after the serviced date.',
'300N': 'The serviced date of the claim does not fall within a coverage '
'policy effective date range.',
# Placeholder
'300Y': 'PASS',
}
insight_type = {
'050N': InsightType.NotApplicable,
'100N': InsightType.NotApplicable,
'150N': InsightType.NotApplicable,
'200N': InsightType.NotApplicable,
'300N': InsightType.NotApplicable,
# Placeholder
'300Y': InsightType.Error,
}
tree = Tree(PrerequisiteResult, name='Prerequisite Subtree (library)')
@tree.node(50, 100, '050N')
def related_claim_check(cue: ClaimFocus) -> bool:
""" Is CLUE <relatedClaim> NOT EQUAL to '8'? """
return cue.related_claim != '8'
@tree.node(100, 150, '100N')
def filter_procedure_code(clue: ClaimLineFocus, data: Dict[str, Any]) -> bool:
""" Does the MPE Reference Table have at least one row
where the CLUE <procedureCode> = {cpt_procedurecode}? """
data['reference_data_table'] = rows_for_cpt(clue.procedure_code)
return bool(data['reference_data_table'])
@tree.node(150, 200, '150N')
def filter_age_requirements(clue: ClaimLineFocus, data: Dict[str, Any]) -> bool:
""" Does the CLUE also conform to at least one of the
age requirements associated with CLUE <procedureCode>? """
patient_age_in_days = get_patient_age_in_days(clue)
table = data['reference_data_table']
rows = list()
for row in table:
indicator = row['age_requirement_indicator'].strip()
if indicator == '1':
relation = row['age_requirement_relation'].strip()
age_min = row['age_requirement_min_value']
age_max = row['age_requirement_max_value']
age_uom = row['age_requirement_uom'].lower().strip()
patient_age_in_uom = days_to_uom(patient_age_in_days, age_uom)
if not is_patient_age_relation_satisfied(patient_age_in_uom, relation, age_min, age_max):
continue
rows.append(row)
data['reference_data_table'] = rows
return bool(rows)
@tree.node(200, 300, '200N')
def check_effective_start_date(clue: ClaimLineFocus, data: Dict[str, Any]) -> bool:
""" Is the CLUE <lineServicedDateFrom> also on or after the {eff_start_date}? """
table = data['reference_data_table']
rows = list()
for row in table:
if clue.service_period.start >= yyyymmdd_to_date(row['eff_start_date']):
rows.append(row)
data['reference_data_table'] = rows
return bool(rows)
@tree.node(300, '300Y', '300N')
def check_effective_end_date(clue: ClaimLineFocus, data: Dict[str, Any]) -> bool:
""" Is the CLUE <lineServicedDateFrom> also before {eff_end_date}? """
table = data['reference_data_table']
rows = list()
for row in table:
if clue.service_period.end < yyyymmdd_to_date(row['eff_end_date']):
rows.append(row)
data['reference_data_table'] = rows
return bool(rows) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/mpe/subtrees/prerequisite.py | 0.782288 | 0.282303 | prerequisite.py | pypi |
from typing import Dict, Any
from schema.insight_engine_response import InsightType
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.registry import Registry
from enginelib.decor.result import AbstractResult
from enginelib.decor.tree import Tree
from enginelib.mpe.utils.age_relation import is_patient_age_relation_satisfied, get_age_rationale
from enginelib.rds.icd10 import ICD10Collection
from enginelib.mpe.utils.age_in_uom import get_patient_age_in_days, days_to_uom
class ExceptionResult(AbstractResult):
insight_type = {
'10100Y': InsightType.ManualReview,
'10200N': InsightType.ClaimLineNotPayable,
'10300N': InsightType.ClaimLineNotPayable,
'10400Y': InsightType.ManualReview,
'10400N': InsightType.ClaimLineNotPayable,
'10500Y': InsightType.ManualReview,
'10500N': InsightType.ClaimLineNotPayable
}
insight_text = {
'10100Y': 'Patient age {patient_age} is {age_exception_rationale}, therefore '
'{clue_procedure_code} may be covered if medical necessity is established, '
'despite {check_specific_condition} per {policy_attribution}.',
'10200N': 'Procedure code {clue_procedure_code} {check_specific_reason}, '
'no applicable exceptions, according to {policy_attribution}.',
'10300N': 'Procedure code {clue_procedure_code} {check_specific_reason}, and patient '
'age exception ({age_exception_rationale} where patient age is {patient_age}) '
'criteria were not fulfilled, according to {policy_attribution}.',
'10400Y': 'The indication exception requirement was met ({diagnosis_exception}), therefore '
'{clue_procedure_code} may be covered if medical necessity is established, '
'despite {check_specific_condition}, per {policy_attribution}.',
'10400N': 'Procedure code {clue_procedure_code} {check_specific_reason}, '
'and diagnosis code {clue_diagnosis} does not fulfill condition '
'exception criteria, according to {policy_attribution}.',
'10500Y': 'While the patient age exception criteria was not met, the indication '
'exception requirement was met ({diagnosis_exception}), therefore '
'{clue_procedure_code} may be covered if medical necessity is established, '
'despite {check_specific_condition} per {policy_attribution}.',
'10500N': 'Procedure code {clue_procedure_code} {check_specific_reason}, and '
'neither age nor condition exception criteria were fulfilled, according '
'to {policy_attribution}. Age exception criteria: {age_exception_rationale} '
'where patient age is {patient_age}. Diagnosis code {clue_diagnosis} does '
'not fulfill condition exception criteria.',
}
tree = Tree(ExceptionResult, name='Exception Subtree (library)')
@tree.node(10000, 10100, 10200)
def is_there_applicable_age_exception(data: Dict[str, Any], registry: Registry) -> bool:
""" Is there an applicable patient age exception? """
row = data['reference_data_row']
# Set parameters policy_attribution and clue_procedure_code:
registry['policy_attribution'] = row['policy_attribution']
registry['clue_procedure_code'] = registry.clue.procedure_code
if row['age_exception_indicator'].strip() != '1':
return False
# Set parameters age_exception_rationale and patient_age:
row = data['reference_data_row']
age_uom = row['age_exception_uom'].lower().strip()
registry['age_exception_rationale'] = get_age_exception_rationale(data)
patient_age_in_days = get_patient_age_in_days(registry.clue)
patient_age_in_uom = days_to_uom(patient_age_in_days, age_uom)
registry['patient_age'] = f'{patient_age_in_uom} {age_uom}(s)'
return True
@tree.node(10100, '10100Y', 10300)
def does_patient_meet_age_exception_criteria(clue: ClaimLineFocus, data: Dict[str, Any]) -> bool:
""" Does the |patient age| meet the patient age exception criteria? """
row = data['reference_data_row']
relation = row['age_exception_relation'].strip()
age_max = row['age_exception_max_value']
age_min = row['age_exception_min_value']
age_uom = row['age_exception_uom'].lower().strip()
patient_age_in_days = get_patient_age_in_days(clue)
patient_age_in_uom = days_to_uom(patient_age_in_days, age_uom)
return is_patient_age_relation_satisfied(
patient_age_in_uom,
relation,
age_min,
age_max
)
@tree.node(10200, 10400, '10200N')
@tree.node(10300, 10500, '10300N')
def is_icd_exception_indicator_on(data: Dict[str, Any]) -> bool:
""" Does {icd_exception_indicator} = '1'? """
row = data['reference_data_row']
return row['icd_exception_indicator'].strip() == '1'
@tree.node(10400, '10400Y', '10400N')
@tree.node(10500, '10500Y', '10500N')
def does_patient_meet_icd_exception_criteria(
clue: ClaimLineFocus,
data: Dict[str, Any],
registry: Registry) -> bool:
""" Do any of the CUE diagnosis codes meet the Condition Exception criteria? """
row = data['reference_data_row']
icd10_collection = ICD10Collection(clue.service_period.start, row['icd_exception_list'])
diagnosis = registry.clue.diagnosis_codes
for diag_code in diagnosis:
if diag_code in icd10_collection:
registry['diagnosis_exception'] = diag_code
return True
registry['clue_diagnosis'] = f'{diagnosis}'
return False
def get_age_exception_rationale(data: Dict[str, Any]) -> str:
row = data['reference_data_row']
relation = row['age_exception_relation'].strip()
age_max = row['age_exception_max_value']
age_min = row['age_exception_min_value']
age_uom = row['age_exception_uom'].lower().strip()
return get_age_rationale(relation, age_min, age_max, age_uom) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/mpe/subtrees/exception.py | 0.758421 | 0.235032 | exception.py | pypi |
from typing import List, Set
from enginelib.rds.client import db_name, db_client
from enginelib.errors import Error
from functools import lru_cache
class RialticDataInterfaceError(Error):
def __init__(self, message="there was a problem while fetching reference data"):
super().__init__(f'Error: {message}')
class ProviderTaxonomyCrosswalk:
@staticmethod
@lru_cache()
def medicare_specialty_code(taxonomy: str) -> str:
# noinspection SqlNoDataSourceInspection,SqlDialectInspection
query = f'''SELECT "medicare_specialty_code"
FROM {db_name}.ptxw_records
WHERE "provider_taxonomy_code"='{taxonomy}'
LIMIT 1;
'''
try:
records, err = db_client.GetReferenceData('transaction_id', query)
records = records or list()
if records:
return records[0]['medicare_specialty_code'].strip()
except (IndexError, KeyError, TypeError):
pass
raise Error('Could not fetch data from Provider Taxonomy Crosswalk'
f' reference data set for taxonomy code {taxonomy}.')
@staticmethod
@lru_cache()
def taxonomy_codes_for(specialty: str) -> List[str]:
# noinspection SqlNoDataSourceInspection,SqlDialectInspection
query = f'''
SELECT "provider_taxonomy_code"
FROM {db_name}.ptxw_records
WHERE "medicare_specialty_code"='{specialty}'
'''
try:
records, err = db_client.GetReferenceData('transaction_id', query)
if records:
return [record['provider_taxonomy_code'].strip() for record in records or list()]
except (IndexError, KeyError, TypeError):
pass
raise Error(f'Could not fetch data from Provider Taxonomy Crosswalk'
f' reference data set for specialty code {specialty}.')
@staticmethod
@lru_cache()
def get_taxonomy_groups(taxonomy_code: str) -> Set[str]:
        # a valid taxonomy code is a 10-character alphanumeric string
if not (taxonomy_code.isalnum() and len(taxonomy_code) == 10):
return set()
# noinspection SqlNoDataSourceInspection,SqlDialectInspection
query = (
f'SELECT "medicare_specialty_code" '
f'FROM {db_name}.ptxw_records '
f"WHERE \"provider_taxonomy_code\"='{taxonomy_code}'"
)
ref_data, err = db_client.GetReferenceData("not_available", query)
if err:
raise RialticDataInterfaceError(message=str(err))
if ref_data:
return {r["medicare_specialty_code"] for r in ref_data}
else:
return set()
@classmethod
@lru_cache()
def is_taxonomy_code_valid(cls, taxonomy_code: str) -> bool:
return bool(cls.get_taxonomy_groups(taxonomy_code)) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/rds/provider_taxonomy_crosswalk.py | 0.700383 | 0.239083 | provider_taxonomy_crosswalk.py | pypi |
import re
from typing import List, Dict, Union
from enginelib.rds.client import db_client, db_name
from enginelib.errors import Error
from functools import lru_cache
class DrugsAndBiologicals:
@staticmethod
def get_body_surface_dosed_data(drug_code: str, ndc_code: str, transaction_id: str) -> list:
if ndc_code:
query = f'''
SELECT "dxcode", "hcpcsconversion", "drugcode", "redlevel", "yellowlevela", "yellowlevelb", "greenlevela", "greenlevelb", "muot", "timeperiod", "timeperioduom", "mfot", "ptagemin", "ptageminuom", "ptagemax", "ptagemaxuom", "redlevelaverage", "yellowlevelaaverage", "yellowlevelbaverage", "greenlevelaaverage", "greenlevelbaverage", "doseunitofmeasure", "singleormultidose", "dailymaximumunitsround", "dailymaximumunits"
FROM {db_name}.body_surface_dosed
WHERE "drugcode"='{drug_code}' AND "associatedndcs"='{ndc_code}'
'''
else:
query = f'''
SELECT "dxcode", "hcpcsconversion", "drugcode", "redlevel", "yellowlevela", "yellowlevelb", "greenlevela", "greenlevelb", "muot", "timeperiod", "timeperioduom", "mfot", "ptagemin", "ptageminuom", "ptagemax", "ptagemaxuom", "redlevelaverage", "yellowlevelaaverage", "yellowlevelbaverage", "greenlevelaaverage", "greenlevelbaverage", "doseunitofmeasure", "singleormultidose", "dailymaximumunitsround", "dailymaximumunits"
FROM {db_name}.body_surface_dosed
WHERE "drugcode"='{drug_code}'
'''
refdata, err = db_client.GetReferenceData(transaction_id, query)
if err:
raise Error(err)
if refdata:
return [
{key: str(value) for key, value in row.items()}
for row in refdata
]
return []
@staticmethod
def get_weight_dosed_data(drug_code: str, ndc_code: str, transaction_id: str) -> list:
if ndc_code:
query = f'''
SELECT "dxcode", "hcpcsconversion", "drugcode", "redlevel", "yellowlevela", "yellowlevelb", "greenlevela", "greenlevelb", "muot", "timeperiod", "timeperioduom", "mfot", "ptagemin", "ptageminuom", "ptagemax", "ptagemaxuom", "redlevelaverage", "yellowlevelaaverage", "yellowlevelbaverage", "greenlevelaaverage", "greenlevelbaverage", "doseunitofmeasure", "singleormultidose", "dailymaximumunitsround", "dailymaximumunits"
FROM {db_name}.weight_dosed
WHERE "drugcode"='{drug_code}' AND "associatedndcs"='{ndc_code}'
'''
else:
query = f'''
SELECT "dxcode", "hcpcsconversion", "drugcode", "redlevel", "yellowlevela", "yellowlevelb", "greenlevela", "greenlevelb", "muot", "timeperiod", "timeperioduom", "mfot", "ptagemin", "ptageminuom", "ptagemax", "ptagemaxuom", "redlevelaverage", "yellowlevelaaverage", "yellowlevelbaverage", "greenlevelaaverage", "greenlevelbaverage", "doseunitofmeasure", "singleormultidose", "dailymaximumunitsround", "dailymaximumunits"
FROM {db_name}.weight_dosed
WHERE "drugcode"='{drug_code}'
'''
refdata, err = db_client.GetReferenceData(transaction_id, query)
if err:
raise Error(err)
if refdata:
return [
{key: str(value) for key, value in row.items()}
for row in refdata
]
return []
@staticmethod
@lru_cache()
def get_static_dosed_data(drug_code: str, ndc_code: str, transaction_id: str) -> Union[List[Dict], None]:
# sanitization check
if not re.match(r"^[a-zA-Z][a-zA-Z0-9]{2}(\.[a-zA-Z0-9]{0,4})?", drug_code):
return None
if ndc_code:
query = '''
SELECT * FROM {}.static_dosed WHERE "drugcode"='{}' AND "associatedndcs"='{}'
'''.format(db_name, drug_code, ndc_code)
else:
query = '''
SELECT * FROM {}.static_dosed WHERE "drugcode"='{}'
'''.format(db_name, drug_code)
reference_data, err = db_client.GetReferenceData(transaction_id, query)
if err or reference_data is None:
return None
return reference_data | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/rds/drugs_and_biologicals.py | 0.720958 | 0.257045 | drugs_and_biologicals.py | pypi |
from enum import Enum
from typing import Dict, List, Tuple, Set
import datetime as dt
from enginelib.errors import Error
from enginelib.rds.client import db_client, db_name
from enginelib.rds.utils import sql_sanitize
class CodeType(Enum):
valid = "valid"
deleted = "deleted"
incomplete = "incomplete"
class ICD10:
def __init__(self, service_date: dt.date):
self.icd10_hash: Dict[str, Dict[str, str]] = dict()
self.service_date: dt.date = service_date
def __contains__(self, code: str):
return self.is_present(code)
def code_info(self, code: str) -> Dict[str, str]:
formatted_code = code.replace(".", "")
if code not in self.icd10_hash:
self.icd10_hash[code] = self._fetch_code_info(formatted_code, self.service_date)
return self.icd10_hash[code]
@staticmethod
def _fetch_code_info(code: str, service_date: dt.date) -> Dict[str, str]:
"""If 'code' is in the ICD10 dataset pertaining the effective date of the CUE and with the appropriate code type,
and if the DB query was successful, this function returns a non-empty
dictionary with the information of the given ICD10 code.
If the DB query was not successful, this function raises a DataError.
If the DB query was successful, but the code was not in the DB, this
function returns an empty dictionary."""
# The validity period of an icd10 dataset for a given year goes from OCT 1 of the previous year
# to SEP 30 of the next
# So the validity period of the 2022 dataset goes from OCT 1 2021 to SEP 30 2022
year = service_date.year
if service_date >= dt.date(year, 10, 1):
year = year + 1
table_name = f"icd10_{year}" if year is not None else "icd10"
# noinspection SqlResolve
query = f'''
SELECT *
FROM {db_name}.{table_name}
WHERE "code"='{code}';
'''
ref_data, err = db_client.GetReferenceData("multi_policy_prefilter", query)
if err:
raise Error(f'Not able to access ICD10 reference data: {err}.')
try:
if ref_data is None or not isinstance(ref_data, list) or len(ref_data) == 0:
return dict()
return ref_data[0]
except (IndexError, KeyError, TypeError):
raise Error(f'Not able to access ICD10 reference data to fetch description for code {code}.')
def is_present(self, code: str) -> bool:
code_info = self.code_info(code)
return bool(code_info)
def in_range(self, code: str, code_min: str, code_max: str) -> bool:
"""The comparison below is alphanumeric (i.e. string)."""
try:
return code_min <= code <= code_max and self.is_present(code)
except TypeError:
raise Error(f"Either the code minimum: {code_min} or the code maximum: {code_max} "
f"is not a string.")
class ICD10Collection:
def __init__(self, service_date: dt.date, icd_list_of_codes_and_ranges: str):
self.icd10_instance = ICD10(service_date)
self.ranges, self.codes = self._process_list_of_codes_and_ranges(icd_list_of_codes_and_ranges)
def __contains__(self, code: str) -> bool:
if code in self.codes:
return True
for _range in self.ranges:
min_c, max_c = _range.split("-")
if self.icd10_instance.in_range(code, min_c, max_c):
return True
return False
@staticmethod
def _process_list_of_codes_and_ranges(icd_list_of_codes_and_ranges: str = "") -> Tuple[Set[str], Set[str]]:
normalized_icd_list = icd_list_of_codes_and_ranges.replace(',', '|')
ranges = {item.strip() for item in normalized_icd_list.split('|') if "-" in item}
codes = {item.strip() for item in normalized_icd_list.split('|') if "-" not in item}
return ranges, codes
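    # Illustrative example (hypothetical code list): both ',' and '|' act as
    # separators, and any item containing '-' is treated as a range:
    #   _process_list_of_codes_and_ranges('A00-A09|B15, C10')
    #   -> ranges == {'A00-A09'}, codes == {'B15', 'C10'}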
class ICD10_DX10:
@staticmethod
def query_dx_codes(codes: List[str], start_date: dt.date, transaction_id: str) -> Dict[str, Dict]:
code_set = ", ".join(f"'{sql_sanitize(x)}'" for x in codes)
# noinspection SqlResolve,SqlDialectInspection,SqlNoDataSourceInspection
query = f'''
SELECT *
FROM {db_name}.icd10_dx10
WHERE diagnosis IN ({code_set});
'''
records, err = db_client.GetReferenceData(transactionId=transaction_id, query=query)
if err:
raise Error(f"Unable to query ICD-10 DX10 Data, error: {str(err)}")
return {rec['diagnosis']: rec for rec in (records or []) if
rec['effective_start_date'] <= start_date.strftime("%Y-%m-%d") < str(rec['effective_end_date'])} | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/rds/icd10.py | 0.791378 | 0.335514 | icd10.py | pypi |
from __future__ import annotations
from typing import Union, Iterable
import os, re, functools
import datetime as dt
from dataUtils.DBClient import DBClient
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.errors import Error
from enginelib.rds.utils import sql_sanitize
_bad_chars = re.compile(r'[^0-9a-zA-Z\-_]')
DB_CLIENT = DBClient.GetDBClient(os.getenv('APIKEY'))
DB_NAME = os.getenv('RIALTIC_REF_DB')
@functools.lru_cache()
def _sql_query(tx_id: str, proc_code: str) -> MFSResultsList:
# noinspection SqlResolve,SqlNoDataSourceInspection,SqlDialectInspection
query = f'''
SELECT * FROM "{sql_sanitize(DB_NAME)}".mfs
WHERE "HCPCS" = '{sql_sanitize(proc_code)}'
'''
rows, error = DB_CLIENT.GetReferenceData(tx_id or "<no_tx_id>", query)
if not error:
return MFSResultsList(map(MFSResult, rows or ()), no_results=MFSNoResults.NoMatchingProcedure)
raise Error(f"Unable to query the MFS set, error: {str(error)}")
def _filter_dates(start_date: dt.date, rows: MFSResultsList) -> MFSResultsList:
return MFSResultsList(filter(lambda entry: entry.effective_start <= start_date
< entry.effective_end, rows), no_results=MFSNoResults.NoMatchingDateRange)
def _date_read(string: str) -> dt.date:
"""Convert 'strings' in the `mfs` table to an actual date object."""
return dt.datetime.strptime(string.strip().zfill(8), "%m%d%Y").date()
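# Hedged example: dates in the `mfs` table are stored as MMDDYYYY strings, sometimes
# with the leading zero dropped, hence the zfill(8) above.
#   _date_read('1012021')   -> datetime.date(2021, 1, 1)
#   _date_read('12312021')  -> datetime.date(2021, 12, 31)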
class MFSQuery:
""" MFSQuery(clue).first(filter_mods={}, restrict_dates=True).pctc_indicator"""
def __init__(self, clue: ClaimLineFocus):
self.clue = clue
self.sql_rows = _sql_query(clue.request.transaction_id, clue.procedure_code)
self.effective_rows = _filter_dates(clue.service_period.start, self.sql_rows)
def first(self, filter_mods=None, restrict_dates=True) -> Union[MFSNoResults, MFSResult]:
return self.rows(filter_mods, restrict_dates).first()
def rows(self, filter_mods=None, restrict_dates=True) -> MFSResultsList:
valid_rows = self.effective_rows if restrict_dates else self.sql_rows
if filter_mods:
valid_rows = MFSResultsList(filter(lambda x: x.listed_mod in filter_mods, valid_rows),
no_results=MFSNoResults.NoMatchingMods)
return valid_rows
class MFSResultsList(list):
def __init__(self, rows: Iterable[MFSResult], no_results):
self.no_results = no_results
super().__init__(rows)
def first(self) -> Union[MFSNoResults, MFSResult]:
if not len(self):
return self.no_results
return self[0]
class LocalEnum(type):
def __new__(mcs, _name, _bases, _dict):
obj = super().__new__(mcs, _name, _bases, _dict)
obj.__instances__ = []
for Name, Value in _dict.items():
if Name[0] != "_" and isinstance(Value, str):
option = obj(Name, Value)
setattr(obj, Name, option)
return obj
def __contains__(cls, item):
return item in cls.__instances__
class MFSNoResults(metaclass=LocalEnum):
def __init__(self, name, value):
self.name, self.value = name, value
self.__class__.__instances__.append(self)
def __getattr__(self, item):
return self
def __contains__(self, item) -> bool:
return False
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
return f"<NoMFSResults.{self.name}, reason = {self.value}>"
def __str__(self) -> str:
return repr(self)
NoMatchingField = "This field is not contained on the entry."
NoMatchingProcedure = "No matching procedure code when querying the DB"
NoMatchingDateRange = "No entry matching the required date range"
NoMatchingMods = "No entry matching the required mods"
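# Sketch of how the sentinel above behaves (not part of the original module): the
# LocalEnum metaclass turns each string-valued class attribute into an MFSNoResults
# instance, every instance is falsy, and attribute access on it returns the sentinel
# itself, so a chained lookup such as
#   MFSQuery(clue).first(filter_mods={'26'}).pctc_indicator
# stays safe even when no matching row was found ('26' here is a hypothetical modifier).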
class MFSResult:
def __init__(self, data):
self.data = data
def __getitem__(self, item):
""" throws KeyError if a field is missing"""
return self.data[item]
def get(self, item, default=MFSNoResults.NoMatchingField):
""" returns MFSNoResults.NoMatchingField if a field is missing"""
return self.data.get(item, default)
def float(self, name) -> Union[float, MFSNoResults]:
""" get value as a float"""
return _float(self.get(name))
def string(self, name) -> Union[str, MFSNoResults]:
""" get value as a stripped string"""
return _str(self.get(name))
@property
def listed_mod(self) -> str:
return self.string('MOD') or ''
# rjdrn: I'd rather missing dates fail here than elsewhere
@property
def effective_start(self) -> dt.date:
return _date_read(self['EFFECTIVESTARTDATE'])
@property
def effective_end(self) -> dt.date:
return _date_read(self['EFFECTIVEENDDATE'])
# rjdrn: IMO, nil values are best handled by individual engines
@property
def global_days(self) -> Union[str, MFSNoResults]:
return self.string('GLOBDAYS')
@property
def pctc_indicator(self) -> Union[str, MFSNoResults]:
return self.string('PCTCIND')
@property
def mult_proc(self) -> Union[str, MFSNoResults]:
return self.string('MULTPROC')
@property
def status_code(self) -> Union[str, MFSNoResults]:
return self.string('STATUSCODE')
# facilities
@property
def facility_total(self) -> Union[float, MFSNoResults]:
return self.float('FACILITYTOTAL')
@property
def non_facility_total(self) -> Union[float, MFSNoResults]:
return self.float('NONFACILITYTOTAL')
# surgery
@property
def asst_surgeons(self) -> Union[str, MFSNoResults]:
return self.string('ASSTSURG')
@property
def co_surgeons(self) -> Union[str, MFSNoResults]:
return self.string('COSURG')
def _convert(fn, text):
return fn(text) if text else text
def _float(text):
return _convert(float, text)
def _str(text):
if text is not None:
return str(text).strip() | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/rds/mfs2.py | 0.858896 | 0.165998 | mfs2.py | pypi |
import datetime as dt
import functools
from typing import List, Dict, Any
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.errors import Error
from enginelib.rds.client import db_client, db_name
from enginelib.rds.utils import sql_sanitize
def execute(transaction_id: str, query: str) -> List[Dict[str, Any]]:
records, error = db_client.GetReferenceData(transaction_id or "missing", query)
if error:
        raise Error('Not able to access the MFS reference data set.')
return records or list()
@functools.lru_cache()
def mfs_records_for(transaction_id: str, procedure_code: str) -> List[Dict[str, Any]]:
# noinspection SqlResolve,SqlNoDataSourceInspection,SqlDialectInspection
query = f'''
SELECT * FROM "{sql_sanitize(db_name)}".mfs
WHERE "HCPCS" = '{sql_sanitize(procedure_code)}'
'''
return execute(transaction_id, query)
def mfs_relevant_mod(mod_list: List[str]) -> str:
"""When querying the Medicare Physician Fee Schedule (table `mfs`)
we need to see if one of these modifiers is present to correctly
get the desired row.
Content team: these modifiers should never come together in a claim.
"""
relevant_mod_list = ['53', '26', 'TC']
for mod in relevant_mod_list:
if mod in mod_list:
return mod
return ''
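# Illustrative example: the first of '53', '26', 'TC' (in that order) that appears
# on the claim line wins; an empty string means none of them is present.
#   mfs_relevant_mod(['TC', '26'])  -> '26'
#   mfs_relevant_mod(['59'])        -> ''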
@functools.lru_cache()
def date_from(string: str) -> dt.date:
"""Convert strings in the `mfs` table to an actual date object."""
return dt.datetime.strptime(string.zfill(8), "%m%d%Y").date()
def mfs_field_for(field: str, clf: ClaimLineFocus) -> Any:
"""Given the column name in table `mfs` (the Medicare Physician Fee Schedule) in
the variable `fields`, and given the claim line in variable `clf`, this function
finds the relevant row in the `mfs` table -- taking into account the service date
and the modifiers (relevant modifiers are '53', '26' and 'TC') -- and returns the
value in the desired field for the applicable row that was found."""
records = mfs_records_for(clf.request.transaction_id, clf.procedure_code)
mod = mfs_relevant_mod(clf.modifier_codes)
from_date = clf.service_period.start
effective_records: List[Dict[str, str]] = list()
for record in records:
effective_start = date_from(record['EFFECTIVESTARTDATE'].strip())
effective_end = date_from(record['EFFECTIVEENDDATE'].strip())
if effective_start <= from_date < effective_end:
effective_records.append(record)
if not effective_records:
raise Error(f'The date of service in the claim line ({from_date}) does not match any '
f'effective period in the MFS reference data set for procedure code '
f'{clf.procedure_code}. Unable to determine the value of {field} for '
f'this claim line.')
for record in effective_records:
record_mod = record['MOD'] or ''
if record_mod == mod:
return record[field]
# If there was no modifier-specific row, use the one with empty MOD.
for record in effective_records:
if not record['MOD']:
# None or empty string will do!
return record[field]
required_modifiers = {record["MOD"] for record in effective_records}
raise Error(f'For the date of service in the claim line ({from_date}), the MFS reference data '
f'set only contains rows for procedure code {clf.procedure_code} when one of the '
f'following modifiers is present: {required_modifiers}.')
def global_days(clf: ClaimLineFocus) -> str:
return str(mfs_field_for('GLOBDAYS', clf)).strip()
def pctc_indicator(clf: ClaimLineFocus) -> str:
return str(mfs_field_for('PCTCIND', clf)).strip()
def facility_total(clf: ClaimLineFocus) -> float:
try:
return float(mfs_field_for('FACILITYTOTAL', clf))
except ValueError:
raise Error(f'Value in column "FACILITYTOTAL" for procedure code {clf.procedure_code}, is not a number.')
def non_facility_total(clf: ClaimLineFocus) -> float:
try:
return float(mfs_field_for('NONFACILITYTOTAL', clf))
except ValueError:
raise Error(f'Value in column "NONFACILITYTOTAL" for procedure code {clf.procedure_code}, is not a number.')
def mult_proc(clf: ClaimLineFocus) -> str:
return str(mfs_field_for('MULTPROC', clf)).strip()
def status_code(clf: ClaimLineFocus) -> str:
return str(mfs_field_for('STATUSCODE', clf)).strip().upper() | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/rds/mfs.py | 0.803135 | 0.326781 | mfs.py | pypi |
import enum
from copy import copy
from typing import Dict, Any, List, Iterable, Callable
from enginelib.decor.node import Label
from enginelib.decor.predicate import Predicate
from enginelib.decor.tree import Tree
from enginelib.errors import Error
class DFS(enum.Enum):
UNSEEN = 0
DISCOVERED = 1
EXPLORED = 2
class Connection:
def __init__(self, label: Label, flow: 'Flow', ignore_insight: bool):
self.label = str(label)
self.flow = flow
self.ignore_insight = ignore_insight
class Flow:
_count: int = 0
def __init__(self, tree: Tree):
self.tree = copy(tree)
if self.tree.root:
raise Error(f'Flow cannot be created if the given tree has already been assembled.')
self.connections: Dict[str, Connection] = dict()
self.__class__._count += 1
self.number = self.__class__._count
self._has_on_start = False
def connect(self, labels: Any, flow: 'Flow', ignore_insight: bool = False):
labels = self._validate_and_normalize(labels)
for label in labels:
if label in self.connections:
raise Error(f'There is already a connection at [{self.tree.name}]@{label} '
f'for flow #{self.number}.')
if label not in self.tree.ResultClass.insight_type:
raise Error(f'Not possible to create a connection at [{self.tree.name}]@{label} '
f'for flow #{self.number}, as the label "{label}" does not exist in the tree.')
conn = Connection(label, flow, ignore_insight)
if flow._has_cycle(status={self.number: DFS.DISCOVERED}):
self._log(f'Flow #{self.number} [{self.tree.name}]@{label}')
raise Error(f'Circular flow detected. Please, make sure flow connections are acyclic.')
self.connections[label] = conn
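    # Hedged usage sketch (the trees and the '300Y' label below are hypothetical):
    #   prerequisite_flow = Flow(prerequisite_tree)
    #   main_flow = Flow(main_tree)
    #   prerequisite_flow.connect('300Y', main_flow, ignore_insight=True)
    # Connecting main_flow back to prerequisite_flow would raise an Error, because
    # _has_cycle() runs a depth-first search over the existing connections.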
def _has_cycle(self, status: Dict[int, DFS]) -> bool:
"""Depth first search."""
if status.setdefault(self.number, DFS.UNSEEN) == DFS.DISCOVERED:
self._log('Cycle detected (printed backwards): ')
self._log(f'Flow #{self.number} [{self.tree.name}]')
return True
status[self.number] = DFS.DISCOVERED
for label, conn in self.connections.items():
number = conn.flow.number
if status.setdefault(number, DFS.UNSEEN) == DFS.EXPLORED:
continue
if conn.flow._has_cycle(status):
self._log(f'Flow #{self.number} [{self.tree.name}]@{label}')
return True
status[self.number] = DFS.EXPLORED
return False
def _validate_and_normalize(self, labels: Any) -> List[str]:
valid_labels = self.tree.ResultClass.insight_type
if labels == any or labels == all:
labels = list(valid_labels)
elif isinstance(labels, str) or isinstance(labels, int):
labels = [labels]
elif not isinstance(labels, Iterable):
raise TypeError(f'Parameter `labels` should be a list of labels or a single label.')
normalized_labels: List[str] = list()
for label in labels:
if label not in valid_labels:
raise ValueError(f'Invalid label "{label}" given in the `labels` parameter.')
normalized_labels.append(str(label))
return normalized_labels
def on_start(self, func: Callable[..., Any]):
if self._has_on_start:
raise Error('Only one function may be decorated with `@on_start` for each flow.')
self._has_on_start = True
predicate = Predicate(func, require_return_bool=False)
root_label = self.tree.root_label
self.tree.execute_before.setdefault(root_label, list()).insert(0, predicate)
return func
@staticmethod
def _log(*args, **kwargs):
print(*args, **kwargs) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/flow.py | 0.819785 | 0.297349 | flow.py | pypi |
import datetime as dt
from string import Formatter
from typing import Dict, List, Optional, Callable, Tuple, Any, Iterator
from fhir.resources.claim import Claim
from schema.insight_engine_request import InsightEngineRequest
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.rds.client import db_client
ClaimDirectory = Dict[str, ClaimFocus]
ClaimLineDirectory = Dict[str, List[ClaimLineFocus]]
class Registry:
"""Keeps the "state" of tree traversals:
1. keeps track of qualifying claims and claim lines while the one CLUE is
going along the branches of a decision tree.
2. Serves as a storage for custom parameters that are set along the way,
i.e. the traversal of the tree(s)."""
#: Keeps a dictionary of qualifying OCs (claimNum is the index)
dict_oc: ClaimDirectory
#: list of qualifying OCL-S
list_ocl_s: List[ClaimLineFocus]
#: for each OC, keeps a list of its qualifying OCLs
dict_ocl_d: ClaimLineDirectory
_computed_parameters_values: Dict[str, Any]
def __init__(self, cue: ClaimFocus, clue: ClaimLineFocus, ocs: Optional[List[ClaimFocus]] = None,
data: Optional[Dict[str, Any]] = None):
#: the auxiliary data needed in certain predicate functions
self.data: Dict[str, Any] = data or dict()
#: the claim under evaluation
self.cue = cue
#: the claim line under evaluation
self.clue = clue
self._computed_parameters_values = dict()
self._param_lookup = dict()
self.dict_oc = dict()
self.list_ocl_s = list()
self.dict_ocl_d = dict()
self._historical_claims = ocs
self._index_oc_and_ocl(ocs)
def _index_oc_and_ocl(self, historical_claims: Optional[List[ClaimFocus]] = None):
self.dict_oc = dict()
self.list_ocl_s = list()
self.dict_ocl_d = dict()
if historical_claims is not None:
clue_seq = self.clue.sequence
self.dict_oc = {oc.claim_num: oc for oc in historical_claims if oc != self.cue}
self.list_ocl_s = [clf for clf in self.cue.lines if clf.sequence != clue_seq]
self.dict_ocl_d = {oc.claim_num: oc.lines for oc in self.dict_oc.values()}
def fetch_history(self, max_days_back: int, min_days_back: int):
"""Calls GetHistory(transaction_id, start_date, end_date) with the requested
period given by:
start_date = clue.service_period.start - max_days_back
end_date = clue.service_period.end - min_days_back
This function does not return any value, but rather updates the set of
qualifying OC/OCL-S/OCL-D in the registry to the set of returned claim and
claim lines.
You may give negative values to the parameters to retrieve claims in the
"future" of the CLUE.
"""
if max_days_back < min_days_back:
max_days_back, min_days_back = min_days_back, max_days_back
start_date = self.clue.service_period.start - dt.timedelta(days=max_days_back)
end_date = self.clue.service_period.end - dt.timedelta(days=min_days_back)
history, err = db_client.GetHistory(self.clue.request.transaction_id, start_date, end_date)
historical_claims: List[ClaimFocus] = list()
for claim_obj in history:
claim = Claim.parse_obj(claim_obj)
cf = ClaimFocus(
claim=claim,
request=InsightEngineRequest.construct(claim=claim)
)
historical_claims.append(cf)
self._historical_claims = historical_claims
self._index_oc_and_ocl(historical_claims)
def reset_history(self):
"""Used by a MPEs. Sometimes, when going from a tree to another, history needs to
be considered again and filtered according to different criteria. This function
makes it easy to start anew with the original set of qualifying claims and claim
lines as it was fetched from the platform the first time."""
self._index_oc_and_ocl(self._historical_claims)
def __getitem__(self, param_name: str):
"""Returns the value of a custom parameter."""
return self._computed_parameters_values[param_name]
def __setitem__(self, param_name: str, param_value: Any):
"""Sets the value of a custom parameter."""
self._computed_parameters_values[param_name] = param_value
def __contains__(self, param_name: str) -> bool:
"""Checks whether a custom parameter has been defined."""
return param_name in self._computed_parameters_values
def is_there_oc_such_that(self, func: Callable[..., bool]) -> bool:
"""Filter qualifying OCs with the given function.
Updates dict_ocl_d and dict_oc if at least one OC satisfies
the given predicate function.
Whenever OCs are filtered, OCLs are filtered implicitly as well:
only claim lines inside OCs that passed the filter should stay in
the registry (i.e. in dict_ocl_d).
Args:
func: a predicate function accepting exactly one parameter
named 'oc' of type ClaimFocus.
Returns:
True if at least one OC satisfies the given predicate function and False otherwise.
"""
dict_oc = {oc_id: oc for oc_id, oc in self.dict_oc.items() if func(oc=oc)}
if not dict_oc:
# Filter failed: no OC satisfied the given func.
return False
# Filter succeeded: at least one OC satisfied the given predicate,
# so we update everything with filtered objects (OCs and OCLs).
self.dict_oc = dict_oc
self.dict_ocl_d = {oc_id: self.dict_ocl_d[oc_id] for oc_id, oc in dict_oc.items()}
# Attribute list_ocl_s remains the same, because the filter was
# not about claim lines in the CUE.
return True
def _get_ocl_d_such_that(self, func: Callable[..., bool], param_name: str) \
-> Tuple[ClaimDirectory, ClaimLineDirectory]:
dict_oc: ClaimDirectory = dict()
dict_ocl_d: ClaimLineDirectory = dict()
for claim_id, lines in self.dict_ocl_d.items():
remaining_lines = [line for line in lines if func(**{param_name: line})]
if remaining_lines:
dict_ocl_d.setdefault(claim_id, remaining_lines)
dict_oc[claim_id] = self.dict_oc[claim_id]
return dict_oc, dict_ocl_d
def is_there_ocl_such_that(self, func: Callable[..., bool]) -> bool:
"""Filter qualifying OCLs (same or different claim) with the given
predicate function.
Updates dict_ocl_d, list_ocl_s and dict_oc if at least one OCL
(same or different claim) satisfies the given predicate function.
Keep dict_ocl_d, list_ocl_s and dict_oc unchanged otherwise.
Whenever OCLs are filtered, OCs are filtered implicitly as well:
only claims containing some OCL that passed the filter should stay
in the registry (i.e. in dict_oc).
Args:
func: a predicate function accepting exactly one parameter
named 'ocl' of type ClaimLineFocus.
Returns:
True if at least one OCL (same or different claim) satisfies the
given predicate function and False otherwise.
"""
list_ocl_s = [clf for clf in self.list_ocl_s if func(ocl=clf)]
dict_oc, dict_ocl_d = self._get_ocl_d_such_that(func, 'ocl')
if list_ocl_s or dict_ocl_d:
# Filter succeeded: at least one OCL satisfied the given predicate,
# so we update everything with filtered objects (OCs and OCLs).
self.dict_oc = dict_oc
self.list_ocl_s = list_ocl_s
self.dict_ocl_d = dict_ocl_d
return True
return False
def is_there_ocl_s_such_that(self, func: Callable[..., bool]) -> bool:
"""Filter qualifying OCL-S according to the given predicate function.
Update list_ocl_s only if at least one OCL (same claim) satisfies
the given predicate.
Args:
func: a predicate function accepting exactly one parameter
named 'ocl_s' of type ClaimLineFocus.
Returns:
True if at least one OCL (same claim) satisfies the given
predicate function, False otherwise.
"""
list_ocl_s = [clf for clf in self.list_ocl_s if func(ocl_s=clf)]
if list_ocl_s:
# Filter succeeded: at least one OCL (same claim) satisfied the given
# predicate, so we update list_ocl_s.
self.list_ocl_s = list_ocl_s
# Attributes dict_oc and dict_ocl_d remain the same, because the filter
# was not about claim lines in other claims.
return True
return False
def is_there_ocl_d_such_that(self, func: Callable[..., bool]) -> bool:
"""Filter qualifying OCL (different claim) according to the given
predicate function.
Update dict_ocl_d and dict_oc only if at least one OCL (different claim)
satisfies the given predicate.
Keep dict_ocl_d and dict_oc unchanged otherwise.
Args:
func: a predicate function accepting exactly one parameter
named 'ocl_d' of type ClaimLineFocus.
Returns:
True if at least one OCL (different claim) satisfies the given
predicate function, False otherwise.
"""
dict_oc, dict_ocl_d = self._get_ocl_d_such_that(func, 'ocl_d')
if dict_ocl_d:
# Filter succeeded: at least one OCL (different claim) satisfied the given
# predicate, so we update everything with filtered objects (OCs and OCLs).
self.dict_oc = dict_oc
self.dict_ocl_d = dict_ocl_d
# Attribute list_ocl_s remains the same, because the filter was
# not about claim lines in the CUE.
return True
return False
def is_there_acl_such_that(self, func: Callable[..., bool]) -> bool:
"""Not implemented."""
# Here, acl = any claim line
raise NotImplementedError('Predicates involving ACL need to be better understood (by this developer).')
@property
def computed_parameters_values(self):
return self._computed_parameters_values
def iter_oc(self) -> Iterator[ClaimFocus]:
for oc in self.dict_oc.values():
yield oc
def iter_ocl(self) -> Iterator[ClaimLineFocus]:
for ocl_s in self.list_ocl_s:
yield ocl_s
for list_ocl_d in self.dict_ocl_d.values():
for ocl_d in list_ocl_d:
yield ocl_d
def iter_ocl_s(self) -> Iterator[ClaimLineFocus]:
for ocl_s in self.list_ocl_s:
yield ocl_s
def iter_ocl_d(self) -> Iterator[ClaimLineFocus]:
for list_ocl_d in self.dict_ocl_d.values():
for ocl_d in list_ocl_d:
yield ocl_d
def iter_acl(self) -> Iterator[ClaimLineFocus]:
yield self.clue
for ocl_s in self.list_ocl_s:
yield ocl_s
for list_ocl_d in self.dict_ocl_d.values():
for ocl_d in list_ocl_d:
yield ocl_d
def format_text(self, text: str) -> str:
self._param_lookup = self._param_lookup or dict(self.data)
lookup = self._param_lookup
lookup.update(self._computed_parameters_values)
keys = [i[1] for i in Formatter().parse(text) if i[1] is not None]
data_dict = {
key: str(lookup[key]) if key in lookup else '{' f'{key}' '}'
for key in keys
}
return text.format(**data_dict) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/registry.py | 0.904793 | 0.336031 | registry.py | pypi |
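# A hedged usage sketch for the Registry defined above. The ClaimFocus / ClaimLineFocus
# objects (`cue`, `clue`, `history`) are assumed to be built elsewhere, and fetch_history()
# only works where db_client is configured; the predicate below just follows the documented
# rule that its single filtering parameter must be named `ocl`.
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.registry import Registry

def example_registry_usage(cue, clue, history) -> bool:
    registry = Registry(cue=cue, clue=clue, ocs=history, data={'threshold': 3})

    # custom parameters live on the registry itself
    registry['max_units'] = 10           # __setitem__
    assert 'max_units' in registry       # __contains__
    assert registry['max_units'] == 10   # __getitem__

    # narrow the qualifying history to claims up to a year before the CLUE
    registry.fetch_history(max_days_back=365, min_days_back=0)

    # keep only OCLs whose sequence differs from the CLUE's
    def different_sequence(ocl: ClaimLineFocus) -> bool:
        return ocl.sequence != clue.sequence

    return registry.is_there_ocl_such_that(different_sequence)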
from copy import deepcopy, copy
from typing import Dict, Optional, Tuple, Type, Any, Callable, List
from schema.insight_engine_response import InsightType
from enginelib.decor.errors import DecisionTreeError, CustomParameterError
from enginelib.decor.node import AbstractNode, DecisionNode, LeafNode, Label
from enginelib.decor.predicate import Predicate
from enginelib.decor.registry import Registry
from enginelib.decor.result import AbstractResult
from enginelib.simple_insight import SimpleInsight
class Tree:
"""Used by the developer that will build the engine to organize
the predicate functions that implements the various nodes of a
decision tree. The main functionality provided by this class is
the @node decorator.
"""
def __init__(self, ResultClass: Type[AbstractResult], name: str = 'untitled'):
#: the class that holds the data (type and text) of the
# insight nodes in the decision tree of the engine.
self.ResultClass = ResultClass
ResultClass.validate()
#: the root node of the decision tree.
self.root: Optional[AbstractNode] = None
self._root_label: Optional[str] = None
#: FUTURE: stores information about functions that implement
#: custom parameters.
self.parameter_decorated: Dict[str, List[Tuple[str, Callable[..., Any]]]] = dict()
# TEMPORARY DATA STRUCTURES:
# (no longer needed after Tree.assemble() is called)
# self._node_decorated[label] stores triple (yes_label, no_label, predicate_func)
self._node_decorated: Dict[str, Tuple[str, str, Callable[..., bool]]] = dict()
# The node with label x is temporarily stored in self._node[x]
self._node: Dict[str, AbstractNode] = dict()
#: the name of this decision tree (useful when there are many trees).
self.name = name
self.execute_before: Dict[str, List[Predicate]] = dict()
self.execute_after: Dict[str, List[Predicate]] = dict()
def __copy__(self):
new_tree = Tree(self.ResultClass, self.name)
new_tree.execute_before = {**self.execute_before}
new_tree.execute_after = {**self.execute_after}
new_tree.parameter_decorated = {label: copy(obj) for label, obj in self.parameter_decorated.items()}
if self.root is not None:
# the tree is assembled
new_tree.root = deepcopy(self.root)
else:
# the tree is not assembled
new_tree._root_label = self._root_label
new_tree._node_decorated = copy(self._node_decorated)
return new_tree
def before(self, label: Label, *args: Label):
"""Use this decorator to do computations like computing a parameter or filtering OC/OCL
        right before a node predicate is executed. If several functions are decorated with
        @before(label) for the same label, they are executed in the order in which they were
        decorated.
        Functions decorated with @before(label) should not return any value; if one does, the
        return value is ignored."""
def inner(func):
# No validation on func is performed at this moment.
# Validations are delayed until Tree.assemble() is called.
for _label in [label, *args]:
self.execute_before.setdefault(str(_label), list()).append(Predicate(func, require_return_bool=False))
return func
return inner
def after(self, label: Label, *args: Label):
"""Use this decorator to do computations like computing a parameter or filtering OC/OCL
        right after a node predicate is executed. If several functions are decorated with
        @after(label) for the same label, they are executed in the order in which they were
        decorated.
        Functions decorated with @after(label) should not return any value; if one does, the
        return value is ignored."""
def inner(func):
# No validation on func is performed at this moment.
# Validations are delayed until Tree.assemble() is called.
for _label in [label, *args]:
self.execute_after.setdefault(str(_label), list()).append(Predicate(func, require_return_bool=False))
return func
return inner
def node(self, label: Label, yes_label: Label, no_label: Label = ''):
"""Decorates a predicate function that is supposed to implement node
[label] in the decision tree with the YES branch going to node [yes_label]
and NO branch going to node [no_label].
Labels can be int or str, but they are all converted to str internally,
so 100 and '100' refer to the same node."""
label, yes_label, no_label = str(label), str(yes_label), str(no_label)
if not label:
raise DecisionTreeError(message='The label of a node cannot be the empty string.')
if self.root:
raise DecisionTreeError(message='Trying to use decorator @node(...), but the tree is already assembled.')
def decorator(func):
# No validation on func is performed at this moment.
# Validations are delayed until Tree.assemble() is called.
self._node_decorated[label] = yes_label, no_label, func
if self._root_label is None:
# The first function decorated with @node(...) sets the root label.
self._root_label = label
return func
return decorator
def _do_not_use_this_decorator(self, label: Label, name: Optional[str] = None):
"""
        Former @parameter decorator. Removed support for this decorator because most parameters
are defined AFTER a node is executed and not before as it was implemented here.
OLD DOC STRING:
The parameter that is being defined with this decorator will be computed
immediately before the traversal of the tree reaches the node with the given label."""
label = str(label)
if not label:
raise DecisionTreeError(message='The label of a custom parameter cannot be the empty string.')
if self.root:
raise DecisionTreeError(
message='Trying to use decorator @parameter(...), but the tree is already assembled.')
def decorator(func):
# If no name is provided, the parameter name will be the name of the function:
param_name = name or func.__name__
self._validate_parameter_decorated_func(param_name, func)
self.parameter_decorated.setdefault(label, list()).append((param_name, func))
return func
return decorator
def _assemble_recursive(self, label: str, allowed_custom_parameters: Dict[str, Type], dev_mode: bool = False) \
-> AbstractNode:
if label in self._node:
print(f'Warning! The structure is not a tree: node {label} has more than one parent.')
return self._node[label]
if self.ResultClass.is_valid(label):
if label in self.execute_after and not dev_mode:
raise DecisionTreeError(
f'Not allowed to decorate a function with @after({label}) as label "{label}" is an end-node.'
)
return LeafNode(label=label, simple_insight=self.ResultClass.simple_insight(label))
if label not in self._node_decorated:
if dev_mode:
return LeafNode(label=label, simple_insight=SimpleInsight(
insight_type=InsightType.Error,
text='MISSING NODE'
))
else:
raise DecisionTreeError(message=f'Node {label} not found when assembling the tree.')
new_custom_parameters = {
param_name: func.__annotations__['return']
for param_name, func in self.parameter_decorated.get(label, list())
}
allowed_custom_parameters = {**allowed_custom_parameters, **new_custom_parameters}
yes_label, no_label, func = self._node_decorated.pop(label)
predicate = Predicate(func, allowed_custom_parameters)
yes_node = self._assemble_recursive(yes_label, allowed_custom_parameters, dev_mode=dev_mode)
no_node = self._assemble_recursive(no_label, allowed_custom_parameters, dev_mode=dev_mode)
node = DecisionNode(label, predicate, yes_node, no_node)
self._node[label] = node
return node
def assemble(self, new_root_label: Optional[Label] = None, dev_mode: bool = False):
"""After all predicate functions have been decorated with @node,
and before the tree can be traversed by the Policy class, it must
be assembled. This method calls a recursive function that creates
each node and links them together according to the given structure.
        The developer does not need to call this method.
"""
if self.root:
# Tree already assembled: there is nothing to be done.
return
if new_root_label is not None:
self._root_label = str(new_root_label)
if self._root_label is None:
raise DecisionTreeError(message='No root node was defined for this tree.')
self.root = self._assemble_recursive(self._root_label, allowed_custom_parameters=dict(), dev_mode=dev_mode)
# Verify each @node decorated function appears in the tree.
if self._node_decorated:
print(end='WARNING! There are nodes that are not connected to the root of the tree:')
print(self._node_decorated.keys())
# Verify each @parameter decorated function is associated to a node of the tree.
_remaining_parameters = set(self.parameter_decorated.keys()).difference(self._node.keys())
for label in _remaining_parameters:
print('WARNING! The list of custom parameters',
[param_name for param_name, _ in self.parameter_decorated[label]],
f'was defined at node {label}, but this node is either not defined or',
'not connected to the root of the tree.')
del self._node
del self._node_decorated
def print(self):
"""Prints the tree in the standard output for verification."""
t = copy(self)
if t.root is None:
t.assemble(dev_mode=True)
t.root.print()
@staticmethod
def _validate_parameter_decorated_func(param_name: str, func):
assert 'return' in func.__annotations__, \
'@parameter decorated functions must have its return type annotated.'
assert param_name != 'data', \
'Error: parameter "data" is reserved. Please, use a different name for your custom parameter.'
if func.__code__.co_argcount != 1 or len(func.__annotations__) != 2:
raise CustomParameterError(f'@parameter decorated functions must have exactly '
f'one argument with annotated type {Registry.__name__}.')
        for func_param_name, param_type in func.__annotations__.items():
if func_param_name != 'return':
if param_type is not Registry:
raise CustomParameterError(f'The single argument of a @parameter decorated '
f'function must have type {Registry.__name__}.')
@property
def root_label(self):
return self._root_label | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/tree.py | 0.904867 | 0.344636 | tree.py | pypi |
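# A hedged, minimal sketch of wiring up a decision tree with the decorators above. The node
# numbering, labels, predicate logic and the `units` parameter are illustrative assumptions;
# the decorator signatures and the AbstractResult contract (insight_type / insight_text keyed
# by end-label) follow the code in tree.py and result.py.
from schema.insight_engine_response import InsightType
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.registry import Registry
from enginelib.decor.result import AbstractResult
from enginelib.decor.tree import Tree

class ExampleResult(AbstractResult):
    insight_type = {
        '100Y': InsightType.NotApplicable,
        '100N': InsightType.Error,
    }
    insight_text = {
        '100Y': 'Claim line {units} accepted.',
        '100N': 'Claim line rejected.',
    }

tree = Tree(ExampleResult, name='example-policy')

@tree.node(100, '100Y', '100N')
def has_positive_sequence(clue: ClaimLineFocus) -> bool:
    """Does the claim line under evaluation have a positive sequence number?"""
    return clue.sequence > 0

@tree.before(100)
def record_units(clue: ClaimLineFocus, registry: Registry):
    registry['units'] = clue.sequence  # custom parameter referenced by the '100Y' insight text

tree.assemble()   # normally called for you by Policy / TreeTraversal
tree.print()      # prints the assembled structure for verification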
from typing import Type, Dict, Callable, Set, Optional, Any
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.errors import InvalidParameterError
from enginelib.decor.registry import Registry
class Predicate:
"""Wraps a function that was decorated with @Tree.node
At the time of creation, checks whether the given
function has its return type annotated and whether
the parameters it accepts are allowed and if their
types annotated and corresponds to what is expected.
It supports verification of pre-determined parameters
and also custom parameters (defined by the developer).
"""
allowed_standard_params: Dict[str, Type] = {
'cue': ClaimFocus,
'oc': ClaimFocus,
'clue': ClaimLineFocus,
'ocl': ClaimLineFocus,
'ocl_s': ClaimLineFocus,
'ocl_d': ClaimLineFocus,
'acl': ClaimLineFocus,
'registry': Registry,
'data': Dict[str, Any]
}
"""These are the names of the pre determined parameters
that are allowed for the wrapped function and their
expected types."""
def __init__(self,
func: Callable[..., bool],
allowed_custom_params: Optional[Dict[str, Type]] = None,
require_return_bool: bool = True):
#: the function being wrapped.
self.func = func
#: the names and types of custom parameters allowed
#: for the function being wrapped.
self.allowed_custom_params: Dict[str, Type] = allowed_custom_params or dict()
#: the actual set of standard parameters that the function
#: being wrapped takes
self.standard_params: Set[str] = set()
#: the actual set of custom parameters that the function
#: being wrapped takes
self.custom_params: Set[str] = set()
self._examine_annotations(require_return_bool)
@property
def description(self) -> str:
"""The textual description of the node being implemented
by the wrapped function.
It is extracted from the function's docstring.
"""
return self.func.__doc__
def _validate_parameter(self, param_name: str, param_type: Type):
if param_name in self.allowed_standard_params:
expected_param_type = self.allowed_standard_params[param_name]
elif param_name in self.allowed_custom_params:
expected_param_type = self.allowed_custom_params[param_name]
else:
raise InvalidParameterError(
message=f'Custom parameter "{param_name}" must be defined before it can be used.')
if expected_param_type is not param_type:
raise InvalidParameterError(
message=f'Parameter "{param_name}" must have annotated type "{expected_param_type.__name__}".')
def _examine_annotations(self, require_return_bool: bool = True):
func = self.func
if require_return_bool:
assert 'return' in func.__annotations__, 'Predicate functions must have return type annotated as bool.'
assert func.__annotations__['return'] is bool, 'Predicate functions must return bool.'
for param_name, param_type in func.__annotations__.items():
if param_name == 'return':
continue
self._validate_parameter(param_name, param_type)
if param_name in self.allowed_standard_params:
self.standard_params.add(param_name)
else:
self.custom_params.add(param_name)
# checks whether the developer has used more than one filtering parameter:
if len(self.standard_params.intersection({'oc', 'ocl', 'ocl_d', 'ocl_s', 'acl'})) > 1:
raise InvalidParameterError('Only one of {oc, ocl, ocl_d, ocl_s, acl} '
'may appear as a parameter in a predicate function.') | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/predicate.py | 0.942994 | 0.384883 | predicate.py | pypi |
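# Hedged sketch of how Predicate (above) validates a decorated function's signature. The
# functions are illustrative; only the parameter-name and type rules come from the class.
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from enginelib.decor.errors import InvalidParameterError
from enginelib.decor.predicate import Predicate

def has_other_claim(clue: ClaimLineFocus, oc: ClaimFocus) -> bool:
    """Is there an other claim with a claim number?"""
    return oc.claim_num is not None   # illustrative check

predicate = Predicate(has_other_claim)
assert predicate.standard_params == {'clue', 'oc'}

def bad(oc: ClaimFocus, ocl: ClaimLineFocus) -> bool:
    return True

try:
    Predicate(bad)   # two filtering parameters ('oc' and 'ocl') are rejected
except InvalidParameterError:
    pass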
import os
import re
from typing import cast, Callable, Tuple, Optional, List
from schema.insight_engine_response import Trace
from enginelib.decor.predicate import Predicate
from enginelib.decor.errors import InvalidParameterError
from enginelib.decor.node import DecisionNode, LeafNode, AbstractNode
from enginelib.decor.registry import Registry
from enginelib.decor.tree import Tree
class TreeTraversal:
"""Responsible for walking down a decision tree, starting from the root,
while also making the registry update itself (the set of qualifying
claims and claim lines, and the custom parameters) according to
the predicate functions in each node of the decision tree."""
def __init__(self, decision_tree: Tree, registry: Registry):
#: the decision tree that must be traversed.
self.decision_tree = decision_tree
decision_tree.assemble()
#: the registry in its initial state.
self.registry = registry
#: the list of predicates and respective answers during the traversal
self.trace: Trace = Trace()
self.trace.tree_name = decision_tree.name
@staticmethod
def _log(*args, **kwargs):
print(*args, **kwargs)
def _eval_before(self, node: AbstractNode):
for predicate in self.decision_tree.execute_before.get(node.label, list()):
func, param_name = self._wrapped_predicate_func(predicate)
self._eval_predicate(func, param_name)
def _eval_after(self, node: AbstractNode):
for predicate in self.decision_tree.execute_after.get(node.label, list()):
func, param_name = self._wrapped_predicate_func(predicate)
self._eval_predicate(func, param_name)
    def _eval_predicate(self, func: Callable, param_name: Optional[str]) -> bool:
if param_name is None:
return func()
elif param_name == 'oc':
return self.registry.is_there_oc_such_that(func)
elif param_name == 'ocl':
return self.registry.is_there_ocl_such_that(func)
elif param_name == 'ocl_s':
return self.registry.is_there_ocl_s_such_that(func)
elif param_name == 'ocl_d':
return self.registry.is_there_ocl_d_such_that(func)
elif param_name == 'acl':
return self.registry.is_there_acl_such_that(func)
else:
raise InvalidParameterError(f'(TreeTraversal) Invalid parameter {param_name} encountered.')
def execute(self) -> str:
"""Perform the traversal, starting from the root of the tree.
For debugging, please set DECOR_DEBUG environment variable to a non-empty value.
Returns:
            the label of the end branch (leaf node) that was reached.
"""
debug = os.environ.get('DECOR_DEBUG', '')
self.decision_tree.assemble()
if debug:
msg = f'Traversal of tree "{self.decision_tree.name}" for claim {self.registry.cue.claim_num}' + \
f' line {self.registry.clue.sequence}:'
self._log('-' * len(msg))
self._log(msg)
self._log(end=(' ' * 8))
node = self.decision_tree.root
while isinstance(node, DecisionNode):
self._eval_before(node)
func, param_name = self._wrapped_predicate_func(node.predicate)
value = self._eval_predicate(func, param_name)
yes_no = 'YES' if value else 'NO'
self.trace.traversal.append((
node.predicate.description.strip(),
yes_no,
str(node.label),
self._info_qualifying_claims_and_claim_lines(param_name)
))
self._eval_after(node)
if debug:
text, _ = re.subn(r'\s+', ' ', node.predicate.description.strip())
self._log(f'--> #{node.label}: "{text}"')
self._log(end=f'{yes_no.rjust(7)} ')
node = node.yes_node if value else node.no_node
node = cast(LeafNode, node)
self._eval_before(node)
if debug:
text, _ = re.subn(r'\s+', ' ', node.simple_insight.text.strip())
insight_text = self.registry.format_text(text)
insight_type = node.simple_insight.insight_type
self._log(end=f'>>> Insight #{node.label}: {insight_type}; "{insight_text}"')
self.trace.end_label = node.label
return node.label
def _wrapped_predicate_func(self, predicate: Predicate) -> Tuple[Callable[..., bool], Optional[str]]:
outer_kwargs = dict()
if 'cue' in predicate.standard_params:
outer_kwargs['cue'] = self.registry.cue
if 'clue' in predicate.standard_params:
outer_kwargs['clue'] = self.registry.clue
if 'data' in predicate.standard_params:
outer_kwargs['data'] = self.registry.data
if 'registry' in predicate.standard_params:
outer_kwargs['registry'] = self.registry
for param_name in predicate.custom_params:
outer_kwargs[param_name] = self.registry[param_name]
filtering_param = set(predicate.standard_params).difference(
{'cue', 'clue', 'data', 'registry'}
)
if filtering_param == set():
if predicate.func(**outer_kwargs):
return (lambda: True), None
else:
return (lambda: False), None
def inner(**inner_kwargs) -> bool:
inner_kwargs.update(outer_kwargs)
return predicate.func(**inner_kwargs)
return inner, next(iter(filtering_param))
def _get_qualifying_ocl_s(self) -> str:
cue_claim_num = self.registry.cue.claim_num
return f'(CUE) Claim #{cue_claim_num}: ' + \
', '.join(f'{ocl_s.sequence}' for ocl_s in self.registry.iter_ocl_s())
def _get_qualifying_ocl_d(self, oc_claim_num: str) -> str:
return f'Claim #{oc_claim_num}: ' + \
', '.join(f'{ocl_d.sequence}' for ocl_d in self.registry.dict_ocl_d[oc_claim_num])
def _get_all_qualifying_ocl_d(self) -> List[str]:
# limit on the number of historical claims to show:
limit = 5
claim_lines: List[str] = list()
for i, oc in zip([*range(limit), 'more'], self.registry.iter_oc()):
if i == 'more':
claim_lines.append('(truncated)')
break
claim_lines.append(self._get_qualifying_ocl_d(oc.claim_num))
return claim_lines
def _info_qualifying_claims_and_claim_lines(self, param_name: str) -> str:
if param_name == 'ocl_s':
return 'Filtered OCL-S: ' + self._get_qualifying_ocl_s()
if param_name == 'ocl_d':
return 'Filtered OCL-D: ' + '; '.join(self._get_all_qualifying_ocl_d())
if param_name == 'ocl':
return 'Filtered OCL-S/D: ' + self._get_qualifying_ocl_s() + '; ' + '; '.join(self._get_all_qualifying_ocl_d())
if param_name == 'oc':
return 'Filtered OC (w/ OCL-D): ' + '; '.join(self._get_all_qualifying_ocl_d())
return '' | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/traversal.py | 0.783906 | 0.234659 | traversal.py | pypi |
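# Hedged sketch of driving a single traversal manually with the class above. `tree`, `cue`,
# `clue` and `history` are assumed to exist (see the Tree and Registry sketches earlier);
# Policy.evaluate() normally performs these steps for every claim line.
import os
from enginelib.decor.registry import Registry
from enginelib.decor.traversal import TreeTraversal

def run_one_line(tree, cue, clue, history) -> str:
    os.environ['DECOR_DEBUG'] = '1'            # optional: log the node-by-node path
    registry = Registry(cue=cue, clue=clue, ocs=history)
    end_label = TreeTraversal(tree, registry).execute()
    insight_text = tree.ResultClass.insight_text[end_label]
    return registry.format_text(insight_text)  # substitute any custom parameters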
import os
from copy import copy
import datetime as dt
from typing import List, Optional, Dict, Any, Union, Tuple
from schema.insight_engine_response import Trace
from dataUtils.DBClient import DBClient
from enginelib.claim_focus import ClaimFocus
from enginelib.claim_line_focus import ClaimLineFocus
from fhir.resources.claim import Claim
from schema.insight_engine_request import InsightEngineRequest
from schema.insight_engine_response import InsightEngineResponse, Insight, Defense, TranslatedMessage, MessageBundle, \
InsightType
from enginelib.decor.flow import Flow
from enginelib.decor.traversal import TreeTraversal
from enginelib.decor.registry import Registry
from enginelib.decor.tree import Tree
from enginelib.errors import MissingFieldError, ClaimError
from enginelib.utils import unique_identifier
class Policy:
def __init__(self, request: InsightEngineRequest, historical_claims: List[Claim],
decision_tree: Union[Tree, Flow], data: Optional[Dict[str, Any]] = None, engine_id: str = ''):
self.cue = ClaimFocus(claim=request.claim, request=request)
self.request = request
self.historical_claims = [
ClaimFocus(claim=claim, request=InsightEngineRequest.construct(claim=claim))
for claim in historical_claims
]
self.flow = Flow(decision_tree) if isinstance(decision_tree, Tree) else decision_tree
self.data: Dict[str, Any] = data or dict()
self.client = DBClient.GetDBClient(os.environ['APIKEY'])
self.engine_id = engine_id
if engine_id:
self.client.init_defenses(request.transaction_id or 'testing', engine_id=engine_id)
def check_effective_date(self, clue: ClaimLineFocus) -> Optional[Insight]:
if 'effective_start_date' not in self.data:
return None
start_date = self.data['effective_start_date']
end_date = self.data.get('effective_end_date', dt.date(9999, 1, 1))
assert isinstance(start_date, dt.date), \
'Custom data `effective_start_date`, if present, must be an instance of class `datetime.date`.'
assert isinstance(end_date, dt.date), \
'Custom data `effective_end_date`, if present, must be an instance of class `datetime.date`.'
assert start_date < end_date, \
'Custom data `effective_start_date`, if present, must come before `effective_end_date`.'
try:
from_date: dt.date = clue.service_period.start
except MissingFieldError as err:
return Insight(
id=unique_identifier(self.request.claim.id),
type=InsightType.Error,
description=str(err),
claim_line_sequence_num=clue.sequence,
defense=Policy.create_defense()
)
if from_date < start_date or from_date >= end_date:
if from_date < start_date:
relation = 'before this policy became effective'
critical_date = start_date
else:
relation = 'on or after the effective period of this policy ended'
critical_date = end_date
message = f'The service date on this claim line ({from_date.strftime("%Y-%m-%d")}) ' \
f'comes {relation} ({critical_date.strftime("%Y-%m-%d")}).'
return Insight(
id=unique_identifier(self.request.claim.id),
type=InsightType.NotApplicable,
description=message,
claim_line_sequence_num=clue.sequence,
defense=Policy.create_defense()
)
def evaluate(self) -> InsightEngineResponse:
"""Evaluates the policy for each claim line in self.request.claim.
Returns:
a response with the response.insights containing the list of insights
"""
response = InsightEngineResponse()
response.engine_name = self.engine_id
response.insights = list()
for clue in self.cue.lines:
ins = self.check_effective_date(clue)
if ins:
response.insights.append(ins)
continue
response.insights.extend(self._assess(clue))
return response
def _assess(self, clue: ClaimLineFocus) -> List[Insight]:
"""Assess one claim line according to the decision tree of the policy.
Args:
clue: claim line to assess.
Returns:
a list of insights for the given claim line (one for each tree in the flow).
"""
insights: List[Insight] = list()
registry = Registry(cue=self.cue, clue=clue, ocs=self.historical_claims, data=copy(self.data))
flow = self.flow
trace_list: List[Trace] = list()
while flow:
label, insight = self._assess_tree(clue, flow.tree, registry)
conn = flow.connections.get(label, None)
trace_list.extend(insight.trace)
if not conn or not conn.ignore_insight:
insight.trace = copy(trace_list)
insights.append(insight)
flow = conn.flow if conn else None
return insights
    def _assess_tree(self, clue: ClaimLineFocus, tree: Tree, registry: Registry) -> Tuple[str, Insight]:
debug = os.getenv('DECOR_DEBUG', '')
traversal = TreeTraversal(tree, registry)
try:
label = traversal.execute()
# We should add all errors that we want to catch in the following tuple:
except (MissingFieldError, ClaimError) as err:
return '', Insight(
id=unique_identifier(self.request.claim.id),
type=InsightType.Error,
description=str(err),
trace=[traversal.trace],
claim_line_sequence_num=clue.sequence,
defense=self.create_defense()
)
# Customize insight text with parameters in the registry
result_class = tree.ResultClass
insight_type = result_class.insight_type[label]
insight_text = result_class.insight_text[label]
insight_text = self._format_insight_text(registry, insight_text, debug=debug)
local_defense = result_class.insight_defense.get(label) or ''
# Fetch defense data and create defense object
if self.engine_id and 'subcode' in registry and registry['subcode']:
self.client.init_defenses(self.request.transaction_id or 'testing', engine_id=self.engine_id, subcode=registry['subcode'])
excerpt, uuid = self.client.get_defense_by_node(label)
defense = self.create_defense(
text=local_defense if os.getenv('RIALTIC_LOCAL_DEFENSE') else (excerpt or ''),
uuid=uuid or f"result::{self.engine_id}-{label}"
)
return label, Insight(
id=unique_identifier(self.request.claim.id),
type=insight_type,
description=insight_text,
trace=[traversal.trace],
claim_line_sequence_num=clue.sequence,
defense=defense
)
def _format_insight_text(self, registry: Registry, text: str, debug: str = '') -> str:
# DEBUGGING
if debug == 'parameters':
self._log(end=f'The function _format_insight_text() was called with text {repr(text)}. ')
self._log(f'The registry has the following parameters defined: {registry.computed_parameters_values}')
return registry.format_text(text)
@staticmethod
def create_defense(text: str = '', uuid: str = '') -> Defense:
message = TranslatedMessage()
message.lang = 'en'
message.message = text
script = MessageBundle()
script.uuid = uuid
script.messages = [message]
defense = Defense()
defense.script = script
return defense
@staticmethod
def _log(*args, **kwargs):
print(*args, **kwargs) | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/policy.py | 0.835919 | 0.169887 | policy.py | pypi |
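# Hedged end-to-end sketch for the Policy class above. The request, history and APIKEY value
# are assumptions, and the constructor reaches out to DBClient, so this only runs with
# platform credentials configured; the keywords and the `effective_start_date` datum follow
# the code.
import os
import datetime as dt
from enginelib.decor.policy import Policy

def evaluate_request(request, historical_claims, tree):
    os.environ.setdefault('APIKEY', 'local-testing-key')      # required by DBClient.GetDBClient
    policy = Policy(
        request,
        historical_claims,
        tree,
        data={'effective_start_date': dt.date(2020, 1, 1)},   # optional effective-date guard
        engine_id='example-engine',
    )
    response = policy.evaluate()   # one insight per claim line (per tree in the flow)
    return response.insights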
from typing import Dict
from enginelib.simple_insight import SimpleInsight
from schema.insight_engine_response import InsightType
from enginelib.decor.errors import InsightLookupError
class AbstractResult:
"""A class that inherits from this one stores information
about all possible insights: type and text. The type is
kept in one dictionary (class attribute insight_type) and
the text in another dictionary (class attribute insight_text).
Both dictionaries are indexed by the name of the branch in
the decision tree that led to the insight (e.g. "100N").
"""
#: stores the type of each insight.
insight_type: Dict[str, InsightType]
#: stores the text of each insight.
insight_text: Dict[str, str]
#: stores the text of each defense.
insight_defense: Dict[str, str] = {}
@classmethod
def simple_insight(cls, branch: str) -> SimpleInsight:
"""
Args:
branch: the name of the branch (to be used as key in
the dictionaries insight_type and insight_text).
Returns:
the SimpleInsight object corresponding to the given branch.
Raises:
InsightLookupError: in case the given branch is not found
in either insight_type or insight_text.
"""
try:
return SimpleInsight(
insight_type=cls.insight_type[branch],
text=cls.insight_text[branch],
defense=cls.insight_defense.get(branch) or ''
)
except KeyError:
raise InsightLookupError(f'Type or text of insight ref #{branch} not found.')
@classmethod
def is_valid(cls, branch: str) -> bool:
"""
Args:
branch: the name of the branch (to be used as key in
the dictionaries insight_type and insight_text).
Returns:
True if branch is a valid key of the dictionary insight_type, False otherwise.
"""
return branch in cls.insight_type
@classmethod
def validate(cls):
text_keys = set(cls.insight_text.keys())
type_keys = set(cls.insight_type.keys())
text_but_no_type = text_keys.difference(type_keys)
type_but_no_text = type_keys.difference(text_keys)
assert text_but_no_type == set(), \
f'Each insight label in {text_but_no_type} has an associated text, ' \
f'but no associated type in class {cls.__name__}.'
assert type_but_no_text == set(), \
f'Each insight label in {type_but_no_text} has an associated type, ' \
f'but no associated text in class {cls.__name__}.' | /rialtic_engine_lib_py-1.13.36-py3-none-any.whl/enginelib/decor/result.py | 0.860955 | 0.507202 | result.py | pypi |
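# Hedged sketch for AbstractResult (above): a concrete subclass only fills in the two
# dictionaries; the label and text used here are illustrative.
from schema.insight_engine_response import InsightType
from enginelib.decor.result import AbstractResult

class MyResult(AbstractResult):
    insight_type = {'200Y': InsightType.NotApplicable}
    insight_text = {'200Y': 'No conflicting claim lines were found.'}

MyResult.validate()                          # both dicts must share exactly the same keys
assert MyResult.is_valid('200Y')
insight = MyResult.simple_insight('200Y')    # SimpleInsight carrying the type, text and defense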
from __future__ import annotations
import json, uuid, os, re
import datetime as dt
from typing import List
from collections import defaultdict
from schema.insight_engine_request import HistoryClaim
from fhir.resources.claim import Claim
from klee.files import read_json
from klee.internal import log
_claim_re = re.compile(r"(?:(CLUE|CUE)(\d?))?[_-]?(OCL|OC)?(\d?[a-zA-Z]?)[_-](\d+[YN]).json")
class ClaimsDirectory:
def __init__(self, claims_folder):
self.all_claims, self.claims = {}, {}
self.history = defaultdict(list)
self.location = claims_folder
def load_claims(self):
for filename in self.list_json:
tc = self.new_claim(filename)
self.all_claims[tc.file_id] = tc
if tc.is_history:
self.history[tc.label_id].append(tc)
else:
self.claims[tc.label_id] = tc
tc.shuffle_patient(tc.uuid)
# sync tx_ids on history
for label in self.history:
for historic in self.history[label]:
log.info("schema matching history: %s from %s to %s", label, historic.file_id, historic.label_id)
if historic.label_id in self.claims:
parent = self.claims[historic.label_id]
historic.shuffle_patient(parent.uuid)
historic.tx_id = parent.tx_id
else:
log.warning('note: for nodes with multiple test cases, ensure that OC/OCL indexes are 2 wide')
log.warning('warning: loose OCL file, unknown behavior, ignoring file')
log.warning('file: %s', historic.file_id)
def new_claim(self, filename: str) -> KleeTestClaim:
return KleeTestClaim(self, filename)
def claim_by_file(self, filename: str) -> KleeTestClaim:
file_description = _claim_re.findall(filename)[0]
file_id = "-".join(x for x in file_description if x)
return self.all_claims[file_id]
@property
def list_json(self) -> List[str]:
# noinspection PyTypeChecker
        return [file for file in os.listdir(self.location) if file.endswith(".json")]
class KleeTestClaim:
def __init__(self, claims_dir: ClaimsDirectory, filename: str):
self.claims_dir, self.time = claims_dir, dt.datetime.utcnow().timestamp()
# init vars
main_type, idx_1, ex_type, idx_2, self.node_id = \
file_description = _claim_re.findall(filename)[0]
self.index = "-".join(x for x in (idx_1, idx_2) if x)
self.type = "-".join(x for x in (main_type, ex_type) if x)
# multiple tests per node support {leaf-idx}
self.rel_idx = idx_1 if main_type else idx_2[:-1]
self.label_id = "-".join(x for x in (self.node_id, self.rel_idx) if x)
# unique claim/file id
self.file_id: str = "-".join(x for x in file_description if x)
self.claim_json = read_json(os.path.join(self.claims_dir.location, filename))
self.claim_json['id'] = f'{self.file_id}-{self.time}'
self.tx_id = 'TX.' + self.claim_json['id']
self.uuid = str(uuid.uuid4())
@property
def fhir_claim(self) -> Claim:
return Claim.parse_obj(self.claim_json)
@property
def is_history(self) -> bool:
return self.type in ('OCL', 'OC')
@property
def schema_history(self) -> List[HistoryClaim]:
return self.compose_history(self.claims_dir.history[self.label_id])
def compose_history(self, claims: List[KleeTestClaim]) -> List[HistoryClaim]:
results = []
for ix, other in enumerate(claims, 1):
other.shuffle_patient(self.uuid)
# other.claim_json['id'] = self.claim_json['id'] + f'-{ix}'
hc = HistoryClaim(claim = other.fhir_claim, transaction_id = self.tx_id)
results.append(hc)
return results
def shuffle_patient(self, member_id):
queue = {}
for item in self.claim_json['contained']:
if item['resourceType'] == 'Patient':
old = item['id']; ix = old.split('-')[-1]
queue[old] = f"Member-{member_id}-{ix}"
text = json.dumps(self.claim_json)
for old, new in queue.items():
text = text.replace(old, new)
self.claim_json = json.loads(text) | /rialtic_klee_py-0.0.16-py3-none-any.whl/klee/claims.py | 0.616936 | 0.151781 | claims.py | pypi |
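# Hedged sketch for ClaimsDirectory above. The folder path and file names are assumptions;
# the naming convention (e.g. "CUE-100N.json", with optional "OC"/"OCL" history files) is
# inferred from the `_claim_re` pattern and may differ from real test suites.
from klee.claims import ClaimsDirectory

def summarize_claims(folder: str = 'tests/claims'):
    claims_dir = ClaimsDirectory(folder)
    claims_dir.load_claims()
    # primary (CUE) claims and their history are both indexed by node label
    for label, test_claim in claims_dir.claims.items():
        history = claims_dir.history.get(label, [])
        print(label, test_claim.file_id, f'{len(history)} historic claim(s)')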
from __future__ import annotations
from typing import TYPE_CHECKING, List, Dict
if TYPE_CHECKING:
from klee.plans import EngineTestPlan
from os.path import join
from klee.files import save_json, fhir_to_json
from klee.insights import InsightText
from klee.claims import KleeTestClaim
from schema.insight_engine_request import InsightEngineRequest
from schema.insight_engine_response import InsightEngineResponse, \
Insight, Defense, TranslatedMessage, MessageBundle
class InsightEngineTestCase:
def __init__(self, test_plan: EngineTestPlan, claim: KleeTestClaim):
self.insight: InsightText = test_plan.insights[claim.node_id]
self.test_plan: EngineTestPlan = test_plan
self.claim: KleeTestClaim = claim
@property
def defense(self) -> MessageBundle:
return MessageBundle(messages=[
TranslatedMessage(message=self.test_plan.get_defense(self))
])
@property
def request(self) -> InsightEngineRequest:
return InsightEngineRequest(claim = self.claim.fhir_claim, transaction_id =
self.claim.tx_id, history = self.test_plan.get_history(self.claim))
@property
def response(self) -> InsightEngineResponse:
return InsightEngineResponse(insights=[
Insight(
type=self.insight.type,
description=self.insight.text,
defense=Defense(script=self.defense, referenceData=[]),
claim_line_sequence_num=self.test_plan.claim_line(self.claim)
)
])
@property
def json(self) -> Dict:
return {
"insight_engine_request": fhir_to_json(self.request),
"insight_engine_response": fhir_to_json(self.response),
'resourceType': "InsightEngineTestCase"
}
@property
def history_list(self) -> List[Dict]:
return [fhir_to_json(x) for x in self.request.history]
def save_to_folder(self, directory):
save_json(self.json, join(directory, self.claim.file_id + '.json'))
def save_to_history(self, directory):
save_json(self.history_list, join(directory, self.claim.file_id + '.json')) | /rialtic_klee_py-0.0.16-py3-none-any.whl/klee/cases.py | 0.821367 | 0.159872 | cases.py | pypi |
import sys
from klee.internal import Structure, log
from klee.insights import InsightDict, load_insights
from klee.claims import ClaimsDirectory, KleeTestClaim
from klee.cases import InsightEngineTestCase
from typing import Dict, Union, List
class TestCaseBuilder:
def __init__(self, structure: Structure):
self.claims_dir: ClaimsDirectory = ClaimsDirectory(structure.claim_dir)
self.history_dir: str = structure.json_history
self.output_dir: str = structure.json_cases
self.insights: InsightDict = load_insights()
self.claims_dir.load_claims()
self.init_test_plan()
def build_all_cases(self) -> Dict[str, InsightEngineTestCase]:
all_cases = {}
for node_label, kt_claim in self.get_primary_claims():
all_cases[node_label] = self.build_test_case(kt_claim)
return all_cases
def build_test_case(self, node: Union[str, KleeTestClaim]) -> InsightEngineTestCase:
"""accepts node label or a node itself"""
kt_claim = self.claims_dir.claims[node] if isinstance(node, str) else node
if kt_claim.node_id in self.insights:
# noinspection PyTypeChecker
kt_case = InsightEngineTestCase(self, kt_claim)
if not kt_case.test_plan.validate_case(kt_case):
log.error(f"Unable to verify that {kt_claim.label_id} has been correctly built.")
else:
if self.output_dir:
kt_case.save_to_folder(self.output_dir)
if self.history_dir:
kt_case.save_to_history(self.history_dir)
return kt_case
else:
log.error(f"Unable to find insights for {kt_claim.label_id}")
def build_node_labels(self, node_labels: List[str]) -> Dict[str, InsightEngineTestCase]:
test_cases = {}
if not node_labels:
test_cases.update(self.build_all_cases())
for label in node_labels:
kt_case = self.build_test_case(label.split('.')[0])
test_cases[kt_case.claim.label_id] = kt_case
return test_cases
def get_primary_claims(self):
return self.claims_dir.claims.items()
def init_test_plan(self):
pass | /rialtic_klee_py-0.0.16-py3-none-any.whl/klee/build.py | 0.532425 | 0.263593 | build.py | pypi |
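# Hedged sketch for TestCaseBuilder above. Structure comes from klee.internal and its
# construction is not shown in this module, so `structure` is treated as given; the builder
# methods and the node-label convention follow the code.
from klee.build import TestCaseBuilder

def build_cases(structure):
    builder = TestCaseBuilder(structure)
    all_cases = builder.build_all_cases()             # every primary claim with known insights
    selected = builder.build_node_labels(['100N'])    # or just the listed node labels
    return all_cases, selected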
from fhir.resources.claim import Claim
from schema.insight_engine_response import InsightEngineResponse, Insight
from typing import List, Tuple
import pytest, re
import datetime as dt
from klee.internal import Structure, log
from klee.cases import InsightEngineTestCase
from klee.files import KleeFile
with KleeFile(Structure.reference.binary, 'rb') as file:
local_test_cases = file.read_data()
response_cache = {}
@pytest.mark.parametrize('label, case', local_test_cases.items(), ids=local_test_cases)
class TestEngineV1:
@pytest.fixture
def response(self, label, case: InsightEngineTestCase, run_engine) -> InsightEngineResponse:
log.info("Preparing to run test label %s", label)
log.debug('Test case billable period: %s', _period(case.request.claim))
if case.request.history:
periods = ", ".join(_period(x.claim) for x in case.request.history)
log.debug('Historic claim periods: %s', periods)
if label in response_cache:
return response_cache[label]
response_cache[label] = run_engine(case)
return response_cache[label]
@pytest.fixture
def expected(self, case: InsightEngineTestCase) -> Insight:
return case.response.insights[0]
@pytest.fixture
def actual(self, label, response: InsightEngineResponse, expected: Insight) -> Insight:
line, insight = expected.claim_line_sequence_num, None
# todo: create a ticket on someone's board so this hack isn't needed
n = len(response.insights)
log.info('Found %s insights for test label %s', n, label)
for ix, item in enumerate(response.insights, 1):
prefix = f">>> Insight {ix} of {n}"
if item.trace is None:
item.trace = []
exits = []
for trace in item.trace:
header = f"\n\texit: {trace.end_label}::{trace.tree_name}"
exits.append(header + _node_path(trace.traversal))
log.info('%s\n\ttype: %s \n\tpolicy: %s %s \n', prefix, item.type, item.policy_name, "".join(exits))
for item in response.insights:
if item.claim_line_sequence_num == line:
insight = item
for item in response.insights:
if item.claim_line_sequence_num == line and \
item.trace and item.trace[-1].end_label == label:
insight = item
return insight if insight else response.insights[line - 1]
def test_insight_type(self, actual, expected):
if actual.type == 'Error' and expected.type != 'Error':
assert not expected.type, actual.description
assert actual.type == expected.type
def test_insight_description(self, actual, expected):
if not re.search(r"{[^}]+}", expected.description):
assert actual.description == expected.description
else:
assert_parameterized(actual.description, expected.description)
def test_defense_message(self, actual, expected):
def read_defense(x):
msgs = x.defense.script.messages
return msgs[0].message if msgs else ""
actual, expected = map(read_defense, (actual, expected))
        # Remotely-fetched defense text can vary in whitespace and quoting, so compare fully scrubbed strings first and fall back to a lighter normalization.
normalize, scrub = lambda x: re.sub(r'[\n\r"]', "", x), lambda x: re.sub(r'[\s"]', "", x)
try:
assert scrub(actual) == scrub(expected)
return
except AssertionError:
pass
assert normalize(actual) == normalize(expected)
def assert_parameterized(actual, expected):
# check for const chunks
for chunk in re.split("{[^}]+}", expected):
if chunk not in actual:
assert actual == expected
# check for replacement
escaped = re.escape(expected)
pattern = re.sub(r'\\{[^\\}]+\\}', "[^{].*", escaped)
try:
assert re.fullmatch(pattern, actual)
return
except AssertionError:
pass
    # if a raw placeholder still appears in the actual text, substitution never happened: fail and show the actual text
for var in re.findall(r"{[^}]+}", expected):
idx = actual.find(var)
if idx > -1:
assert not var, actual
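def _example_assert_parameterized():
    # Hedged illustration of the matching rules above (not collected by pytest): constant
    # chunks must appear verbatim, each "{placeholder}" may be replaced by text that does not
    # start with "{", and an unsubstituted placeholder left in the actual text is a failure.
    assert_parameterized('Found 3 claims', 'Found {count} claims')           # passes
    # assert_parameterized('Found {count} claims', 'Found {count} claims')   # would fail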
def _period(claim: Claim) -> str:
return f"{_date(claim.billablePeriod.start)} - {_date(claim.billablePeriod.end)}"
def _date(date: dt.date):
return date.strftime("%Y/%m/%d")
def _node_path(traversal: List[Tuple[str, ...]]) -> str:
sep, nodes = "\n\t\t", ''
for series in map(list, traversal or []):
while len(series) < 4:
series.append('')
pred, result, label, ocl_info, *_ = series
pred = (f"#{label}, " if label else "") + pred
nodes += sep + f"{result.ljust(3)} <- {pred}"
if ocl_info:
nodes += sep + '\t' + ocl_info
return nodes | /rialtic_klee_py-0.0.16-py3-none-any.whl/klee/pyt/shared.py | 0.419886 | 0.471588 | shared.py | pypi |
__all__ = [
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
'rainbow',
]
__author__ = 'hanks'
def _wrap_with(code):
def inner(text, bold=False):
'''Wrap text with color code
Args:
text, string, string that need to be transferred
bold, boolean, text is needed to be bold or not
Returns:
string, wrapped text with color code
'''
c = code
if bold:
c = '1;{}'.format(c)
return '\033[{}m{}\033[0m'.format(c, text)
return inner
'''Basic color wrappers
'''
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
def _doctest_basic_wrappers():
'''
>>> red('a')
'\\x1b[31ma\\x1b[0m'
>>> green('a', bold=True)
'\\x1b[1;32ma\\x1b[0m'
'''
# color func list for rainbow
rainbow_wrapper_list = [red, yellow, green, blue, magenta]
def _nsplit(text, step):
'''Split text to small groups which contains n letter each
>>> _nsplit('abc', 4)
['abc']
>>> _nsplit('abc', 3)
['abc']
>>> _nsplit('abc', 2)
['ab', 'c']
>>> _nsplit('abc', 1)
['a', 'b', 'c']
>>> _nsplit('', 1)
[]
'''
    assert step > 0, 'step parameter in range() should '\
                     'always be a positive integer'
return [text[i: i + step] for i in range(0, len(text), step)]
def _flatten(a_list):
"""Return a flatten list
>>> _flatten([1, 2, 3])
[1, 2, 3]
>>> _flatten([1, (2, 'a'), 3])
[1, (2, 'a'), 3]
>>> _flatten([[(1, 2), (3, 4)], [('a', 'b'), ('c' ,'d')]])
[(1, 2), (3, 4), ('a', 'b'), ('c', 'd')]
>>> _flatten([])
[]
"""
assert isinstance(a_list, list), 'a_list should be list type'
res = []
for element in a_list:
if isinstance(element, list):
res.extend(_flatten(element))
else:
res.append(element)
return res
def rainbow(text, wrapper_list=rainbow_wrapper_list):
'''Return fabulous rainbow-like texts used in console
Args:
text, target string
wrapper_list, function list to wrap each letter in text
Returns:
wrapped string
>>> rainbow('abc')
'\\x1b[31ma\\x1b[0m\\x1b[33mb\\x1b[0m\\x1b[32mc\\x1b[0m'
'''
splitted_text_list = _nsplit(text, len(wrapper_list))
zipped_tuple_list = _flatten([list(zip(wrapper_list, splitted_text))
for splitted_text in splitted_text_list])
wrapped_str = ''.join([wrapper(letter)
for wrapper, letter in zipped_tuple_list])
return wrapped_str
if __name__ == '__main__':
import doctest
doctest.testmod() | /ribbon-1.0.0.tar.gz/ribbon-1.0.0/ribbon.py | 0.795658 | 0.183246 | ribbon.py | pypi |
import graphene
import riberry
from .graphene_sqla.builder import Builder
from .graphene_sqla.helpers import non_null_list
from .graphene_sqla.wrapper_graphene import ModelType
class Application(ModelType):
build = Builder(riberry.model.application.Application)
# fields
name = build.field('name')
internal_name = build.field('internal_name')
description = build.field('description')
# relationships
instances, resolve_instances = build.relationship('instances')
forms, resolve_forms = build.relationship('forms')
class ApplicationInstance(ModelType):
build = Builder(riberry.model.application.ApplicationInstance)
# fields
name = build.field('name')
internal_name = build.field('internal_name')
status = graphene.String()
# relationships
heartbeat, resolve_heartbeat = build.relationship('heartbeat')
schedules, resolve_schedules = build.relationship('schedules')
application, resolve_application = build.relationship('application')
forms, resolve_forms = build.relationship('forms')
class ApplicationInstanceSchedule(ModelType):
build = Builder(riberry.model.application.ApplicationInstanceSchedule)
# fields
days = build.field('days')
start_time = build.field('start_time')
end_time = build.field('end_time')
timezone = build.field('timezone', name='timeZone')
parameter = build.field('parameter')
value = build.field('value')
priority = build.field('priority')
# relationships
instance, resolve_instance = build.relationship('instance')
class Heartbeat(ModelType):
build = Builder(riberry.model.application.Heartbeat)
# fields
created = build.field('created')
updated = build.field('updated')
# relationships
instance, resolve_instance = build.relationship('instance')
class Form(ModelType):
build = Builder(riberry.model.interface.Form)
# fields
name = build.field('name')
internal_name = build.field('internal_name')
description = build.field('description')
version = build.field('version')
# relationships
instance, resolve_instance = build.relationship('instance')
application, resolve_application = build.relationship('application')
input_value_definitions, resolve_input_value_definitions = build.relationship('input_value_definitions')
input_file_definitions, resolve_input_file_definitions = build.relationship('input_file_definitions')
document, resolve_document = build.relationship('document')
groups = build.proxy(lambda: Group, is_list=True)
# connections
jobs, resolve_jobs = build.connection('jobs', sortable_fields={
'INTERNAL_ID': 'id',
'NAME': 'name',
'CREATED': 'created',
})
class Document(ModelType):
build = Builder(riberry.model.misc.Document)
# fields
type = build.field('type')
content, resolve_content = build.field_with_resolver('content', lambda c: c.decode() if c else None)
class Group(ModelType):
build = Builder(riberry.model.group.Group)
# fields
name = build.field('name')
class InputValueDefinition(ModelType):
build = Builder(riberry.model.interface.InputValueDefinition)
# fields
name = build.field('name')
internal_name = build.field('internal_name')
description = build.field('description')
type, resolve_type = build.field_with_resolver('type')
required = build.field('required')
default_value = graphene.JSONString()
allowed_values = non_null_list(graphene.JSONString)
class InputFileDefinition(ModelType):
build = Builder(riberry.model.interface.InputFileDefinition)
# fields
name = build.field('name')
internal_name = build.field('internal_name')
description = build.field('description')
type, resolve_type = build.field_with_resolver('type')
accept = build.field('accept')
required = build.field('required')
class Job(ModelType):
build = Builder(riberry.model.job.Job)
# fields
name = build.field('name')
created = build.field('created')
# relationships
form, resolve_form = build.relationship('form')
creator, resolve_creator = build.relationship('creator')
# connections
executions, resolve_executions = build.connection('executions', sortable_fields={
'INTERNAL_ID': 'id',
'CREATED': 'created',
'UPDATED': 'updated',
})
class JobExecution(ModelType):
build = Builder(riberry.model.job.JobExecution)
# fields
status = build.field('status')
created = build.field('created')
started = build.field('started')
updated = build.field('updated')
completed = build.field('completed')
task_id = build.field('task_id')
priority = build.field('priority')
stream_status_summary = graphene.Field(lambda: JobExecutionStreamSummary, required=True)
# relationships
creator, resolve_creator = build.relationship('creator')
job, resolve_job = build.relationship('job')
latest_progress, resolve_latest_progress = build.relationship('latest_progress', required=False)
data, resolve_data = build.relationship('data')
# connections
streams, resolve_streams = build.connection('streams', sortable_fields={
'INTERNAL_ID': 'id',
'CREATED': 'created',
'UPDATED': 'updated',
})
metrics, resolve_metrics = build.connection('metrics', sortable_fields={
'INTERNAL_ID': 'id',
'EPOCH_START': 'epoch_start',
'EPOCH_END': 'epoch_end',
})
artifacts, resolve_artifacts = build.connection('artifacts')
progress, resolve_progress = build.connection('progress')
class JobExecutionMetric(ModelType):
build = Builder(riberry.model.job.JobExecutionMetric)
# fields
epoch_start = build.field('epoch_start')
epoch_end = build.field('epoch_end')
stream_name = build.field('stream_name')
step_name = build.field('step_name')
count = build.field('count')
sum_duration = build.field('sum_duration')
max_duration = build.field('max_duration')
min_duration = build.field('min_duration')
class JobExecutionStreamSummary(graphene.ObjectType):
# fields
queued = graphene.Int(required=True)
active = graphene.Int(required=True)
retry = graphene.Int(required=True)
success = graphene.Int(required=True)
failure = graphene.Int(required=True)
@staticmethod
def resolve_queued(instance, *_):
return instance.get('QUEUED', 0)
@staticmethod
def resolve_active(instance, *_):
return instance.get('ACTIVE', 0)
@staticmethod
def resolve_retry(instance, *_):
return instance.get('RETRY', 0)
@staticmethod
def resolve_success(instance, *_):
return instance.get('SUCCESS', 0)
@staticmethod
def resolve_failure(instance, *_):
return instance.get('FAILURE', 0)
class ResourceData(ModelType):
build = Builder(riberry.model.misc.ResourceData)
# fields
name = build.field('name')
    value = build.field('value')
class JobExecutionArtifact(ModelType):
build = Builder(riberry.model.job.JobExecutionArtifact)
# fields
name = build.field('name')
type, resolve_type = build.field_with_resolver('type')
category = build.field('category')
filename = build.field('filename', name='fileName')
created = build.field('created')
size = build.field('size')
content_type = graphene.String()
content_encoding = graphene.String()
# relationships
data, resolve_data = build.relationship('data')
class JobExecutionArtifactData(ModelType):
build = Builder(riberry.model.job.JobExecutionArtifactData)
# fields
title = build.field('title')
description = build.field('description')
class JobExecutionProgress(ModelType):
build = Builder(riberry.model.job.JobExecutionProgress)
# fields
created = build.field('created')
message = build.field('message')
class JobExecutionStream(ModelType):
build = Builder(riberry.model.job.JobExecutionStream)
# fields
name = build.field('name')
status = build.field('status')
created = build.field('created')
started = build.field('started')
updated = build.field('updated')
completed = build.field('completed')
# connections
steps, resolve_steps = build.connection('steps', sortable_fields={
'INTERNAL_ID': 'id',
'CREATED': 'created',
'UPDATED': 'updated',
})
class JobExecutionStreamStep(ModelType):
build = Builder(riberry.model.job.JobExecutionStreamStep)
# fields
name = build.field('name')
status = build.field('status')
created = build.field('created')
started = build.field('started')
updated = build.field('updated')
completed = build.field('completed')
class User(ModelType):
build = Builder(riberry.model.auth.User)
# fields
username = build.field('username')
details, resolve_details = build.relationship('details')
jobs, resolve_jobs = build.connection('jobs')
# relationships
groups = build.proxy(lambda: Group, is_list=True)
forms = non_null_list(lambda: Form)
applications = non_null_list(lambda: Application)
# connections
executions, resolve_executions = build.connection('executions')
class UserDetails(ModelType):
build = Builder(riberry.model.auth.UserDetails)
# fields
display_name = build.field('display_name')
first_name = build.field('first_name')
last_name = build.field('last_name')
department = build.field('department')
email = build.field('email') | /riberry_web-0.1.2-py3-none-any.whl/riberry_web/lib/gql/models.py | 0.559771 | 0.167185 | models.py | pypi |
from typing import Optional
from sqlalchemy import Column
from sqlalchemy.orm import RelationshipProperty, ColumnProperty
from sqlalchemy.orm.attributes import InstrumentedAttribute
from .wrapper_graphene import ModelType
class SqlaModel:
def __init__(self, model):
self.model = model
@property
def name(self):
return self.model.__name__
@property
def model_type(self):
return ModelType.instance(self.name)
def member(self, name) -> 'SqlaModelMember':
return SqlaModelMember(member=getattr(self.model, name), model=self)
class SqlaModelMember:
def __init__(self, member: InstrumentedAttribute, model: SqlaModel):
self.member: InstrumentedAttribute = member
self.model: SqlaModel = model
@property
def name(self):
return self.member.key
def _sqla_column(self) -> Optional[Column]:
if isinstance(self.member.prop, ColumnProperty):
return list(self.member.prop.columns)[0]
elif isinstance(self.member.prop, RelationshipProperty) and len(self.member.prop.local_columns) == 1:
return list(self.member.prop.local_columns)[0]
@property
def type(self):
return self._sqla_column().type
@property
def nullable(self):
return self._sqla_column().nullable
@property
def default(self):
return self._sqla_column().default
@property
def description(self):
return self.member.comment
@property
def target(self) -> SqlaModel:
if self.is_relationship():
return SqlaModel(model=self.member.property.mapper.class_)
@property
def target_to_source_member(self) -> 'SqlaModelMember':
if self.is_relationship() and self.member.property.back_populates:
return SqlaModelMember(
member=getattr(self.target.model, self.member.property.back_populates),
model=self.target
)
def is_relationship(self):
return isinstance(self.member.property, RelationshipProperty)
def is_list(self):
return self.member.property.uselist
def __repr__(self):
return f'SqlaModelMember(member={self.member})' | /riberry_web-0.1.2-py3-none-any.whl/riberry_web/lib/gql/graphene_sqla/wrapper_sqla.py | 0.86799 | 0.170232 | wrapper_sqla.py | pypi |
from types import FunctionType
from typing import Optional, Iterable, Union, Tuple, Callable, Any
import graphene
import sqlalchemy
from sqlalchemy.ext.declarative import DeclarativeMeta
from .fields import PaginationConnectionField
from .helpers import non_null_list
from .loaders import loader_factory
from .wrapper_sqla import SqlaModel, SqlaModelMember
class Builder:
""" Single point of entry for building SQLAlchemy-related graphene attributes """
field_mapping = mapping = {
sqlalchemy.String: graphene.String,
sqlalchemy.Integer: graphene.Int,
sqlalchemy.Float: graphene.Float,
sqlalchemy.DateTime: graphene.DateTime,
sqlalchemy.Boolean: graphene.Boolean,
sqlalchemy.JSON: graphene.JSONString,
sqlalchemy.Binary: graphene.String,
sqlalchemy.Enum: graphene.String,
}
def __init__(self, model: DeclarativeMeta):
self.model: SqlaModel = SqlaModel(model=model)
def field(self, member_name: str, **kwargs) -> graphene.Scalar:
""" Returns a graphene List/Field for the SQLAlchemy member """
return self._create_field(self.model.member(name=member_name), **kwargs)
def field_with_resolver(
self,
member_name: str,
transformer: Optional[Callable[[Any], Any]] = None,
**kwargs,
) -> Tuple[Any, Any]:
return (
self.field(member_name, **kwargs),
self.resolver(member_name, transformer or (lambda _: _))
)
@staticmethod
def resolver(member_name: str, transformer: FunctionType) -> FunctionType:
return lambda instance, *_: transformer(getattr(instance, member_name))
def relationship(self, member_name: str, **kwargs) -> Tuple[Union[graphene.List, graphene.Field], FunctionType]:
""" Returns a graphene relationship for the given SQLAlchemy relationship """
return loader_factory.relationship_attribute_resolver(self.model.member(name=member_name), **kwargs)
def connection(self, member_name: str, sortable_fields: Optional[Iterable[str]] = None) -> \
Tuple[PaginationConnectionField, FunctionType]:
""" Returns a graphene relay-style connection relationship for the given SQLAlchemy attribute """
return loader_factory.connection_attribute_resolver(
model_member=self.model.member(name=member_name),
sortable_fields=sortable_fields,
)
# noinspection PyMethodMayBeStatic
def proxy(self, model: DeclarativeMeta, is_list: bool, **kwargs) -> Union[graphene.List, graphene.Field]:
""" Returns a basic graphene List/Field for a given SQLAlchemy member.
Used for custom properties and does not delegate to a loader.
"""
return non_null_list(model, **kwargs) if is_list else graphene.Field(model, **kwargs)
@classmethod
def _create_field(cls, model_member: SqlaModelMember, **kwargs) -> graphene.Scalar:
""" Converts a SQLAlchemy member type to a graphene member type"""
try:
sqla_cls = cls.field_mapping[type(model_member.type)]
except KeyError:
raise KeyError(f'Unsupported type {type(model_member.type)} from {model_member}')
else:
return sqla_cls(description=model_member.description, **{
'required': not model_member.nullable,
'default_value': model_member.default,
**kwargs,
}) | /riberry_web-0.1.2-py3-none-any.whl/riberry_web/lib/gql/graphene_sqla/builder.py | 0.921397 | 0.19521 | builder.py | pypi |
from PIL import Image, ImageFile
import argparse
import numpy as np
import torch
import torchvision.transforms as transforms
from torchvision import datasets
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
def get_train_args():
"""
Retrieves and parses the 15 command line arguments provided by the user when
they run the train.py program from a terminal window. This function uses Python's
argparse module to create and define these 15 command line arguments. If
the user fails to provide some or all of the 15 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image Folder as data_dir
2. Image size to use as input size for the model as --image_size
3. Batch size for data loaders as --batch_size
4. CNN Model Architecture as --arch with default value 'vgg'
5. Directory to save model's checkpoint as --save_dir with default value '.' to use the same directory
6. A float number between 0 and 1 as --learning_rate with default value 0.01
7. A float number between 0 and 1 as Drop out probability to use as --drop_p default value 0.5
8. A list of hidden units to use in the model's classifier block as --hidden_units
9. An integer number as --epochs with default value 10
10. Number of epochs for training to stop if valid loss stops decreasing as --early_stopping default None
11. A boolean to switch the use of gpu for training as --gpu with default False
12. A boolean to plot the model training history or not as --plot_history default False
13. A boolean to set the model for evaluation only and prevent retraining as --evaluate_only default False.
14. A File path to save the model training history to csv file as --save_history default None
15. A file path to the model's checkpoint to use for retraining as --check_point default None
This function returns these arguments as an ArgumentParser object.
Param: None - simply using argparse module to create & store command line arguments
Returns:
parse_args() - data structure that stores the command line arguments object
"""
# Create parser using ArgumentParser
parser = argparse.ArgumentParser(
description='Trains a new network on a dataset and save the model as a checkpoint.')
# Create the 15 command line arguments described above using the parser's add_argument() method
parser.add_argument('data_dir',
type=str,
help='path to the folder of the data')
parser.add_argument('--image_size',
type=int,
default=224,
help='Image size to use as input size for the model')
parser.add_argument('--batch_size',
type=int,
default=64,
help='Batch size for data loaders')
parser.add_argument('--arch',
type=str,
default='vgg',
choices=['vgg', 'resnet', 'alexnet', 'densnet', 'googlenet', 'inception'],
help='name of CNN architecture to use')
parser.add_argument('--save_dir',
dest='save_dir',
type=str,
default='.',
help="directory to save model's checkpoints")
parser.add_argument('--learning_rate',
type=float,
default=0.01,
help='Learning rate value for the optimizer')
parser.add_argument('--drop_p',
type=float,
default=0.5,
help='Drop out probability to use')
parser.add_argument('--hidden_units',
nargs='+',
type=int,
default=[512],
help="List of hidden units for the model's classifier block")
parser.add_argument('--epochs',
type=int,
default=10,
help='Number of epochs for training')
parser.add_argument('--early_stopping',
type=int,
default=None,
help='Number of epochs for training to stop if valid loss stops decreasing')
parser.add_argument('--gpu',
action="store_true",
default=False,
help='Use GPU for training')
parser.add_argument('--plot_history',
action="store_true",
default=False,
help='Plot the model training history')
parser.add_argument('--evaluate_only',
action="store_true",
default=False,
help='Sets the model for evaluation only and prevent retraining')
parser.add_argument('--save_history',
type=str,
default=None,
help='Saves the model training history to csv file')
parser.add_argument('--check_point',
type=str,
default=None,
help="Model's checkpoint to use for retraining")
# Return parser object containing the arguments
return parser.parse_args()
def get_predict_args():
"""
Retrieves and parses the 6 command line arguments provided by the user when
they run the predict.py program from a terminal window. This function uses Python's
argparse module to create and define these 6 command line arguments. If
the user fails to provide some or all of the 6 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image path to predict as input
2. A File path to Model's checkpoint to use for retraining as checkpoint
3. Number of top classes for predictions as --top_k default 5
4. A file path to category names json file as --category_names default None
5. A boolean to switch the use of gpu for prediction as --gpu with default False
6. A boolean to plot the prediction image along with top k classes or not as --plot_predictions default False
This function returns these arguments as an ArgumentParser object.
Param: None - simply using argparse module to create & store command line arguments
Returns:
parse_args() - data structure that stores the command line arguments object
"""
# Create parser using ArgumentParser
parser = argparse.ArgumentParser(
description='Use a pretrained model checkpoint for prediction.')
# Create the 6 command line arguments described above using the parser's add_argument() method
parser.add_argument('input',
type=str,
help='path to the input image to predict')
parser.add_argument('check_point',
type=str,
help="Model's checkpoint to use for prediction")
parser.add_argument('--top_k',
type=int,
default=5,
help='Number of top classes for predictions')
parser.add_argument('--category_names',
type=str,
default=None,
help='File path to category names json file')
parser.add_argument('--gpu',
action="store_true",
default=False,
help='Use GPU for prediction')
parser.add_argument('--plot_predictions',
action="store_true",
default=False,
help='Plot the prediction image along with the top k classes')
# Return parser object containing the arguments
return parser.parse_args()
def get_dataloaders(data_dir, image_size=224, batch_size=64):
"""
Create data loaders for train, validation and test image folders for model training, validation and testing
param: data_dir (str): Folder containing image folders for train, valid and test images
param: image_size (int): image size to use as input size for the model
param: batch_size (int): number of images to use as batch size for train, valid and test loaders
returns:
dataloaders: a dictionary that contains the dataloaders object for train, valid and test
image_datasets: a dictionary that contains ImageFolder objects for train, valid and test
n_classes: the number of classes in the training dataset
weights: a FloatTensor of inverse class counts, usable for class balancing
"""
# Setup the normalizer
normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Transforming the data and applying image augmentation for training dataset
train_transform = transforms.Compose([transforms.Resize((image_size, image_size)), # Resize the image to 224x224
# Randomly rotate the image within a range of +/- 30 degrees
transforms.RandomRotation(30),
transforms.RandomHorizontalFlip(), # Randomly flip the image horizontally
transforms.ToTensor(), # Convert the numpy array that contains the image into a tensor
normalizer]) # Apply the normalizer
# Transformation for test and validation datasets
val_test_transform = transforms.Compose([transforms.Resize((224, 224)), # Resize the image to 224x224
transforms.ToTensor(), # Convert the numpy array that contains the image into a tensor
normalizer]) # Apply the normalizer
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(
data_dir + '/train', transform=train_transform)
valid_data = datasets.ImageFolder(
data_dir + '/valid', transform=val_test_transform)
test_data = datasets.ImageFolder(
data_dir + '/test', transform=val_test_transform)
image_datasets = {'train': train_data,
'valid': valid_data,
'test': test_data}
# prepare data loaders
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(
valid_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=True)
dataloaders = {'train': train_loader,
'valid': valid_loader,
'test': test_loader}
# Get the number of classes
n_classes = len(train_data.classes)
# Get training classes weights
classes_count = np.array([train_data.targets.count(i)
for i in range(n_classes)])
weights = torch.FloatTensor(1/classes_count)
# print out some data stats
print(f'Number of training images: {len(train_data)}')
print(f'Number of validation images: {len(valid_data)}')
print(f'Number of test images: {len(test_data)}')
print()
return dataloaders, image_datasets, n_classes, weights
def process_image(image):
"""
Resizes and normalizes a PIL image for a PyTorch model
param: image (str): Path for the image to preprocess
returns: preprocessed image
"""
# Process a PIL image for use in a PyTorch model
# First load the image
img = Image.open(image)
# Setup the normalizer
normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Setting up image preprocessor
img_transform = transforms.Compose([transforms.Resize((224, 224)), # Resize the image to 224x224
transforms.ToTensor(), # Convert the numpy array that contains the image into a tensor
normalizer]) # Apply the normalizer
# Apply the preprocessing to the image
preproccessed_img = img_transform(img)
return preproccessed_img | /ribo_torch-0.2.tar.gz/ribo_torch-0.2/ribo_torch/utility.py | 0.876225 | 0.544317 | utility.py | pypi |
[](https://travis-ci.org/nickp60/riboSeed)
[](https://badge.fury.io/py/riboSeed)
[](https://coveralls.io/github/nickp60/riboSeed?branch=master)
[](https://zenodo.org/badge/latestdoi/68617544)
[](https://landscape.io/github/nickp60/riboSeed/master)
[](http://riboseed.readthedocs.io/en/latest/?badge=latest)

# riboSeed Pipeline
Impatient? See our [Quickstart Guide](./quickstart.md)
A brief overview of the theory can be found [here](https://nickp60.github.io/riboSeed.html).
The riboSeed manuscript can be found [here](https://academic.oup.com/nar/advance-article/doi/10.1093/nar/gky212/4955760).
## Citation
```
Nicholas R Waters, Florence Abram, Fiona Brennan, Ashleigh Holmes, Leighton Pritchard;
riboSeed: leveraging prokaryotic genomic architecture to assemble across ribosomal regions,
Nucleic Acids Research, gky212, https://doi.org/10.1093/nar/gky212
```
Interested in the figures/tables/analyses in the manuscript? See the [README](https://github.com/nickp60/riboSeed/blob/master/scripts/README.md) in the `scripts` dir.
## Table of Contents
* [`Reference Selection`](./README.md#reference-selection)
* [`Description`](./README.md#description)
* [`Installation`](./README.md#installation)
* [`Suggested Running`](./README.md#suggested-running)
* [`Contributing`](./README.md#contributing)
## Reference Selection
`riboSeed` requires an appropriate reference genome for the *de fere novo* assembly. We recommend using [PlentyOfBugs](http://github.com/nickp60/plentyofbugs), which simplifies this process by comparing a preliminary assembly of your isolate to existing reference genomes.
## Before We Start
Please back up any and all data used, and work within a virtualenv.
Genome assembly gobbles RAM. If you, like me, are working on a 4gb RAM lappy, don't run riboSeed in parallel and instead run in series by using the `--serialize` option. That should prevent you from running out of RAM during the final SPAdes calls.
## Description
riboSeed is a supplemental assembly refinement method that tries to address the issue of multiple ribosomal regions in a genome, as these create repeats unresolvable by short-read sequencing. It takes advantage of the fact that while each rDNA region is identical, the flanking regions are unique, and can therefore be used to seed an assembly in such a way that rDNA regions are bridged.
For a description of each submodule, follow the links below to the readthedocs manual page.
Preprocessing
- [`scan` | annotate reference genome rRNAs](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#scan)
- [`select` | identify rDNA operons](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#select)
*De fere novo assembly*
- [`seed` | perform interative subassembly](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#seed)
Visualizations/assessment
- [`snag` | extract and visualize rDNA regions](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#snag)
- [`stack` | calculate coverage at rDNAs in final assembly](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#stack)
- [`sketch` | plot the relative rDNA regions in a handful of genomes](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#sketch)
- [`swap` | switch questionable contigs ](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#swap)
- [`score` | automated scoring for rDNA assemblies](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#score)
- [`spec` | speculate the number of rDNA operons based on assembly graph](http://riboseed.readthedocs.io/en/latest/PIPELINE.html#spec)
## Installation
### From conda (new and recommended!)
Conda is a cross-platform, cross-language package management system. If you haven't already installed conda, follow [these instructions here](https://bioconda.github.io/index.html), and install the python3 version. Once you have that done, install riboSeed and all of its dependencies with one command:
```
conda install riboseed
```
(Note the lowercase "s")
#### From Pypi
riboSeed is on PyPI, so you can install it with pip, preferably within a conda environment (recommended):
```
conda create -n riboseed python=3.5
conda activate riboseed
pip install riboSeed
```
#### From TestPypi
To install the bleeding-edge version, install from testpypi:
```
virtualenv -p python3.5 venv-riboSeed
source venv-riboSeed/bin/activate
pip install --extra-index-url https://testpypi.python.org/pypi riboSeed
```
#### From GitHub
You can also clone this repository, and run `python setup.py install`.
### Dependencies
Python requirements can be found in the `requirements.txt` file.
### External Requirements
External requirements can be found in the `environment.yml`, and can be used to create a conda environment: (`conda env create -f environment.yml`)
NOTE: barrnap has certain Perl requirements that may not be included on your machine. Ensure barrnap runs fine before trying `ribo snag`. Or try [python barrnap](https://github.com/nickp60/barrnap/).
## Suggested Running
The `ribo run` command orchestrates the most commonly used sequence of calls to `scan`, `select`, `seed`, `sketch`, `score`, and so on.
```
usage: ribo run [-r reference.fasta] -c config_file [-o /output/dir/]
[-n experiment_name] [-K {bac,euk,arc,mito}] [-S 16S:23S:5S]
[--clusters str] [-C str] [-F reads_F.fq] [-R reads_R.fq]
[-S1 reads_S.fq] [-s int]
[--ref_as_contig {ignore,infer,trusted,untrusted}] [--linear]
[-j] [--score] [-l int] [-k 21,33,55,77,99,127]
[--force_kmers] [-p 21,33,55,77,99] [-d int] [--clean_temps]
[-i int] [-v {1,2,3,4,5}] [--cores int] [--memory int]
[--damn_the_torpedos] [-t {1,2,4}] [-z] [-h] [--version]
Run the riboSeed pipeline of scan, select, seed, sketch, and score. Uses a
config file to wrangle all the args not available via these commandline args.
This can either be run by providing (as minimum) a reference, some reads, and
an output directory; or, if you have a completed config file, you can run it
with just that.
optional arguments:
-r reference.fasta, --reference_fasta reference.fasta
path to a (multi)fasta or a directory containing one
or more chromosomal sequences in fasta format.
Required, unless using a config file
-c config_file, --config config_file
config file; if none given, create one; default:
/home/nicholas/GitHub/riboSeed
-o /output/dir/, --output /output/dir/
output directory; default: /home/nicholas/GitHub/riboS
eed/2018-06-14T1353_riboSeed_pipeline_results/
-n experiment_name, --experiment_name experiment_name
prefix for results files; default: inferred
-K {bac,euk,arc,mito}, --Kingdom {bac,euk,arc,mito}
whether to look for eukaryotic, archaeal, or bacterial
rDNA; default: bac
-S 16S:23S:5S, --specific_features 16S:23S:5S
colon:separated -- specific features; default:
16S:23S:5S
--clusters str number of rDNA clusters;if submitting multiple
records, must be a colon:separated list whose length
matches number of genbank records. Default is inferred
from specific feature with fewest hits
-C str, --cluster_file str
clustered_loci file output from riboSelect;this is
created by default from run_riboSeed, but if you don't
agree with the operon structure predicted by
riboSelect, you can use your alternate clustered_loci
file. default: None
-F reads_F.fq, --fastq1 reads_F.fq
path to forward fastq file, can be compressed
-R reads_R.fq, --fastq2 reads_R.fq
path to reverse fastq file, can be compressed
-S1 reads_S.fq, --fastq_single1 reads_S.fq
path to single fastq file
-s int, --score_min int
If using smalt, this sets the '-m' param; default with
smalt is inferred from read length. If using BWA,
reads mapping with ASscore lower than this will be
rejected; default with BWA is half of read length
--ref_as_contig {ignore,infer,trusted,untrusted}
ignore: reference will not be used in subassembly.
trusted: SPAdes will use the seed sequences as a
--trusted-contig; untrusted: SPAdes will treat as
--untrusted-contig. infer: if mapping percentage over
80%, 'trusted'; else 'untrusted'. See SPAdes docs for
details. default: infer
--linear if genome is known to not be circular and a region of
interest (including flanking bits) extends past
chromosome end, this extends the seqence past
chromosome origin forward by --padding; default: False
--subassembler {spades,skesa}
assembler to use for subassembly scheme. SPAdes is
used by default, but Skesa is a new addition that
seems to work for subassembly and is faster
-j, --just_seed Don't do an assembly, just generate the long read
'seeds'; default: False
--score run riboScore too! default: False
-l int, --flanking_length int
length of flanking regions, in bp; default: 1000
-k 21,33,55,77,99,127, --kmers 21,33,55,77,99,127
kmers used for final assembly, separated by commas
such as21,33,55,77,99,127. Can be set to 'auto', where
SPAdes chooses. We ensure kmers are not too big or too
close to read length; default: 21,33,55,77,99,127
--force_kmers skip checking to see if kmerchoice is appropriate to
read length. Sometimes kmers longer than reads can
help in the final assembly, as the long reads
generated by riboSeed contain kmers longer than the
read length
-p 21,33,55,77,99, --pre_kmers 21,33,55,77,99
kmers used during seeding assemblies, separated bt
commas; default: 21,33,55,77,99
-d int, --min_flank_depth int
a subassembly won't be performed if this minimum depth
is not achieved on both the 3' and5' end of the
pseudocontig. default: 0
--clean_temps if --clean_temps, mapping files will be removed once
they are no no longer needed during the mapping
iterations to save space; default: False
-i int, --iterations int
if iterations>1, multiple seedings will occur after
subassembly of seed regions; if setting --target_len,
seedings will continue until --iterations are
completed or --target_len is matched or exceeded;
default: 3
-v {1,2,3,4,5}, --verbosity {1,2,3,4,5}
Logger writes debug to file in output dir; this sets
verbosity level sent to stderr. 1 = debug(), 2 =
info(), 3 = warning(), 4 = error() and 5 = critical();
default: 2
--cores int cores used; default: None
--memory int cores for multiprocessing; default: 8
--damn_the_torpedos Ignore certain errors, full speed ahead!
-t {1,2,4}, --threads {1,2,4}
if your cores are hyperthreaded, set number threads to
the number of threads per processer.If unsure, see
'cat /proc/cpuinfo' under 'cpu cores', or 'lscpu'
under 'Thread(s) per core'.: 1
-z, --serialize if --serialize, runs seeding and assembly without
multiprocessing. We recommend this for machines with
less than 8GB RAM: False
-h, --help Displays this help message
--version show program's version number and exit
```
## Contributing
Pull requests are more than welcome!
### Known Bugs
#### X server
You may run into issues where you get an error about "Unable to connect to X server: None" or localhost:N. Sorry about that; any tips would be useful; a quick glance at the commit history will show I have spent much time trying to resolve it, without any luck. If you do run into this, try the following:
- connect to the machine with an X session (`ssh -X hostname`)
- avoid using `gnu screen` if possible, but if you do need to use it, start the `screen` session after ensuring you have a `$DISPLAY` available through starting the host session with `-X`
#### Pysam on MacOS
If you are on MacOS, you may run into an issue with Pysam.
```
ImportError: dlopen(/Users/nicholas/miniconda3/envs/ribo/lib/python3.5/site-packages/pysam/libchtslib.cpython-35m-darwin.so, 2): Library not loaded: @rpath/liblzma.5.dylib
Referenced from: /Users/nicholas/miniconda3/envs/ribo/lib/libhts.2.dylib
Reason: Incompatible library version: libhts.2.dylib requires version 8.0.0 or later, but liblzma.5.dylib provides version 6.0.0
```
The simplest solution is to pip install pysam, forcing the original to be overwritten:
```
pip install pysam -U
```
In cases where this does not work, try installing by first making a conda env with the `environment.yaml` file, and then installing riboSeed from pip.
```
conda env create -f environment.yml
source activate ribo
pip install riboSeed
```
If you run into malloc issues similar to https://github.com/ablab/spades/issues/9, we recommend running in a VM.
#### smalt scoring
Submitting `--smalt_scoring` with vastly different scoring schemes usually causes an error.
### Running Tests
The tests for the module can be found under the `tests` directory. I run them with the unittest module. The tests assume the installation of all the recommended tools.
| /riboSeed-0.4.90.tar.gz/riboSeed-0.4.90/README.md | 0.705785 | 0.978302 | README.md | pypi |
## RiboDetector - Accurate and rapid RiboRNA sequences Detector based on deep learning
### About Ribodetector
<img src="RiboDetector_logo.png" width="600" />
`RiboDetector` is a software tool developed to accurately yet rapidly detect and remove rRNA sequences from metagenomic, metatranscriptomic, and ncRNA sequencing data. It is based on LSTMs and is optimized for both GPU and CPU usage, achieving runtimes roughly **10** times faster on CPU and **50** times faster on a consumer GPU than the current state-of-the-art software. Moreover, it is very accurate, with ~**10** times fewer false classifications. Finally, it shows a low level of bias towards any GO functional groups.
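For readers curious about the general idea, below is a minimal, illustrative sketch of a bidirectional LSTM read classifier in PyTorch. It is **not** the actual RiboDetector network; the layer sizes, the one-hot input encoding and the two-class output used here are assumptions made purely for illustration.
```python
import torch
import torch.nn as nn

class ToyReadClassifier(nn.Module):
    """Illustrative bidirectional LSTM over one-hot encoded reads (not the real RiboDetector model)."""

    def __init__(self, n_bases=4, hidden_size=128, n_classes=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size=n_bases, hidden_size=hidden_size,
                            batch_first=True, bidirectional=True)
        self.classifier = nn.Linear(2 * hidden_size, n_classes)  # 2x: forward + backward states

    def forward(self, x):
        # x: (batch, read_length, n_bases) one-hot encoded reads
        out, _ = self.lstm(x)
        # classify using the last time step's concatenated hidden states
        return self.classifier(out[:, -1, :])

# Classify a toy batch of 8 reads of length 100 into rRNA / non-rRNA
model = ToyReadClassifier()
reads = torch.randn(8, 100, 4)                 # placeholder for one-hot encoded reads
probabilities = torch.softmax(model(reads), dim=-1)
```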
### Prerequirements
#### 1. Create `conda` env and install `Python v3.8`
To be able to use `RiboDetector`, all you need to do is install `Python v3.8` with `conda` (make sure you have version `3.8`, because `3.7` cannot serialize a string larger than 4 GiB):
```shell
conda create -n ribodetector python=3.8
conda activate ribodetector
```
#### 2. Install `pytorch` in the ribodetector env if GPU is available
To install a `pytorch` build compatible with your CUDA version, please follow the instructions at
https://pytorch.org/get-started/locally/. Our code was tested with `pytorch v1.7`, `v1.7.1`, `v1.10.2`.
Note: you can skip this step if you don't use GPU
### Installation
```shell
pip install ribodetector
```
### Usage
#### GPU mode
#### Example
```shell
ribodetector -t 20 \
-l 100 \
-i inputs/reads.1.fq.gz inputs/reads.2.fq.gz \
-m 10 \
-e rrna \
--chunk_size 256 \
-o outputs/reads.nonrrna.{1,2}.fq.gz
```
The command line above runs ribodetector on paired-end reads of length 100 using the GPU and 20 CPU cores
#### Full help
```shell
usage: ribodetector [-h] [-c CONFIG] [-d DEVICEID] -l LEN -i [INPUT [INPUT ...]]
-o [OUTPUT [OUTPUT ...]] [-r [RRNA [RRNA ...]]] [-e {rrna,norrna,both,none}]
[-t THREADS] [-m MEMORY] [--chunk_size CHUNK_SIZE] [-v]
rRNA sequence detector
optional arguments:
-h, --help show this help message and exit
-c CONFIG, --config CONFIG
Path of config file
-d DEVICEID, --deviceid DEVICEID
Indices of GPUs to enable. Quotated comma-separated device ID
numbers. (default: all)
-l LEN, --len LEN Sequencing read length, should be not smaller than 50.
-i [INPUT [INPUT ...]], --input [INPUT [INPUT ...]]
Path of input sequence files (fasta and fastq), the second
file will be considered as second end if two files given.
-o [OUTPUT [OUTPUT ...]], --output [OUTPUT [OUTPUT ...]]
Path of the output sequence files after rRNAs removal (same
number of files as input). (Note: 2 times slower to write gz
files)
-r [RRNA [RRNA ...]], --rrna [RRNA [RRNA ...]]
Path of the output sequence file of detected rRNAs (same
number of files as input)
-e {rrna,norrna,both,none}, --ensure {rrna,norrna,both,none}
Only output certain sequences with high confidence
norrna: output non-rRNAs with high confidence, remove as many
rRNAs as possible;
rrna: vice versa, output rRNAs with high confidence;
both: both non-rRNA and rRNA prediction with high confidence;
none: give label based on the mean probability of read pair.
(Only applicable for paired end reads, discard the read
pair when their predicitons are discordant)
-t THREADS, --threads THREADS
number of threads to use. (default: 10)
-m MEMORY, --memory MEMORY
amount (GB) of GPU RAM. (default: 12)
--chunk_size CHUNK_SIZE
Use this parameter when having low memory. Parsing the file in
chunks.
Not needed when free RAM >=5 * your_file_size (uncompressed,
sum of paired ends).
When chunk_size=256, memory=16 it will load 256 * 16 * 1024
reads each chunk (use ~20 GB for 100bp paired end).
-v, --version show program's version number and exit
```
#### CPU mode
#### Example
```shell
ribodetector_cpu -t 20 \
-l 100 \
-i inputs/reads.1.fq.gz inputs/reads.2.fq.gz \
-e rrna \
--chunk_size 256 \
-o outputs/reads.nonrrna.{1,2}.fq.gz
```
The above command line runs ribodetector on paired-end reads of length 100 using 20 CPU cores.
When using the SLURM job submission system, set `--cpus-per-task` to the number of CPU cores you need and set `--threads-per-core` to 1.
#### Full help
```shell
usage: ribodetector_cpu [-h] [-c CONFIG] -l LEN -i [INPUT [INPUT ...]]
-o [OUTPUT [OUTPUT ...]] [-r [RRNA [RRNA ...]]] [-e {rrna,norrna,both,none}]
[-t THREADS] [--chunk_size CHUNK_SIZE] [-v]
rRNA sequence detector
optional arguments:
-h, --help show this help message and exit
-c CONFIG, --config CONFIG
Path of config file
-l LEN, --len LEN Sequencing read length, should be not smaller than 50.
-i [INPUT [INPUT ...]], --input [INPUT [INPUT ...]]
Path of input sequence files (fasta and fastq), the second
file will be considered as second end if two files given.
-o [OUTPUT [OUTPUT ...]], --output [OUTPUT [OUTPUT ...]]
Path of the output sequence files after rRNAs removal (same
number of files as input).
(Note: 2 times slower to write gz files)
-r [RRNA [RRNA ...]], --rrna [RRNA [RRNA ...]]
Path of the output sequence file of detected rRNAs (same
number of files as input)
-e {rrna,norrna,both,none}, --ensure {rrna,norrna,both,none}
Only output certain sequences with high confidence
norrna: output non-rRNAs with high confidence, remove as many
rRNAs as possible;
rrna: vice versa, output rRNAs with high confidence;
both: both non-rRNA and rRNA prediction with high confidence;
none: give label based on the mean probability of read pair.
(Only applicable for paired end reads, discard the read
pair when their predicitons are discordant)
-t THREADS, --threads THREADS
number of threads to use. (default: 10)
--chunk_size CHUNK_SIZE
chunk_size * threads reads to process per thread.(default:
1024)
When chunk_size=1024 and threads=20, each process will load
1024 reads, in total consumming ~20G memory.
-v, --version show program's version number and exit
```
<!-- ### Benchmarks
We benchmarked five different rRNA detection methods including RiboDetector on 8 benchmarking datasets as following:
- 20M paired end reads simulated based on rRNA sequences from Silva database, those sequences are distinct from sequences used for training and validation.
- 20M paired end reads simulated based on 500K CDS sequences from OMA databases.
- 27,206,792 paired end reads simulated based on 13,848 viral gene sequences downloaded from ENA database.
- 7,917,920 real paired end amplicon sequencing reads targeting V1-V2 region of 16s rRNA genes from oral microbiome study.
- 6,330,381 paired end reads simulated from 106,880 human noncoding RNA sequences.
- OMA_Silva dataset in figure C contains 1,027,675 paired end reads simulated on CDS sequences which share similarity to rRNA genes, the sequences with identity >=98% and query coverage >=90% to rRNAs were excluded.
- HOMD dataset in figure C has 100,558 paired end reads simulated on CDS sequences from HOMD database which share similarity to the FP sequences of three tools, again sequences with identity >=98% and query coverage >=90% to rRNAs were excluded.
- GO_FP_N_02 in figure C consisting of 678,250 paired end reads was simulated from OMA sequences which have the GO with FP reads ratio >=0.2 on 20M mRNA reads dataset for BWA, RiboDetector or SortMeRNA.

In the above figures, the definitions of *FPNR* and *FNR* are:
<img src="https://render.githubusercontent.com/render/math?math=\large FPNR=100\frac{false \:predictions}{total \: sequences}">
<img src="https://render.githubusercontent.com/render/math?math=\large FNR=100\frac{false \:negatives}{total \:positives}">
RiboDetector has a very high generalization ability and is capable of detecting novel rRNA sequences (Fig. C). -->
### Acknowledgements
The scripts from the `base` dir were from the template [pytorch-template
](https://github.com/victoresque/pytorch-template) by [Victor Huang](https://github.com/victoresque) and other [contributors](https://github.com/victoresque/pytorch-template/graphs/contributors).
| /ribodetector-0.2.4.tar.gz/ribodetector-0.2.4/README.md | 0.575588 | 0.955152 | README.md | pypi |
# RIBOFLOW - classifying riboswitches with >99% accuracy
[riboflow](https://test.pypi.org/project/riboflow/) is a python package for classifying putative riboswitch sequences into one of 32 classes with > 99% accuracy. It is based on a [tensorflow](https://www.tensorflow.org) deep learning model. ``riboflow`` has been tested using ``Python 3.5.2``.
Installation
------------
The easiest way to install the package is via ``pip``
$ pip install riboflow
Dependencies:
numpy==1.14.5
tensorflow==1.8.0
keras==2.2.0
A trained ``Bi-directional Recurrent Neural Network (RNN) Model`` is integrated into the ``riboflow`` package (and installed automatically with ``pip``). Note that the source code to generate the ``Bi-directional Recurrent Neural Network Model`` is available. The git repository [Riboswitch Classification](https://github.com/RiboswitchClassifier/RiboswitchClassification) can be forked to generate a new model.
Problem Statement
-------------------
Riboswitches are metabolite-sensing mRNAs, e.g., amino acid or metal ion sensors, that switch conformation upon binding the cognate ligand, thereby exerting control on translation. It would be of interest to classify the ligand-specificity of riboswitches given their sequence.
**The prediction problem**:
Given the riboswitch sequence, predict the riboswitch class (as given by the ligand-specificity of the riboswitch).
**Machine learning formulation**:
- Input: Riboswitch sequence
- Source dataset: Rfam database (rfam.org)
- Output: Riboswitch class
- Best-performing Classifier: Bi-directional RNN (>99% accuracy)
- Features used in the best-performing classifier: the full riboswitch sequence
Usage
-------------------
Once `riboflow` is installed, please follow the steps to predict the class of a new riboswitch sequence:
**1. Import the package**:
- Inside the python shell or in the python file::
> import riboflow
**2. Construct a list of riboswitch sequences**:
> # A sequence is a string in alphabet 'ATGC'
> sequences = [
"TTTTTTTTGCAGGGGTGGCTTTAGGGCCTGAGAAGATACCCATTGAACCTGACCTGGCTAAAACCAGGGTAGGGAATTGCAGAAATGTCCTCATT",
"CTCTTATCCAGAGCGGTAGAGGGACTGGCCCTTTGAAGCCCAGCAACCTACACTTTTTGTTGTAAGGTGCTAACCTGAGCAGGAGAAATCCTGACCGATGAGAG",
"CCACGATAAAGGTAAACCCTGAGTGATCAGGGGGCGCAAAGTGTAGGATCTCAGCTCAAGTCATCTCCAGATAAGAAATATCAGAAAGATAGCCTTACTGCCGAA"
]
**3a. Predict the class for each riboswitch sequence**:
> # Predict the most probable riboswitch class of each sequence
> riboflow.predict(sequences, "predict_class")
**3b. Predict a vector of class probabilities for each riboswitch sequence**:
> # Predict probabilty of each riboswitch class associated with each sequence
> riboflow.predict(sequences, "predict_prob")
Riboswitches Accounted For
------------
1. 'RF00504 - Glycine Riboswitch'
2. 'RF01786 - Cyclic di-GMP-II riboswitch'
3. 'RF01750 - ZMP/ZTP riboswitch'
4. 'RF00059 - TPP riboswitch (THI element)'
5. 'RF01057 - S-adenosyl-L-homocysteine riboswitch'
6. 'RF01725 - SAM-I/IV variant riboswitch'
7. 'RF00162 - SAM riboswitch (S box leader)'
8. 'RF00174 - Cobalamin riboswitch'
9. 'RF01055 - Molybdenum Cofactor riboswitch'
10. 'RF01727 - SAM/SAH Riboswitch'
11. 'RF01482 - Abocbl Riboswitch'
12. 'RF03057 - nhaA-I RNA'
13. 'RF01734 - Fluroride riboswitch'
14. 'RF00167 - Purine Riboswitch'
15. 'RF00234 - glmS glucosamine-6-phosphate activated ribozyme'
16. 'RF01739 - Glutamine riboswitch'
17. 'RF03072 - raiA RNA'
18. 'RF03058 - sul RNA'
19. 'RF00380 - yKoK leader'
20. 'RF00168 - Lysine Riboswitch'
21. 'RF03071 - DUF1646 RNA'
22. 'RF01689 - Abocbl variant RNA'
23. 'RF00379 - ydaO/yuaA leader'
24. 'RF00634 - S-adenosyl methionine (SAM) riboswitch'
25. 'RF01767 - SMK box translational riboswitch (SAM-III)'
26. 'RF00080 - yybP-ykoY manganese riboswitch'
27. 'RF02683 - NiCo riboswitch'
28. 'RF00442 - Guanidine-I Riboswitch'
29. 'RF00522 - PreQ1 Riboswitch'
30. 'RF00050 - FMN Riboswitch'
31. 'RF01831 - THF riboswitch'
32. 'RF00521 - SAM riboswitch (alpha-proteobacteria)'
Additional information
-----
For more information, please refer to our manuscript below.
*Premkumar KAR, Bharanikumar R, Palaniappan A.* (2019) Classifying riboswitches with >99% accuracy. **Microorganisms** (to be submitted)
Please cite us if you use our services.
Package Structure
-----
.
├── build # Buildout project configuration
├── dist # Consists of .whl and .tar package files
├── riboflow # Package Directory
│ ├── __init__.py # main file
│ ├── rnn_32_model.h5 # Bi-directional Recurrent Neural Network Model
├── riboflow.egg-info # Egg information of the project
├── LICENSE # License
├── MANIFEST.in # To include the Bi-directional Recurrent Neural Network Model within the package
├── README.md # Package description
└── setup.py # Package metadata
References and acknowledgements for pypi package development
----------
* http://fouryears.eu/2014/03/19/structure-of-a-python-project/
* http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
* [Bharat Goel](https://github.com/BharatGoel36) provided help in packaging the application.
Authors
----------
* [Keshav Aditya R.P](https://keshavadityarp.github.io)
- [Github](https://github.com/KeshavAdityaRP)
- [LinkedIn](https://www.linkedin.com/in/keshavadityarp/)
* Ramit Bharanikumar
- [Github](https://github.com/ramit29)
- [LinkedIn](https://www.linkedin.com/in/ramit-bharanikumar-12a014114/)
* Ashok Palaniappan
- [Senior Assistant Professor](http://www.sastra.edu/staffprofiles/schools/scbt.php?staff_id=C2164)
- [Github](https://github.com/apalania)
- [LinkedIn](https://www.linkedin.com/in/ashokpalaniappan/)
Copyright & License
-------------------
Copyright (c) 2019, the Authors. MIT License.
| /riboflow-1.1.2.tar.gz/riboflow-1.1.2/README.md | 0.853058 | 0.888566 | README.md | pypi |
============================================================================
Manual
============================================================================
Contents
========
riboraptor is a comprehensive pipeline and set of tools for
analyzing Ribo-seq data. This manual explains
the stages in our pipeline, how to use the analysis tools and how to modify
the pipeline for your specific context.
Assumptions
===========
Our pipeline was designed to run in a cluster computing context, with many
processing nodes available, and a job submission system like PBS or SGE.
Much of this analysis is computationally intensive. We assume that individual
nodes will have several GB of memory available for processing.
Translatome construction
================================
Ribo-seq experiments are always single-end sequenced. Ribosome-protected
fragments range from 28 to 32 nucleotides, and hence most experiments involve
50 bp single-end reads. Before mapping, we need to remove the adapters
ligated to the 3' end of the fragments as part of the library protocol.
Trimming Reads
--------------
We use trim_galore_ for trimming. It automates adapter trimming:
.. code-block:: console
$ trim_galore -o <out_dir> -q <min_quality> <input.fq.gz>
-o out_dir Output directory
-q min_quality Trim low-quality ends from reads in addition to adapter removal
Mapping Reads
-------------
We use STAR_ to map reads, though other splice-aware aligners can also be used.
The first step is to create an index, preferably using a GTF file.
If the index step is run without a GTF file (which is optional), STAR_ will
not be splice-aware.
Creating Index
~~~~~~~~~~~~~~
.. code-block:: console
$ STAR --runThreadN <threads>\
--runMode genomeGenerate\
--genomeDir <index_out_dir>\
--genomeSAindexNbases <SA_INDEX_Nbases>\
--genomeFastaFiles <input.fasta>\
--sjdbGTFfile <input.gtf>
--runThreadN Number of threads to use
--runMode Flag to set for index mode
--genomeDir Directory to write index files to
--genomeSAindexNbases min(14, log2(GenomeLength)/2 - 1), this **must** be scaled down for small genomes
--genomeFastaFiles Path to reference fasta
--sjdbGTFfile Path to GTF file
Mapping
~~~~~~~
Often Ribo-seq experiments have a matching RNA-seq library. Ideally, the RNA-seq
library should be as similar to the Ribo-seq library as possible, and hence will often be single-end.
We recommend both RNA-seq and Ribo-seq samples be mapped using the following parameters:
.. code-block:: console
$ STAR --runThreadN <threads>\
--genomeDir <input.index>\
--outFilterMismatchNmax 2\
--alignIntronMin <ALIGN_INTRON_Nmin>\
--alignIntronMax <ALIGN_INTRON_Nmax>\
--outFileNamePrefix <params.prefix> --readFilesIn <input.R1>\
--outSAMtype BAM Unsorted\
--readFilesCommand zcat\
--quantMode TranscriptomeSAM\
--outTmpDir /tmp/<params.name>_tmp\
--outReadsUnmapped Fastx\
--runThreadN Number of threads to use
--genomeDir Path to index directory
--outFilterMismatchNmax Allow a maximum of mismatches=2
--alignIntronMin Minimum intron size. Any genomic gap
is considered intron if its
length >= alignIntronMin. (Default = 20)
--alignIntronMax Maximum intron size (Default = 1000000)
--outFileNamePrefix Prefix for output files
--readFilesIn Path to input fastq.gz
--outSAMtype Output an unsorted BAM file (outtype=BAM Unsorted)
--readFilesCommand cat/zcat depending on input is fq/fq.gz
--quantMode Also output BAM aligned to the transcriptome
--outTmpDir Directory to use for writing temporary files
--outReadsUnmapped Write unmapped reads to separate fastq file
Sorting and Indexing
~~~~~~~~~~~~~~~~~~~~
BAM files output by STAR are not sorted. We need a BAM file sorted
by coordinates.
.. code-block:: console
$ samtools sort <prefix>Aligned.out.bam -o <output.bam> -T <tmpdir>_sort &&\
$ samtools index <prefix>Aligned.out.bam
Additionally, we also need a BAM file sorted by name, since htseq-counts_
(and featureCounts_) prefer a BAM sorted by name in their default mode.
.. code-block:: console
$ samtools sort -on <input.bam> -T <tmpdir> -o <output.bam> &&\
$ samtools index <output.bam>
Translatome analysis
============================
Once we have the BAMs, we are ready for downstream analysis, which
involves a number of steps.
The following list summarises these steps along with their recommended
values (wherever applicable):
* Quality Control
- Number of uniquely mapped reads : >=5M
- Periodicity : TODO
- Ratio of CDS/(5'UTR+3'UTR) : >1 after length normalization (see the sketch after this list)
- Fragment length distribution : Peak around 28-32 nt
* Metagene analysis
- P-site offsets : Around 12-14 nt upstream of the start codon when counting based on 5'end
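To make the length normalization in the CDS/UTR ratio above concrete, here is a small sketch (not part of riboraptor; the function and variable names are placeholders) that divides raw counts by region lengths before taking the ratio:
.. code-block:: python

    def length_normalized_cds_ratio(cds_counts, cds_length,
                                    utr5_counts, utr5_length,
                                    utr3_counts, utr3_length):
        """Ratio of CDS read density to UTR read density; > 1 suggests CDS enrichment."""
        cds_density = cds_counts / cds_length
        utr_density = (utr5_counts + utr3_counts) / (utr5_length + utr3_length)
        return cds_density / utr_density

    # e.g. 90,000 CDS reads over 30 kb vs 10,000 UTR reads over 6 kb gives a ratio of 1.8
    print(length_normalized_cds_ratio(90000, 30000, 6000, 3000, 4000, 3000))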
Counting uniquely mapped reads
------------------------------
The first step is to simply calculate the number of uniquely mapped reads.
We recommend a minimum of 5 million reads for any downstream analysis.
TODO: list different recommendation for different species
.. code-block:: console
$ riboraptor uniq-mapping-count --bam <input.bam>
--bam input.bam Path to bam file
Read length distribution
------------------------
An ideal Ribo-seq library is expected to be most enriched in 28-32 nt fragments.
We can calculate this enrichment and plot the fragment size distribution using riboraptor.
The read length distribution can be calculated using the `read-length-dist` subcommand:
.. code-block:: console
$ riboraptor read-length-dist --bam <input.bam>
This will print out the read length and associated counts on the console. In order to
visualize these counts as a barplot, we can use the `plot-read-dist` subcommand:
.. code-block:: console
$ riboraptor read-length-dist --bam <input.bam>\
| riboraptor plot-read-dist --saveto <output.png>
Metagene Analysis
-----------------
A metagene plot is a summary statistic used to visualize the distribution of ribosome-protected
fragments along the positions of a gene, often anchored at the start (or stop)
codon. This is useful for estimating P-site offsets. The ribosome subunits are known to protect
28-32 nt, and hence the P-site is often located ~12 nt downstream of the 5' position of the mapped read.
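To make this offsetting concrete, here is a minimal sketch (not riboraptor code; the fixed 12 nt offset and the pysam calls are illustrative assumptions, and real offsets should be estimated per read length) that shifts each read's 5' end to an approximate P-site:
.. code-block:: python

    import pysam

    P_SITE_OFFSET = 12  # assumed fixed offset; in practice, estimate it per read length

    def approximate_p_sites(bam_path):
        """Yield (chromosome, P-site position) for each mapped read by shifting its 5' end."""
        with pysam.AlignmentFile(bam_path, "rb") as bam:
            for read in bam.fetch():
                if read.is_unmapped:
                    continue
                if read.is_reverse:
                    # the 5' end of a reverse-strand read is its rightmost reference base
                    p_site = (read.reference_end - 1) - P_SITE_OFFSET
                else:
                    p_site = read.reference_start + P_SITE_OFFSET
                yield read.reference_name, p_site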
Creating bigWig file
~~~~~~~~~~~~~~~~~~~~~
To perform metagene analysis, we will work with the bigWig_ format. In order to do that, we need an
intermediate bedGraph_ file. This can be done using the `bam-to-bedgraph` subcommand:
.. code-block:: console
$ riboraptor bam-to-bedgraph --bam <input.bam>
This will print the bedGraph to the console. This can be piped to the `bedgraph-to-bigwig` subcommand:
.. code-block:: console
$ riboraptor bam-to-bedgraph --bam <input.bam> \
| riboraptor bedgraph-to-bigwig --sizes <genome.sizes> --saveto <output.bw>
We now have `<output.bw>` ready for further downstream analysis.
Distribution in 5'UTR/3'UTR/CDS regions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TODO (See Example)
Metagene plot
~~~~~~~~~~~~~
TODO (See Example)
Example
=======
We will use two samples from GSE94454_, one RNA-seq sample (SRR5227310) and
one Ribo-seq sample (SRR5227306), as examples in what follows.
.. code-block:: console
$ riboraptor uniq-mapping-count --bam data/SRR5227310.bam
28637667
This is a pretty deep library.
.. code-block:: console
$ riboraptor read-length-dist --bam data/SRR5227310.bam\
| riboraptor plot-read-dist --saveto SRR5227310.png
.. figure:: images/SRR5227310.png
:align: center
:width: 100%
:alt: Fragment length distribution SRR5227310
:figclass: align center
Fragment length distribution for SRR5227310
How enriched is it in 27-32 nt fragment range?
.. code-block:: console
$ riboraptor read-length-dist --bam data/SRR5227310.bam\
| riboraptor read-enrichment
(Enrichment: 1.52768004237, pval: 0.458943823895)
So the fragment length distribution doesn't seem to be enriched. We next perform metagene
analysis. Ribo-seq data is expected to have an inherent periodicity of 3, since ribosomes move
one codon at a time during active translation.
.. code-block:: console
$ riboraptor bedgraph-to-bigwig -bg data/SRR5227310.bg -s hg38 -o data/SRR5227310.bw
.. code-block:: console
$ riboraptor metagene-coverage -bw data/SRR5227310.bw \
--region_bed hg38_cds --max-positions 500 \
--prefix data/SRR5227310.metagene --offset 60 --ignore_tx_version
.. code-block:: console
$ riboraptor plot-read-counts \
--counts data/SRR5227310.metagene_metagene_normalized.pickle\
--saveto data/SRR5227310.metagene.png
.. figure:: images/SRR5227310.metagene.png
:align: center
:width: 100%
:alt: Metagene distribution for SRR5227310
:figclass: align center
Metagene distribution for SRR5227310
Since the metagene gives a summary statistic, we can also look at the absolute counts distribution per frame:
.. code-block:: console
$ riboraptor plot-framewise-counts --counts data/SRR5227310.metagene_metagene_raw.pickle\
--saveto data/SRR5227310.framewise.png
.. figure:: images/SRR5227310.framewise.png
:align: center
:width: 100%
:alt: Framewise distribution for SRR5227310
:figclass: align center
Framewise distribution for SRR5227310
Let's try another sample: SRR5227306 and compare it with SRR5227310
with respect to distribution of reads.
.. code-block:: console
$ riboraptor uniq-mapping-count --bam data/SRR5227306.bam
10658208
.. code-block:: console
$ riboraptor read-length-dist --bam data/SRR5227306.bam | riboraptor plot-read-dist --saveto SRR5227306.png
.. figure:: images/SRR5227306.png
:align: center
:width: 100%
:alt: Fragment length distribution SRR5227306
:figclass: align center
Fragment length distribution for SRR5227306
.. code-block:: console
$ riboraptor read-length-dist --bam data/SRR5227306.bam | riboraptor read-enrichment
(Enrichment: 14.0292145986, pval: 0.135220082438)
As compared to SRR5227310, the enrichment in this case is almost 10 times higher.
.. code-block:: console
$ riboraptor plot-framewise-counts --counts data/SRR5227306.metagene_metagene_raw.pickle\
--saveto data/SRR5227306.framewise.png
.. figure:: images/SRR5227306.framewise.png
:align: center
:width: 100%
:alt: Framewise distribution for SRR5227306
:figclass: align center
Framewise distribution for SRR5227306
We can see the framewise distribution of reads in SRR5227310 is more or less uniform, but not so in SRR5227306.
.. code-block:: console
$ riboraptor bedgraph-to-bigwig -bg data/SRR5227306.bg -s hg38 -o data/SRR5227306.bw
.. code-block:: console
$ riboraptor metagene-coverage -bw data/SRR5227306.bw \
--region_bed hg38_cds --max-positions 500 \
--prefix data/SRR5227306.metagene --offset 60 --ignore_tx_version
.. code-block:: console
$ riboraptor plot-read-counts \
--counts data/SRR5227306.metagene_metagene_normalized.pickle\
--saveto data/SRR5227306.metagene.png
.. figure:: images/SRR5227306.metagene.png
:align: center
:width: 100%
:alt: Metagene distribution for SRR5227306
:figclass: align center
Metagene distribution for SRR5227306
The metagene of a Ribo-seq sample will show periodicity, as in the case of sample SRR5227306.
On the other hand, an RNA-seq sample like SRR5227310 will tend to have a flat profile.
Distribution of 5'UTR/CDS/3'UTR counts
--------------------------------------
TODO
.. _trim_galore: https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/
.. _STAR: https://github.com/alexdobin/STAR
.. _riboraptor: https://github.com/saketkc/riboraptor
.. _GSE94454: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE94454
.. _htseq-counts: https://htseq.readthedocs.io/
.. _featureCounts: http://bioinf.wehi.edu.au/featureCounts/
.. _bigWig: https://genome.ucsc.edu/goldenpath/help/bigWig.html
.. _bedGraph: https://genome.ucsc.edu/goldenpath/help/bedgraph.html
| /riboraptor-0.2.2.tar.gz/riboraptor-0.2.2/docs/cmd-manual.rst | 0.753013 | 0.730049 | cmd-manual.rst | pypi |
README for Ribo-TISH (0.2.6)
==================================
<2020-11-15 Peng Zhang>
Introduction
============
Translation is a critical step in gene regulation that synthesizes proteins from a given RNA template. The development of the ribosome profiling (riboseq) technique has enabled the measurement of translation at a genome-wide level. The basic idea of ribosome profiling is to perform deep sequencing of the ribosome-protected mRNA fragments (~30 nt), termed ribosome footprints, to determine the occupancy of translating ribosomes on a given mRNA. There are several variants of the ribosome profiling technique that are based on the use of different translation inhibitors. Regular ribo-seq utilizes cycloheximide (CHX), a translation elongation inhibitor, to freeze all translating ribosomes. In contrast to CHX, the translation inhibitors lactimidomycin (LTM) and harringtonine (Harr) have a much stronger effect on initiating ribosomes. The use of these two inhibitors, coupled with ribosome profiling (TI-Seq), allows for the global mapping of translation initiation sites (TISs). In addition, when LTM is used sequentially with puromycin (PMY), TISs can be mapped quantitatively and compared between different conditions.
We present a novel algorithm, named Ribo TIS Hunter (Ribo-TISH), for identifying translation activities using ribosome profiling data. Ribo-TISH uses statistical tests to assess the significance of translation activities: it captures significant TISs using a negative binomial test, and frame-biased open reading frames (ORFs) using a rank sum test. Ribo-TISH can also perform differential analysis between two TI-Seq datasets.
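As a toy illustration of the kind of frame-bias test described above (this is not Ribo-TISH's implementation, and the counts below are made up), in-frame per-codon counts can be compared against off-frame counts with a rank sum test:
::

    from scipy.stats import ranksums

    # hypothetical P-site counts per codon of a candidate ORF, split by frame
    in_frame = [12, 9, 15, 7, 11, 10, 14, 8]                      # frame 0, expected to dominate
    off_frame = [2, 1, 3, 0, 2, 1, 4, 1, 2, 0, 1, 3, 2, 1, 0, 2]  # frames 1 and 2

    stat, pvalue = ranksums(in_frame, off_frame)
    print('rank sum statistic = %.2f, p = %.3g' % (stat, pvalue))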
Install
=======
Please check the file 'INSTALL.rst' in the distribution.
Usage of Ribo-TISH
========================
::
ribotish [-h] [--version] {quality,predict,tisdiff}
:Example for quality control: ``ribotish quality -b ltm.bam -g gene.gtf -t``
:Example for prediction: ``ribotish predict -t ltm.bam -b chx.bam -g gene.gtf -f genome.fa -o pred.txt``
:Example for differential TIS: ``ribotish tisdiff -1 pred1.txt -2 pred2.txt -a qti1.bam -b qti2.bam -g gene.gtf -o diff.txt``
There are 3 functions available as sub-commands.
:quality: Quality control for riboseq bam data.
:predict: Main function to predict ORF/TIS.
:tisdiff: Call diffential TIS between two TIS data
The main input data is in bam file format. For best performance, reads should be *trimmed* (to ~29 nt RPF length) and aligned to the genome in *end-to-end* mode (no soft-clipping). Intron splicing is supported. Some attributes such as NM, NH and MD are needed. For STAR, ```--outSAMattributes All``` should be set. The bam file should be sorted and indexed with samtools_.
All positions or regions reported by Ribo-TISH are 0 based, half open, same as in bed_ format.
.. _samtools: https://github.com/samtools/samtools
.. _bed: https://genome.ucsc.edu/FAQ/FAQformat.html#format1
quality
~~~~~~~
Quality control of riboseq bam data. This function checks the read distribution around annotated protein-coding regions on user-provided transcripts, shows frame bias and estimates P-site offsets for different groups of reads. Reads are grouped by read length as well as by 5' end match or mismatch. 5' end mismatch ('m0') reads often have a different distribution from matched reads. To turn off 5' end mismatch grouping, use ```--nom0```.
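For intuition only, the sketch below (not Ribo-TISH code; it assumes end-to-end alignments carrying MD tags and no clipping) shows one way such a group key could be derived from a pysam alignment record:
::

    def read_group(read):
        """Illustrative group key: (read length, has 5' end mismatch). `read` is a pysam.AlignedSegment."""
        pairs = read.get_aligned_pairs(with_seq=True)  # requires the MD tag
        # the read's 5' end is the first query base for forward reads,
        # and the last query base for reverse-strand reads
        query_pos, ref_pos, ref_base = pairs[-1] if read.is_reverse else pairs[0]
        has_5p_mismatch = ref_base is not None and ref_base.islower()  # pysam lowercases mismatched reference bases
        return read.query_length, has_5p_mismatch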
There are 3 output files: a txt file recording all distribution data, a pdf figure file and a python file for P-site offset parameters.
Quick examples:
For regular riboseq
::
ribotish quality -b chx.bam -g gene.gtf
For TI-Seq data
::
ribotish quality -b ltm.bam -g gene.gtf -t
Options
--------------
-b RIBOBAMPATH
``````````````
Riboseq bam data file. Reads should be trimmed and aligned to genome.
-g GENEPATH
```````````
Gene annotation file. Acceptable formats include gtf, gff, bed and genepred with gene names. The input file format can be auto detected or specified with the ```--geneformat``` option.
-o OUTPUT
`````````
Output all distribution data. Default: bampath[:-4]+'_qual.txt'. Quality and offset estimation is based on this distribution. The user can save this file and quickly re-run the estimation with different thresholds via the ```-i``` option.
-t/--tis
````````
Set this option for TIS-enriched data (LTM or Harr). Quality control will then pay more attention to TIS sites.
-i INPUT
````````
Input a previous output file instead of reading the gene and bam files again.
--geneformat GENEFORMAT
```````````````````````
Gene annotation file format (gtf, bed, gpd, gff, default: auto)
--chrmap CHRMAP
```````````````
Input chromosome id mapping table file if annotation chr ids are not the same as chr ids in bam/fasta files. Format:
========= =========
chr_name1 chr_name2
========= =========
Two columns, tab separated; there is no specific requirement on column order. Mappings such as 'chr1' to '1' can be processed automatically without using this option.
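For illustration, a mapping file could look like the following (the chromosome names here are only hypothetical examples):
::
NC_000001.11	chr1
NC_000002.12	chr2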
-f FIGPDFPATH
`````````````
Output pdf figure file. Default: bampath[:-4]+'_qual.pdf'
-r PARAPATH
```````````
Output offset parameter file. Default: bampath+'.para.py'. This file saves P-site offsets for different read lengths in python dict format, and can be used in further analysis.
-l LENS
```````
Range of tag lengths. Default: 25,35. The last number (35) is not included, i.e. the longest length considered is 34.
-d DIS
``````
Position range near start codon or stop codon. Default: -40,20
--bins BINS
```````````
Number of bins for cds profile. Default: 20
--nom0
```````````
Do not consider reads with a mismatch at position 0 (5' end mismatch) as a separate group.
--th TH
```````
Threshold for quality. Default: 0.5. Groups whose frame bias ratio is below TH are considered low quality, and these groups of reads will not be used in further analysis. Offsets for low quality groups will not be set in the parameter file.
--end3
``````````
Plot RPF 3' end profile instead of 5' end.
--colorblind
````````````
Use a color style readable for color blind people ('#F00078,#00F000,#0078F0')
--colors COLORS
```````````````
User specified Matplotlib acceptable color codes for three frames (default: 'r,g,b')
-p NUMPROC
``````````
Number of processes. Default: 1
-v/--verbose
`````````````
Increase output verbosity.
Output files
------------
OUTPUT
```````
OUTPUT is a txt file recording all distribution data in python format for each group of reads. These distributions are shown in the pdf figure file. Quality and offset estimation is based on this distribution. The user can save this file and quickly re-run the estimation with different thresholds via the ```-i``` option.
Pdf figure
``````````
The pdf figure file is a plot of all the distributions and an illustration of quality and P-site offsets. The left part is for 5' end matched reads and the right part is for 5' end mismatch reads if ```--nom0``` is not set.
Upper panel: the length distribution of RPFs uniquely mapped to annotated protein-coding regions.
Lower panel: different quality metrics for RPFs uniquely mapped to annotated protein-coding regions.
Each row shows the RPFs with different lengths.
- Column 1: distribution of RPF 5’ end in 3 frames in all annotated codons. The percentage of the reads from the dominant reading frame is shown.
- Column 2: the distribution of RPF 5’ end counts near the annotated TIS. The estimated P-site offset and TIS accuracy are also shown. RPFs of a specific length that do not pass the threshold are considered low quality and removed.
- Column 3: the distribution of RPF 5’end count near annotated stop codon.
- Column 4: The RPF profile throughout the protein-coding regions in 3 frames. TIS enrich score (TIS count / CDS average) is also shown for TIS data.
Offset parameter file
`````````````````````
This file saves P-site offsets for different read lengths in python dict format, and can be used in further analysis. The default offset file name is bampath+'.para.py', placed alongside the input bam file. The file format looks like:
::
offdict = {28: 12, 29: 12, 30: 12, 32: 13, 'm0': {29: 12, 30: 12, 31: 13}}
The offset parameter file is easy to interpret and can be edited by the user if the auto-estimated offsets are not correct. The default file name will be auto-recognized in further analysis. If the bam file is in a different directory and the user does not want to create a parameter file in that directory, we recommend creating a link to the bam file in the current working directory, e.g. ```ln -s original/dir/ribo.bam```
Ribo-TISH does not guarantee that it can always find the best P-site offset values. Users should check the quality figures and edit the parameter file if necessary.
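As a minimal sketch (assuming the parameter file contains only the offdict assignment shown above, and using an example file name), the offsets can be inspected in Python before further analysis:
::
params = {}
exec(open('ribo.bam.para.py').read(), params)  # example file name
offdict = params['offdict']
print(offdict[29])        # P-site offset for 29 nt reads
print(offdict['m0'][29])  # offset for 29 nt reads with a 5' end mismatch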
predict
~~~~~~~
This is the main function of Ribo-TISH. It predicts ORFs/TISs from riboseq bam files. A negative binomial model is used to fit the TI-Seq background and test the significance of TIS sites. For regular riboseq data, a Wilcoxon rank sum test between in-frame reads and out-of-frame reads inside the ORF is performed.
Quick examples:
Combine TI-Seq and regular riboseq data
::
ribotish predict -t ltm.bam -b chx.bam -g gene.gtf -f genome.fa -o pred.txt
For TI-Seq data only
::
ribotish predict -t ltm.bam -g gene.gtf -f genome.fa -o pred.txt
De novo ORF prediction with only regular riboseq data, using the longest strategy
::
ribotish predict -b chx.bam -g gene.gtf -f genome.fa --longest -o pred.txt
De novo ORF prediction with two regular riboseq datasets, using the framebest strategy
::
ribotish predict -b chx1.bam,chx2.bam -g gene.gtf -f genome.fa --framebest -o pred.txt
Only test user-provided ORF candidates with two regular riboseq datasets
::
ribotish predict -b chx1.bam,chx2.bam -g gene.gtf -f genome.fa -i cand.txt -o pred.txt
Options
--------------
-t TISBAMPATHS
``````````````
Input TI-Seq bam data files, comma separated.
-b RIBOBAMPATHS
```````````````
Regular riboseq bam data files, comma separated.
At least one bam file should be provided by either ```-t``` or ```-b```.
-g GENEPATH
```````````
Gene annotation file for ORF prediction. Acceptable formats include gtf, gff, bed and genepred with gene names. The input file format can be auto detected or specified with the ```--geneformat``` option.
If the user needs to predict only on non-coding genes and wants a different gene annotation file for known ORF annotation and background estimation, the ```-a``` option can be used to provide another gene annotation for known ORF annotation.
If the user-provided candidates option ```-i``` is set, the transcript annotation for the candidates must be present in this gene annotation file.
-f GENOMEFAPATH
```````````````
Genome fasta file. The fasta file should either have an accompanying .fai index file (indexed) or be indexable (fasta sequences have a fixed length on each line). The program will index the genome file before prediction if a .fai index file cannot be found.
-o OUTPUT
`````````
Output all possible ORF results that fit the thresholds.
-i INPUT
````````
Only test input candidate ORFs, format:
======= ===== =====
transID start stop
======= ===== =====
Start and stop positions are 0 based, half open. Stop - start should be a multiple of 3. The transcript should be present in the gene annotation file.
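For example, a small candidate file could look like the following (tab separated; the transcript IDs and coordinates are hypothetical):
::
ENST00000371588	10	157
ENST00000371588	40	157
ENST00000437963	5	92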
--geneformat GENEFORMAT
```````````````````````
Gene annotation file format (gtf, bed, gpd, gff, default: auto)
--chrmap CHRMAP
```````````````
Input chromosome id mapping table file if annotation chr ids are not the same as the chr ids in bam/fasta files. See the --chrmap option in the ```quality``` section.
--tispara TISPARA
`````````````````
Input P-site offset parameter files for the ```-t``` bam files. The default parameter files are bampath+'.para.py' for each bam file, as generated by the ```ribotish quality``` function. There is no need to specify this option if the default parameter files exist. To provide other parameter files with this option, each bam file must be given its own file, with file names separated by commas. If no parameter file is found, a default offset of 12 is applied to all reads in the bam data.
--ribopara RIBOPARA
```````````````````
Input P-site offset parameter files for ```-b``` bam files. Same as ```--tispara``` option.
--nparts NPARTS
```````````````
Group transcripts according to TIS read density quantiles. Default: 10.
TIS background estimation uses ORF in-frame read counts (excluding TIS codons) to estimate negative binomial parameters. Since different transcripts have different expression levels, the background differs between highly and lowly expressed transcripts. Ribo-TISH therefore groups expressed transcripts into N parts based on the TIS read density of each transcript. Each transcript group has the same total number of TIS reads.
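The following Python sketch illustrates the grouping idea only; it is not Ribo-TISH's actual implementation, and all names are invented for this example:
::
import numpy as np
def group_by_density(density, tis_counts, nparts=10):
    # density and tis_counts are 1-D numpy arrays of equal length.
    # Sort transcripts from low to high TIS read density.
    order = np.argsort(density)
    # Cumulative share of total TIS reads along the sorted transcripts.
    cum = np.cumsum(tis_counts[order]) / tis_counts.sum()
    # Cut the cumulative share into nparts groups so that each group
    # holds roughly the same total number of TIS reads.
    groups = np.minimum((cum * nparts).astype(int), nparts - 1)
    labels = np.empty_like(groups)
    labels[order] = groups
    return labels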
-e ESTPATH
``````````
Output TIS background estimation result. If only one bam file is provided by the ```-t``` option, the default file name is tisbampath+'.bgest.txt'. If multiple TIS datasets are provided, the default file name is tisBackground.txt.
The result file contains negative binomial parameters, group levels and thresholds for each group.
-s INESTPATH
````````````
Input background estimation result file instead of instant estimation. By default, if only one bam file is provided by ```-t``` option, the program will first look for file name tisbampath+'.bgest.txt'. If this file exists, background parameters in this file will be used. Otherwise, TIS background estimation will run and generate a result file according to ```-e``` option.
-a AGENEPATH
````````````
Another gene annotation file for ORF annotation in addition to ```-g``` gene file. This option is mainly used when ```-g``` annotation focuses on predicting ORFs in non-coding transcripts and does not have sufficient protein coding gene annotation. Protein coding gene annotation is used for TIS background estimation as well as output TIS type classification.
--alt
`````
Use alternative start codons. If set, all codons differing from ATG by one base will be considered as start codons in ORF finding. This affects both TIS background estimation and prediction, but not ```-i``` mode prediction. To customize alternative start codons, use ```--altcodons```.
--altcodons ALTCODONS
`````````````````````
Use the provided alternative start codons, comma separated, e.g. ```--altcodons CTG,GTG,ACG```. This turns on the ```--alt``` option. 'ATG' does not need to be provided. 'N' bases are not supported.
--tis2ribo
``````````
Add TIS bam counts to regular riboseq counts. Use TIS data also for ORF frame test. This option will be turned on automatically if ```-b``` is not provided.
--harr
``````
The data is treated with harringtonine (instead of LTM). For Harr data, reads at TIS sites are not as focused as LTM reads. Reads in the flanking region (default 15 codons) of the TIS will not be used for TIS background estimation. To customize the flanking size, use ```--harrwidth```.
--harrwidth HARRWIDTH
`````````````````````
Flanking region for harr data, in codons. Default: 15. Turn on ```--harr``` option.
--longest
`````````
Only report longest possible ORF results for multiple candidate start codons in the same ORF (same stop codon). This is a TIS selection strategy when there's no ```-t``` TI-Seq data input.
--framebest
```````````
Only report best frame test results for multiple candidate start codons in the same ORF (same stop codon), which is TIS with the smallest frame test p-value (marked as 'T' in RiboPStatus column). This is a TIS selection strategy when there's no ```-t``` TI-Seq data input.
--enrichtest
````````````
Use the enrich test instead of the frame test. The enrich test is a rank sum test between in-frame reads inside the ORF and same-frame reads outside the ORF.
--nocompatible
``````````````
Do not require reads to be compatible with transcript splice junctions.
--minaalen MINAALEN
```````````````````
Minimum amino acid length of candidate ORFs. Default: 6.
--genefilter GENEFILTER
```````````````````````
Only process given genes. Comma separated.
--tpth TPTH
```````````
TIS p value threshold. Default: 0.05.
--fpth FPTH
```````````
Frame p value threshold. Default: 0.05.
--minpth MINPTH
```````````````
At least one of TIS or frame p value should be lower than this threshold. Default: 1.
--fspth FSPTH
`````````````
Fisher's p value threshold. Default: 0.05.
--fsqth FSQTH
`````````````
Fisher's FDR q value threshold. Default: 0.05.
--allresult ALLRESULT
`````````````````````
Write all result output without the FDR q-value threshold to another file. (default: output + '_all.txt'; set to 'off' or use ```--fsqth 1``` to turn off)
-p NUMPROC
``````````
Number of processes. Default: 1
-v/--verbose
`````````````
Increase output verbosity.
--transprofile TRANSPROFILE
```````````````````````````
Output RPF P-site profile for each transcript. The profile data is in python dict format, recording non-zero read counts at different positions on transcript.
--inprofile INPROFILE
``````````````````````
Input an RPF P-site profile for each transcript instead of reading bam reads. The profile file is the output file from the ```--transprofile``` option. This saves time when re-running.
--seq
`````
Report ORF sequences.
--aaseq
````````
Report amino acid sequences.
--blocks
````````
Report all exon block positions for predicted ORFs. Format: start1-stop1,start2-stop2,...startN-stopN. In chromosome direction.
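As a small illustration (not part of Ribo-TISH itself), this field can be parsed in Python:
::
def parse_blocks(blocks_str):
    # 'start1-stop1,start2-stop2,...' -> list of (start, stop) tuples,
    # in chromosome direction.
    return [tuple(map(int, b.split('-'))) for b in blocks_str.split(',')]
parse_blocks('100-160,300-372')  # [(100, 160), (300, 372)]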
--inframecount
``````````````
Report the sum of all counts at the in-frame positions in the ORF.
Output files
------------
OUTPUT
```````
The output is a txt file containing all possible ORF results that fit the thresholds. Some of the columns are:
:GenomePos: Genome position and strand of TIS site, 0 based, half open
:Start: TIS of the ORF on transcript
:Stop: 3' end of stop codon on transcript
:TisType: Relative position of this TIS to annotated ORF of the transcript. 'Novel' if no ORF annotation. ':Known' means the TIS is annotated in another transcript. ':CDSOverlap' means the ORF overlaps with annotated CDS in another transcript in the same reading frame.
:TISGroup: Group of the transcript for TIS background estimation
:TISCount: Number of reads with P-site at TIS site
:TISPvalue: One tailed negative binomial test p-value for TISCount (TIS test)
:RiboPvalue: One tailed rank sum test p-value for regular riboseq frame bias inside ORF (frame test)
:RiboPStatus: For all ORFs sharing same stop codon, 'T' means top (best) p-value, 'L' means local best p-value, 'N' means other. All 'N' in ```-i``` or ```--longest``` mode.
:FisherPvalue: Combination of TIS and Ribo p-values using Fisher's method
:TISQvalue: BH correction q-value of TIS test
:RiboQvalue: BH correction q-value of frame test
:FisherQvalue: BH correction q-value of Fisher's p-value
:AALen: Amino acid length of the ORF
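A minimal downstream sketch, assuming the output is a tab separated file whose header contains the column names above (check the header of your own output first):
::
import pandas as pd
pred = pd.read_csv('pred.txt', sep='\t')
# Keep ORFs passing a stricter Fisher q-value cutoff.
sig = pred[pred['FisherQvalue'] < 0.01]
print(sig[['GenomePos', 'TisType', 'AALen', 'FisherQvalue']].head())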
ALL
```
The '_all' output result is generated according to the ```--allresult``` option. It is similar to the main output but does not apply the FDR (q-value) cutoff. Other cutoffs are the same as for the output file.
tisdiff
~~~~~~~
This is the function for differential TIS identification. It uses two different TIS test results generated by ```ribotish predict``` from different quantitative TI-Seq (QTI-Seq) data. The ordinary global TI-Seq (GTI-Seq) may have some biases, so it is not suitable for differential analysis.
First, a normalization factor is estimated with the Trimmed Mean of M-values (TMM) method on the union of significant TIS counts in the two results. Then a binomial test p-value and fold change are calculated. If RNASeq counts are provided as a reference, the TI efficiency is calculated using Fisher's exact test with normalized count values.
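The statistical idea behind the count comparison can be sketched as follows; this is only an illustration of a two-sided binomial test with a normalization factor, not the exact Ribo-TISH implementation:
::
from scipy.stats import binom
def tis_diff_pvalue(count1, count2, scale_2_over_1):
    # Under the null, count2 out of n = count1 + count2 reads follows a
    # binomial distribution whose success probability is set by the
    # group 2 / group 1 normalization factor.
    n = count1 + count2
    p0 = scale_2_over_1 / (1.0 + scale_2_over_1)
    lower = binom.cdf(count2, n, p0)
    upper = binom.sf(count2 - 1, n, p0)
    return min(1.0, 2.0 * min(lower, upper))
tis_diff_pvalue(40, 85, 1.2)  # example counts and scale factor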
Quick examples:
Differential TIS activity calling
::
ribotish tisdiff -1 pred1.txt -2 pred2.txt -a qti1.bam -b qti2.bam -g gene.gtf -o diff.txt
Differential TIS efficiency calling with RNASeq count input
::
ribotish tisdiff -1 pred1.txt -2 pred2.txt -a qti1.bam -b qti2.bam -g gene.gtf --rnaseq RNA.txt -o diff.txt
Options
--------------
-1 TIS1PATH, -2 TIS2PATH
````````````````````````
Prediction results of group 1 & 2 TIS data. Comma separated if there is more than one replicate.
-a TIS1BAMPATHS, -b TIS2BAMPATHS
````````````````````````````````
Group 1 & 2 TIS riboseq bam files, comma separated.
--l1 TIS1LABELS, --l2 TIS2LABELS
````````````````````````````````
Labels for each replicate.
-g GENEPATH
```````````
Gene annotation file. Acceptable formats include gtf, gff, bed and genepred with gene names. The input file format can be auto detected or specified with the ```--geneformat``` option.
-o OUTPUT
`````````
Output result file.
--geneformat GENEFORMAT
```````````````````````
Gene annotation file format (gtf, bed, gpd, gff, default: auto)
--tis1para TIS1PARA, --tis2para TIS2PARA
````````````````````````````````````````
Input P-site offset parameter files for group 1 & 2 bam files. The default parameter files are bampath+'.para.py' for each bam file, as generated by the ```ribotish quality``` function. To use this option, each bam file must be given its own file, with file names separated by commas. If no parameter file is found, a default offset of 12 is applied to all reads in the bam data.
--nocompatible
``````````````
Do not require reads to be compatible with transcript splice junctions.
--normcomm
``````````
Use common TISs instead of union TISs for normalization.
--normanno
``````````
Use only annotated TISs for normalization.
--rnaseq RNASEQ
```````````````
RNASeq count input. Format:
==== ====== ====== ======
ID count1 count2 ...
==== ====== ====== ======
Both gene ID and transcript ID are acceptable.
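For example, a count file for two samples could look like this (tab separated; the IDs and counts are hypothetical):
::
GENE1	1520	1785
GENE2	980	1130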
--scalefactor SCALEFACTOR
`````````````````````````
Input the TIS scale factor of group 2/1 instead of calculating it automatically. Not a log value.
--rnascale RNASCALE
```````````````````
Input the RNASeq scale factor of group 2/1 instead of calculating it automatically. Not a log value.
--export EXPORT
```````````````
Export count table for differential analysis with other tools. Especially for replicated data.
--plotout PLOTOUT
`````````````````
Scatter plot output pdf file.
--figsize FIGSIZE
`````````````````
Scatter plot figure size. Default: 8,8.
-f FOLDCHANGE
`````````````
Minimum fold change threshold. Default: 1.5.
--ipth IPTH
```````````
Input TIS p value threshold. Default: 0.05.
--iqth IQTH
```````````
Input TIS q value threshold. Default: 0.05.
--opth OPTH
```````````
Output TIS diff p value threshold. Default: 0.05.
--oqth OQTH
```````````
Output TIS diff q value threshold. Default: 0.05.
-p NUMPROC
``````````
Number of processes. Default: 1
-v/--verbose
`````````````
Increase output verbosity.
Output files
------------
OUTPUT
```````
The output is a txt file containing all differential TIS results that fit the thresholds. Some of the columns are:
:FoldChange: Fold change (2/1) value after normalization
:DiffPvalue: Differential test p-value, two-tailed.
:DiffQvalue: BH correction q-value of DiffPvalue
EXPORT
``````
The export table is generated using ```--export``` option. It is also automatically generated when the input data has replicated samples. It is a txt file with raw TIS counts for each predicted TIS. The format of TIS id is 'TransID_Start_GenomePos'.
For replicated data, Ribo-TISH provides R scripts to call differential TISs using edgeR_ or DESeq2_.
Example for edgeR:
::
Rscript path_to_scripts/tisdiff_edgeR.r tisdiff_export.txt 3 4 tisdiff_edgeR_output.txt
For DESeq2:
::
Rscript path_to_scripts/tisdiff_DESeq2.r tisdiff_export.txt 3 4 tisdiff_DESeq2_output.txt
Here, 3 and 4 are the numbers of replicates in the two conditions.
.. _edgeR: https://bioconductor.org/packages/edgeR
.. _DESeq2: https://bioconductor.org/packages/DESeq2
If ```--rnaseq``` is provided, the RNASeq counts of the genes/transcripts for the TISs are also included in the export table. However, RNASeq-referenced differential TIS efficiency analysis with replicated data is currently unavailable.
| /ribotish-0.2.7.tar.gz/ribotish-0.2.7/README.rst | 0.919138 | 0.961425 | README.rst | pypi |

# ribotricer: Accurate detection of short and long active ORFs using Ribo-seq data
[](https://travis-ci.org/smithlabcode/ribotricer)
[](https://pypi.org/project/ribotricer/)
[](http://bioconda.github.io/recipes/ribotricer/README.html)

[](https://pypi.org/project/ribotricer/)
[](LICENSE)
[Advanced Access Paper](https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btz878/5637228)
[Supplementary File](https://saket-choudhary.me/pdfs/ribotricer_2019.pdf)
## Installation
We highly recommend that you install ribotricer via conda:
```bash
conda install -c bioconda ribotricer
```
To install locally, you can either download the source code from release or clone the latest version using ```git clone```.
After you get a copy of the source code, please change into the directory where the source code locates, and type
```bash
python setup.py install
```
**NOTE**: The above will install the following dependencies:
```
pyfaidx>=0.5.0
pysam>=0.11.2.2
numpy>=1.11.0
pandas>=0.20.3
scipy>=0.19.1
matplotlib>=2.1.0
click>=6.0
click-help-colors>=0.3
quicksect>=0.2.0
tqdm>=4.23.4
```
If some of these are already present,
they might be replaced by the designated version. So we strongly recommend
creating a separate environment (using `venv` or `conda`) before installing
`ribotricer`:
```
conda create -n ribotricer -c bioconda ribotricer
```
------------------
## Workflow of ribotricer
In order to run ribotricer, you need to have the following three files
prepared:
* **genome annotation file** in GTF format, supporting both GENCODE and
Ensembl annotation
* **reference genome file** in FASTA format
* **alignment file** in BAM format
### Preparing candidate ORFs
The first step of ribotricer is to take the GTF file and the FASTA file to find all
candidate ORFs. In order to generate all candidate ORFs, please run
```bash
ribotricer prepare-orfs --gtf {GTF} --fasta {FASTA} --prefix {RIBOTRICER_INDEX_PREFIX}
```
The command above by default only includes ORFs with length longer than 60 nts,
and only uses 'ATG' as start codon. You can change the setting by including
options ```--min_orf_length``` and ```--start_codons```.
Output: {PREFIX}\_candidate\_orfs.tsv.
### Detecting translating ORFs
The second step of ribotricer is to take the index file generated by ```prepare-orfs```
and the BAM file to detect the actively translating ORFs by assessing the periodicity
of all candidate ORFs:
```bash
ribotricer detect-orfs \
--bam {BAM} \
--ribotricer_index {RIBOTRICER_INDEX_PREFIX}_candidate_ORFs.tsv \
--prefix {OUTPUT_PREFIX}
```
**NOTE**: The above command, by default, uses a phase-score cutoff of 0.428. Our species-specific recommended cutoffs
are as follows:
|Species | Cutoff|
|-------------|-------|
|Arabidopsis | 0.330 |
|C. elegans | 0.239 |
|Baker's Yeast| 0.318 |
|Drosophila | 0.181 |
|Human | 0.440 |
|Mouse | 0.418 |
|Rat | 0.453 |
|Zebrafish | 0.249 |
In order to assign `non-translating` or `translating` status, ribotricer by default
uses a cutoff threshold of `0.428`. ORFs with phase score above `0.428` are marked as
translating as long as they have at least five codons with non-zero read count.
By default, ribotricer does not take coverage into account when predicting whether an ORF is
translating or non-translating. However, this behavior can be changed with the following
filters:
- `--min_valid_codons` (default=5): Minimum number of codons with non-zero reads for determining active translation
- `--min_valid_codons_ratio` (default=0): Minimum ratio of codons with non-zero reads to total codons for determining active translation
- `--min_reads_per_codon` (default=0): Minimum number of reads per codon for determining active translation
- `--min_read_density` (default=0.0): Minimum read density (total_reads/length) over an ORF total codons for determining active translation
For each of the above filters, an ORF failing **any** of the filters is
marked as `non-translating`.
For example, to ensure that each ORF has at least 3/4 of its codons non-empty,
we can specify `--min_valid_codons_ratio` to be 0.75:
```
ribotricer detect-orfs \
--bam {BAM} \
--ribotricer_index {RIBOTRICER_INDEX_PREFIX}_candidate_ORFs.tsv \
--prefix {OUTPUT_PREFIX} \
--min_valid_codons_ratio 0.75
```
The ORF detection step consists of several small steps including:
1. Infer the experimental protocol (strandedness of the reads)
You can directly assign the strandedness using the option ```--stranded```; it can be 'yes',
'no', or 'reverse'. If this option is not provided, ribotricer will automatically infer the
experimental protocol by comparing the strand of the reads to the reference.
Output: {OUTPUT_PREFIX}\_protocol.txt
2. Split the bam file by strand and read length
In this step, all mapped reads will be filtered to include only uniquely mapped reads. Reads
will be split by strand and read length with respect to the strandedness provided or inferred
from the previous step. If you only want to include certain read lengths, they can be assigned with
option ```--read_lengths```.
Output: {OUTPUT_PREFIX}\_bam\_summary.txt
3. Plot read length distribution
In this step, read length distribution will be plotted and serves as quality control
Output: {OUTPUT_PREFIX}\_read\_length\_dist.pdf
4. Calculate metagene profiles
In this step, the metagene profile of all CDS transcripts for each read length is
calculated by aligning with start codon or stop codon.
Output: {OUTPUT_PREFIX}\_metagene\_profiles\_5p.tsv is the metagene profile aligning with the
start codon and {OUTPUT_PREFIX}\_metagene\_profiles\_3p.tsv is the metagene profile aligning with
the stop codon
5. Plot metagene profiles
In this step, metagene plots will be made to serve as quality control.
Output: {OUTPUT_PREFIX}\_metagene\_plots.pdf
6. Align metagene profiles
If the P-site offsets are not provided, this step will use cross-correlation to find out the relative
offsets between different read lengths
Output: {OUTPUT_PREFIX}\_psite\_offsets.txt
7. Merge reads from different read lengths based on P-site offsets
This step will integrate reads of different read lengths by shifting with the P-site offsets
8. Export wig file
A WIG file is exported in this step to be used for visualization in Genome Browser
Output: {OUTPUT_PREFIX}\_pos.wig for the positive strand and {OUTPUT_PREFIX}\_neg.wig for the negative strand.
9. Export actively translating ORFs
The periodicity of all ORF profiles is assessed and the translating ones are output. You can report all ORFs regardless
of their translation status with the option ```--report_all```
Output: {OUTPUT_PREFIX}\_translating\_ORFs.tsv
------------------
## Definition of ORF types
Ribotricer reports eight different ORF types as defined below:
* **annotated**: CDS annotated in the provided GTF file
* **super_uORF**: upstream ORF of the annotated CDS, not overlapping with any CDS of the same gene
* **super_dORF**: downstream ORF of the annotated CDS, not overlapping with any CDS of the same gene
* **uORF**: upstream ORF of the annotated CDS, not overlapping with the main CDS
* **dORF**: downstream ORF of the annotated CDS, not overlapping with the main CDS
* **overlap_uORF**: upstream ORF of the annotated CDS, overlapping with the main CDS
* **overlap_dORF**: downstream ORF of the annotated CDS, overlapping with the main CDS
* **novel**: ORF in non-coding genes or in non-coding transcripts of coding genes
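As a small, optional post-processing sketch (the column name `ORF_type` is an assumption here; check the header of your `*_translating_ORFs.tsv` output and adjust if needed), the reported types can be tallied with pandas:
```python
import pandas as pd
# Tally ORF types reported by `ribotricer detect-orfs`.
orfs = pd.read_csv("prefix_translating_ORFs.tsv", sep="\t")
print(orfs["ORF_type"].value_counts())
```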
------------------
## Learning cutoff empirically from data
Ribotricer can also learn cutoff empirically from the data. Given at least one Ribo-seq and one RNA-seq BAM file,
`ribotricer` learns the cutoff by running one iteration of the algorithm on the provided files with a prespecified
cutoff (`--phase_score_cutoff`, default: 0.428) and then uses the generated output to find the median difference between Ribo-seq and RNA-seq phase scores of only candidate ORFs with `transcript_type` set to `protein_coding` (`--filter_by_tx_annotation`).
```
ribotricer learn-cutoff --ribo_bams ribo_bam1.bam,ribo_bam2.bam \
--rna_bams rna_1.bam \
--prefix ribo_rna_prefix \
--ribotricer_index {RIBOTRICER_ANNOTATION}
```
------------------
## Contacts and bug reports
Andrew D. Smith
andrewds@usc.edu
Saket Choudhary
skchoudh@usc.edu
Wenzheng Li
wenzhenl@usc.edu
We are dedicated to making the best ORF detector for Ribo-seq data analysis.
If you found a bug or mistake in this project, we would like to know about it.
Before you send us the bug report though, please check the following:
1. Are you using the latest version? The bug you found may already have been
fixed.
2. Check that your input is in the correct format and you have selected the
correct options.
3. Please reduce your input to the smallest possible size that still produces
the bug; we will need your input data to reproduce the problem, and the
smaller you can make it, the easier it will be.
------------------
## LICENSE
Ribotricer for detecting actively translating ORFs from Ribo-seq data
Copyright (C) 2018 Andrew D Smith, Wenzheng Li, Saket Choudhary and
the University of Southern California
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
| /ribotricer-1.3.2.tar.gz/ribotricer-1.3.2/README.md | 0.83025 | 0.962108 | README.md | pypi |
# pyribs
| Website | Source | Docs | Paper | Twitter |
| :------------------------------: | :--------------------------------------------: | :----------------------------------------: | :------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------: |
| [pyribs.org](https://pyribs.org) | [GitHub](https://github.com/icaros-usc/pyribs) | [docs.pyribs.org](https://docs.pyribs.org) | [pyribs.org/paper](https://pyribs.org/paper) | [](https://twitter.com/pyribs) |
| PyPI | Conda | CI/CD | Docs Status |
| :---------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------: |
| [](https://pypi.python.org/pypi/ribs) | [](https://anaconda.org/conda-forge/pyribs) | [](https://github.com/icaros-usc/pyribs/actions?query=workflow%3A"Tests") | [](https://readthedocs.org/projects/ribs/) |
A _bare-bones_ Python library for quality diversity (QD) optimization. Pyribs
implements the highly modular _Rapid Illumination of Behavior Space (RIBS)_
framework for QD optimization. Pyribs is also the official implementation of
Covariance Matrix Adaptation MAP-Elites (CMA-ME), Covariance Matrix Adaptation
MAP-Elites via a Gradient Arborescence (CMA-MEGA), Covariance Matrix Adaptation
MAP-Annealing (CMA-MAE), and scalable variants of CMA-MAE.
## Overview

[Quality diversity (QD) optimization](https://arxiv.org/abs/2012.04322) is a
subfield of optimization where solutions generated cover every point in a
_measure_ space while simultaneously maximizing (or minimizing) a single
_objective_. Algorithms in the MAP-Elites family of QD algorithms produce
heatmaps (archives) as output, where each cell contains the best discovered
representative of a region in measure space.
> In the QD literature, measure function outputs have also been referred to as
> "behavioral characteristics," "behavior descriptors," or "feature
> descriptors."
Recent years have seen the development of a large number of QD algorithms. To
represent these and future algorithms, we have developed the highly modular RIBS
framework. RIBS divides a QD algorithm into three components:
- An **archive**, which saves generated solutions within measure space.
- One or more **emitters**, where each emitter is an algorithm which generates
new candidate solutions and responds to feedback about how the solutions were
evaluated and how they were inserted into the archive.
- A **scheduler** which controls the interaction of the **archive** and
**emitters**. The **scheduler** also provides an interface for requesting new
candidate solutions and telling the algorithm how candidates performed.
By interchanging these components, a user can compose a large number of QD
algorithms.
Pyribs is an implementation of the RIBS framework designed to support a wide
range of users, from beginners entering the field to experienced researchers
seeking to develop new algorithms. Pyribs achieves these goals by embodying
three principles:
- **Simple:** Centered _only_ on components that are absolutely necessary to run
a QD algorithm, allowing users to combine the framework with other software
frameworks.
- **Flexible:** Capable of representing a wide range of current and future QD
algorithms, allowing users to easily create or modify components.
- **Accessible:** Easy to install and learn, particularly for beginners with
limited computational resources.
In contrast to other QD libraries, pyribs is "bare-bones." For example, like
[pycma](https://pypi.org/project/cma/), pyribs focuses solely on optimizing
fixed-dimensional continuous domains. Focusing on this one commonly-occurring
problem allows us to optimize the library for performance as well as ease of
use. Refer to the list of [additional QD libraries](#additional-qd-libraries)
below if you need greater performance or have additional use cases.
Following the RIBS framework (shown in the figure below), a standard algorithm
in pyribs operates as follows:
1. The user calls the `ask()` method on the scheduler. The scheduler requests
solutions from each emitter by calling the emitter's `ask()` method.
2. The user evaluates solutions to obtain the objective and measure values.
3. The user passes the evaluations to the scheduler's `tell()` method. The
scheduler adds the solutions into the archive and receives feedback. The
scheduler passes this feedback along with the evaluated solutions to each
emitter's `tell()` method, and each emitter then updates its internal state.

## Paper
Two years after the initial release of pyribs, we released a paper which
elaborates on the RIBS framework and the design decisions behind pyribs! For
more information on this paper, see [here](https://pyribs.org/paper).
## Citation
If you use pyribs in your research, please consider citing our
[GECCO 2023 paper](https://dl.acm.org/doi/10.1145/3583131.3590374) as follows.
Also consider citing any algorithms you use as shown
[below](#citing-algorithms-in-pyribs).
```
@inproceedings{10.1145/3583131.3590374,
author = {Tjanaka, Bryon and Fontaine, Matthew C and Lee, David H and Zhang, Yulun and Balam, Nivedit Reddy and Dennler, Nathaniel and Garlanka, Sujay S and Klapsis, Nikitas Dimitri and Nikolaidis, Stefanos},
title = {Pyribs: A Bare-Bones Python Library for Quality Diversity Optimization},
year = {2023},
isbn = {9798400701191},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3583131.3590374},
doi = {10.1145/3583131.3590374},
abstract = {Recent years have seen a rise in the popularity of quality diversity (QD) optimization, a branch of optimization that seeks to find a collection of diverse, high-performing solutions to a given problem. To grow further, we believe the QD community faces two challenges: developing a framework to represent the field's growing array of algorithms, and implementing that framework in software that supports a range of researchers and practitioners. To address these challenges, we have developed pyribs, a library built on a highly modular conceptual QD framework. By replacing components in the conceptual framework, and hence in pyribs, users can compose algorithms from across the QD literature; equally important, they can identify unexplored algorithm variations. Furthermore, pyribs makes this framework simple, flexible, and accessible, with a user-friendly API supported by extensive documentation and tutorials. This paper overviews the creation of pyribs, focusing on the conceptual framework that it implements and the design principles that have guided the library's development. Pyribs is available at https://pyribs.org},
booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference},
pages = {220–229},
numpages = {10},
keywords = {framework, quality diversity, software library},
location = {Lisbon, Portugal},
series = {GECCO '23}
}
```
## Usage
Here we show an example application of CMA-ME in pyribs. To initialize the
algorithm, we first create:
- A 2D **GridArchive** where each dimension contains 20 cells across the range
[-1, 1].
- Three instances of **EvolutionStrategyEmitter**, all of which start from the
search point **0** in 10-dimensional space and a Gaussian sampling
distribution with standard deviation 0.1.
- A **Scheduler** that combines the archive and emitters together.
After initializing the components, we optimize (pyribs maximizes) the negative
10-D Sphere function for 1000 iterations. Users of
[pycma](https://pypi.org/project/cma/) will be familiar with the ask-tell
interface (which pyribs adopted). First, the user must `ask` the scheduler for
new candidate solutions. After evaluating the solution, they `tell` the
scheduler the objectives and measures of each candidate solution. The algorithm
then populates the archive and makes decisions on where to sample solutions
next. Our toy example uses the first two parameters of the search space as
measures.
```python
import numpy as np
from ribs.archives import GridArchive
from ribs.emitters import EvolutionStrategyEmitter
from ribs.schedulers import Scheduler
archive = GridArchive(
solution_dim=10,
dims=[20, 20],
ranges=[(-1, 1), (-1, 1)],
)
emitters = [
EvolutionStrategyEmitter(
archive,
x0=[0.0] * 10,
sigma0=0.1,
) for _ in range(3)
]
scheduler = Scheduler(archive, emitters)
for itr in range(1000):
solutions = scheduler.ask()
# Optimize the 10D negative Sphere function.
objective_batch = -np.sum(np.square(solutions), axis=1)
# Measures: first 2 coordinates of each 10D solution.
measures_batch = solutions[:, :2]
scheduler.tell(objective_batch, measures_batch)
```
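Optionally, you can summarize the archive before plotting. This is a small sketch that assumes your pyribs version exposes `archive.stats` with the attributes used below; consult the API documentation if the names differ.
```python
stats = archive.stats
print(f"Cells filled: {stats.num_elites}")
print(f"Coverage: {stats.coverage:.3f}")
print(f"QD score: {stats.qd_score:.3f}")
print(f"Max objective: {stats.obj_max:.3f}")
```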
To visualize this archive with Matplotlib, we then use the
`grid_archive_heatmap` function from `ribs.visualize`.
```python
import matplotlib.pyplot as plt
from ribs.visualize import grid_archive_heatmap
grid_archive_heatmap(archive)
plt.show()
```

For more information, refer to the [documentation](https://docs.pyribs.org/).
## Installation
pyribs supports Python 3.7 and above. Earlier Python versions may work but are
not officially supported. To find the installation command for your system
(including for installing from source), visit the
[installation selector](https://pyribs.org/#installation) on our website.
To test your installation, import pyribs and print the version with this
command:
```bash
python -c "import ribs; print(ribs.__version__)"
```
You should see a version number in the output.
## Documentation
See here for the documentation: <https://docs.pyribs.org>
To serve the documentation locally, clone the repo and install the development
requirements with
```bash
pip install -e .[dev]
```
Then run
```bash
make servedocs
```
This will open a window in your browser with the documentation automatically
loaded. Furthermore, every time you make changes to the documentation, the
preview will also reload.
## Contributors
pyribs is developed and maintained by the [ICAROS Lab](http://icaros.usc.edu) at
USC. For information on contributing to the repo, see
[CONTRIBUTING](./CONTRIBUTING.md).
- [Bryon Tjanaka](https://btjanaka.net)
- [Matthew C. Fontaine](https://scholar.google.com/citations?user=RqSvzikAAAAJ)
- [David H. Lee](https://github.com/itsdawei)
- [Yulun Zhang](https://github.com/lunjohnzhang)
- [Nivedit Reddy Balam](https://www.linkedin.com/in/nivedit-reddy)
- [Nathan Dennler](https://ndennler.github.io/)
- [Sujay S. Garlanka](https://sujaygarlanka.com)
- Nikitas Klapsis
- [Robby Costales](https://robbycostales.com)
- [Sam Sommerer](https://github.com/sam-sommerer)
- [Vincent Vu](https://vuvincent.com/)
- [Stefanos Nikolaidis](https://stefanosnikolaidis.net)
We thank [Amy K. Hoover](http://amykhoover.com/) and
[Julian Togelius](http://julian.togelius.com/) for their contributions deriving
the CMA-ME algorithm.
## Users
pyribs users include:
<!-- Alphabetical order -->
- [Adam Gaier (Autodesk Research)](https://scholar.google.com/citations?user=GGyARB8AAAAJ)
- [Adaptive & Intelligent Robotics Lab (Imperial College London)](https://www.imperial.ac.uk/adaptive-intelligent-robotics)
- [Chair of Statistical Learning and Data Science (LMU Munich)](https://www.slds.stat.uni-muenchen.de/)
- [Game Innovation Lab (New York University)](https://game.engineering.nyu.edu)
- [Giovanni Iacca (University of Trento)](https://sites.google.com/site/giovanniiacca/)
- [HUAWEI Noah's Ark Lab](https://github.com/huawei-noah)
- [ICAROS Lab (University of Southern California)](http://icaros.usc.edu/)
- [Jacob Schrum (Southwestern University)](https://github.com/schrum2/PyribsForGameGAN)
- [Lenia Research](https://lenia.world)
- [Paul Kent (The University of Warwick)](https://warwick.ac.uk/fac/sci/mathsys/people/students/2018intake/kent/)
- [Various](https://github.com/ganyariya/mario_pytorch)
[researchers](https://direct.mit.edu/isal/proceedings/isal/33/112256) at the
University of Tsukuba
### Publications
For the list of publications which use pyribs, refer to our
[Google Scholar entry](https://scholar.google.com/scholar?oi=bibs&hl=en&cites=16246392515630874608).
### Software
See the
[GitHub dependency graph](https://github.com/icaros-usc/pyribs/network/dependents)
for the public GitHub repositories which depend on pyribs.
## Citing Algorithms in pyribs
If you use the following algorithms, please also cite their relevant papers:
- **CMA-ME:** [Fontaine 2020](https://dl.acm.org/doi/10.1145/3377930.3390232)
```
@inproceedings{10.1145/3377930.3390232,
author = {Fontaine, Matthew C. and Togelius, Julian and Nikolaidis, Stefanos and Hoover, Amy K.},
title = {Covariance Matrix Adaptation for the Rapid Illumination of Behavior Space},
year = {2020},
isbn = {9781450371285},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3377930.3390232},
doi = {10.1145/3377930.3390232},
booktitle = {Proceedings of the 2020 Genetic and Evolutionary Computation Conference},
pages = {94–102},
numpages = {9},
location = {Canc\'{u}n, Mexico},
series = {GECCO '20}
}
```
- **CMA-MEGA:**
[Fontaine 2021](https://proceedings.neurips.cc/paper/2021/hash/532923f11ac97d3e7cb0130315b067dc-Abstract.html)
```
@inproceedings{NEURIPS2021_532923f1,
author = {Fontaine, Matthew and Nikolaidis, Stefanos},
booktitle = {Advances in Neural Information Processing Systems},
editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
pages = {10040--10052},
publisher = {Curran Associates, Inc.},
title = {Differentiable Quality Diversity},
url = {https://proceedings.neurips.cc/paper/2021/file/532923f11ac97d3e7cb0130315b067dc-Paper.pdf},
volume = {34},
year = {2021}
}
```
- **CMA-MAE:** [Fontaine 2022](https://arxiv.org/abs/2205.10752)
```
@misc{cmamae,
doi = {10.48550/ARXIV.2205.10752},
url = {https://arxiv.org/abs/2205.10752},
author = {Fontaine, Matthew C. and Nikolaidis, Stefanos},
keywords = {Machine Learning (cs.LG), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Covariance Matrix Adaptation MAP-Annealing},
publisher = {arXiv},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license}
}
```
- **Scalable CMA-MAE:** [Tjanaka 2022](https://arxiv.org/abs/2210.02622)
```
@misc{scalablecmamae,
title={Training Diverse High-Dimensional Controllers by Scaling Covariance Matrix Adaptation MAP-Annealing},
author={Bryon Tjanaka and Matthew C. Fontaine and Aniruddha Kalkar and Stefanos Nikolaidis},
year={2022},
eprint={2210.02622},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
```
## Additional QD Libraries
- [QDax](https://github.com/adaptive-intelligent-robotics/QDax): Implementations
of QD algorithms in JAX. QDax is suitable if you want to run entire QD
algorithms on hardware accelerators in a matter of minutes, and it is
particularly useful if you need to interface with Brax environments.
- [qdpy](https://gitlab.com/leo.cazenille/qdpy/): Python implementations of a
wide variety of QD algorithms.
- [sferes](https://github.com/sferes2/sferes2): Contains C++ implementations of
QD algorithms; can also handle discrete domains.
## License
pyribs is released under the
[MIT License](https://github.com/icaros-usc/pyribs/blob/master/LICENSE).
## Credits
The pyribs package was initially created with
[Cookiecutter](https://github.com/audreyr/cookiecutter) and the
[audreyr/cookiecutter-pypackage](https://github.com/audreyr/cookiecutter-pypackage)
project template.
| /ribs-0.5.2.tar.gz/ribs-0.5.2/README.md | 0.694821 | 0.967808 | README.md | pypi |
# Contributing
Contributions are welcome, and they are greatly appreciated. Every little bit
helps, and credit will always be given.
## Types of Contributions
- **Report Bugs:** Refer to the
[Issue Tracker](https://github.com/icaros-usc/pyribs/issues). Please include
details such as operating system, Python version, and ribs version, as well as
detailed steps to reproduce the bug.
- **Fix Bugs:** Look through the Issue Tracker for bugs. Anything tagged with
"bug" and "help wanted" is open to whoever wants to implement it.
- **Propose features:** To request new features in pyribs, submit a Feature
Request on the Issue Tracker. In the request, please:
- Explain in detail how the feature would work.
- Keep the scope as narrow as possible, to make the features easier to
implement.
- **Implement Features:** Look through the Issue Tracker for features. Anything
tagged with "enhancement" and "help wanted" is open to whoever wants to
implement it.
- **Write Documentation:** pyribs could always use more documentation, whether
as part of the official pyribs docs, in docstrings, or even on the web in blog
posts, articles, and such. For the website, refer to the
[website repo](https://github.com/icaros-usc/pyribs.org).
- **Submit Feedback:** The best way to send feedback is to file an issue on the
[Issue Tracker](https://github.com/icaros-usc/pyribs/issues).
## Developing pyribs
Ready to contribute? Here's how to set up pyribs for local development.
1. [Fork](https://github.com/icaros-usc/pyribs/fork) the pyribs repo on GitHub.
1. Clone the fork locally:
```bash
# With SSH:
git clone git@github.com:USERNAME/pyribs.git
# Without SSH:
git clone https://github.com/USERNAME/pyribs.git
```
1. Install the local copy and dev requirements into an environment. For
instance, with Conda, the following creates an environment at `./env`.
```bash
cd pyribs
conda create --prefix ./env python=3.7 # 3.7 is the minimum version pyribs supports.
conda activate ./env
pip install -e .[dev]
```
1. Create a branch for local development:
```bash
git checkout -b name-of-bugfix-or-feature
```
Now make the appropriate changes locally.
- Please follow the
[Google Style Guide](https://google.github.io/styleguide/pyguide.html)
(particularly when writing docstrings).
- Make sure to auto-format the code using YAPF. We highly recommend
installing an editor plugin that auto-formats on save, but YAPF also runs
on the command line:
```bash
yapf -i FILES
```
1. After making changes, check that the changes pass the tests:
```bash
pytest tests/
make test # ^ same as above
```
And to run the benchmarks:
```bash
pytest -c pytest_benchmark.ini
make benchmark # ^ same as above
```
Finally, to lint the code:
```bash
pylint ribs tests benchmarks examples
make lint # ^ same as above
```
To get pytest and pylint, pip install them into the environment. However,
they should already be installed by `pip install -e .[dev]`.
1. Add your change to the changelog for the current version in `HISTORY.md`.
1. Commit the changes and push the branch to GitHub:
```bash
git add .
git commit -m "Detailed description of changes."
git push origin name-of-bugfix-or-feature
```
1. Submit a pull request through the GitHub website.
## Pull Request Guidelines
Before submitting a pull request, check that it meets these guidelines:
1. Style: Code should follow the
[Google Style Guide](https://google.github.io/styleguide/pyguide.html) and be
auto-formatted with [YAPF](https://github.com/google/yapf).
1. The pull request should include tests.
1. If the pull request adds functionality, corresponding docstrings and other
documentation should be updated.
1. The pull request should work for Python 3.7 and higher. GitHub Actions will
display test results at the bottom of the pull request page. Check there for
test results.
## Instructions
### Running a Subset of Tests
To run a subset of tests, use `pytest` with the directory name, such as:
```bash
pytest tests/core/archives
```
### Documentation
Documentation is primarily written in Markdown, as we use the
[MyST](https://myst-parser.readthedocs.io/en/latest/) Sphinx plugin.
To preview documentation, use:
```bash
make servedocs
```
This will open up a browser window and automatically reload as changes are made
to the docs.
### Adding a Tutorial
Tutorials are created in Jupyter notebooks that are stored under `tutorials/` in
the repo. To create a tutorial:
1. Write the notebook and save it under `tutorials/`.
1. Use cell magic (e.g. `%pip install ribs`) to install pyribs and other
dependencies.
- Installation cells tend to produce a lot of output. Make sure to clear this
output in Jupyter lab so that it does not clutter the documentation.
1. Before the main loop of the QD algorithm, include a line like
`total_itrs = 500` (any other integer will work). This line will be replaced
during testing (see `tests/tutorials.sh`) in order to test that the notebook
runs end-to-end. By default, the tests run the notebook with
`total_itrs = 5`. If this tutorial needs more (or less), modify the
switch-case statement in `tests/tutorials.sh`.
1. Make sure that the only level 1 heading (e.g. `# Awesome Tutorial`) is the
title at the top of the notebook. Subsequent titles should be level 2 (e.g.
`## Level 2 Heading`) or higher.
1. If linking to the pyribs documentation, make sure to link to pages in the
`latest` version on ReadTheDocs, i.e. your links should start with
`https://docs.pyribs.org/en/latest/`
1. Add an entry into the toctree in `docs/tutorials.md` and add it to one of the
lists of tutorials.
1. Check that the tutorial shows up on the Tutorials page when serving the docs.
1. Create a PR into the website repo that adds the tutorial onto the home page,
specifically
[this file](https://github.com/icaros-usc/pyribs.org/blob/master/src/index.liquid).
In the PR, include a square image that represents the tutorial.
### Adding an Example
Examples are created in Python scripts stored under `examples/` in the repo, and
their source is shown in the docs. To create an example:
1. Write the Python script and save it under `examples/`.
1. Add any dependencies at the top of the script with a `pip install` command
(see existing examples for a sample of how to do this).
1. Add a shell command to `tests/examples.sh` that calls the script with
parameters that will make it run as quickly as possible. This helps us ensure
that the script has basic correctness. Also call the `install_deps` function
on the script file before running the script.
1. Add a Markdown file in the `docs/examples` directory with the same name as
the Python file -- if the example is `examples/foobar.py`, the Markdown file
will be `docs/examples/foobar.md`.
1. Add a title to the Markdown file, such as:
```
# My Awesome Example
```
1. In the markdown file, include the following so that the source code of the
example is displayed.
````
```{eval-rst}
.. literalinclude:: ../../examples/EXAMPLE.py
:language: python
:linenos:
```
````
1. Add any other relevant info to the Markdown file.
1. Add an entry into the toctree and list of examples in `docs/examples.md`.
1. Check that the example shows up on the Examples page when serving the docs.
### Referencing Papers
When referencing papers, refer to them as `Lastname YEAR`, e.g. `Smith 2004`.
Also, prefer to link to the paper's website, rather than just the PDF. This is
particularly relevant when linking to arXiv papers.
### Deploying
1. Create a PR into master after doing the following:
1. Switch tutorial links from latest to stable with:
```bash
make tutorial_links
```
See [#300](https://github.com/icaros-usc/pyribs/pull/300) for why we do
this.
1. Update the version with `bump2version` by running the following for minor
versions:
```bash
bump2version minor
```
or the following for patch versions:
```bash
bump2version patch
```
1. Add all necessary info on the version to `HISTORY.md`.
1. (Optional) Once the PR has passed CI/CD and been squashed-and-merged into
master, check out the squash commit and locally run `make release-test`. This
uploads the code to TestPyPI to check that the deployment works. If this
fails, make fixes as appropriate.
1. Once the PR in step 1 and any changes in step 2 have passed CI/CD and been
squashed-and-merged into master, locally tag the master branch with a tag
like `v0.2.1`, e.g.
```bash
git tag v0.2.1 HEAD
```
1. Now push the tag with
```bash
git push --tags
```
1. Check that the version was deployed to PyPI. If it failed, delete the tag,
make appropriate fixes, and repeat steps 2 and 3.
1. Write up the release on GitHub, and attach it to the tag.
1. Submit another PR which reverts the changes to the tutorial links.
Specifically, while on master, make sure your workspace is clean, then revert
the changes with:
```bash
git checkout HEAD~ tutorials/
```
And commit the result.
Our deployment process may change in the future as pyribs becomes more complex.
| /ribs-0.5.2.tar.gz/ribs-0.5.2/CONTRIBUTING.md | 0.797004 | 0.927232 | CONTRIBUTING.md | pypi |
import argparse
from controllers.http_links_collector import HttpLinksCollector
import logging
import json
class RicardoCrawler:
'''RicardoCrawler class to crawl a website using command-line parameters.
Created on 27/09/2012
@author: Ricardo García Fernández
@mail: ricardogarfe@gmail.com
'''
def __init__(self):
'''Init method
'''
self.setup_log()
self.crawler_start()
def crawler_start(self):
'''Method to start crawling.
* Checks input parameters.
* Returns the result of crawling by printing a dictionary on the screen.
'''
# ArgParse definition rules
parser = argparse.ArgumentParser(description="Let's crawl a web")
parser.add_argument('url', nargs=1, help='target URL')
parser.add_argument('-n', '--number-of-levels', type = int, \
default = 1, help = 'how deep the crawl will go.')
# Create argument object
args = parser.parse_args()
target_url = args.url.pop()
depth = args.number_of_levels
# Starting level to retrieve links
level = 1
links = {}
http_links_collector = HttpLinksCollector(target_url)
links_list = http_links_collector.\
retrieve_links(target_url, depth, level)
links[target_url] = links_list
links_result = json.dumps(links, sort_keys=True, indent=4)
# Print result in json view mode.
self.logger.info(links_result)
def setup_log(self):
'''Setup Python logging.
'''
self.logger = logging.getLogger('ricardo-crawler')
self.hdlr = logging.FileHandler('/var/tmp/crawler.log')
self.formatter = logging.Formatter('%(asctime)s %(levelname)s \
%(filename)s %(message)s')
self.hdlr.setFormatter(self.formatter)
self.logger.addHandler(self.hdlr)
self.logger.setLevel(logging.INFO)
def main():
'''Main method to initialize project.
'''
RicardoCrawler()
if __name__ == '__main__':
main()
| /ricardo_crawler-01.00.01.tar.gz/ricardo_crawler-01.00.01/ricardo_crawler.py | 0.550124 | 0.170266 | ricardo_crawler.py | pypi |
(quickstart)=
# Quick Start
```
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 12
rcParams["legend.frameon"] = False
rcParams["lines.markersize"] = 10
import warnings
warnings.filterwarnings('ignore')
```
As a quick example, we show how to use the riccati module to solve the Airy equation,
$$ u''(t) + tu(t) = 0, $$
on the interval $t \in [1, 50]$, subject to the initial conditions $u(1) = 1$, $u'(1) = 0$. The equation takes the required form,
$$ u''(t) + 2\gamma(t)u'(t) + \omega^2(t)u(t) = 0, $$
with $\gamma(t) = 0$ and $\omega(t) = \sqrt{t}$.
First we import the necessary modules:
```
import numpy as np
import riccati
```
Then we need to define the problem by specifying the ODE coefficients, $\omega(t)$ and $\gamma(t)$ as callables, making sure that the result vectorises correctly:
```
w = lambda t: np.sqrt(t)
g = lambda t: np.zeros_like(t) # Make sure the result is vectorised!
```
We then define the ODE with `solversetup`. This is so that the same ODE can be solved repeatedly with different initial conditions or tolerance parameters, without overhead:
```
# Set up the solver
info = riccati.solversetup(w, g)
```
To fully nail down the solution, we need to define the integration range and initial conditions, which are the only strictly necessary parameters for the solver to run:
```
# Integration range
ti = 1e0
tf = 5e1
# Initial conditions
ui = 1.0
dui = 0.0
```
We'll now give some optional parameters: we set the (local) relative tolerance, `eps`, and we ask the solver to produce output at intermediate points (as opposed to points the solver would naturally step to; called dense output) for visualisation:
```
# Relative tolerance (optional)
eps = 1e-12
# Dense output (optional)
t_eval = np.linspace(ti, tf, 800)
```
We are now ready to solve! Some of the outputs are not important for this example, so we store them together in `misc`.
```
ts, ys, *misc, y_eval = riccati.solve(info, ti, tf, ui, dui, eps = eps, xeval = t_eval)
```
Finally, we plot the output:
```
from matplotlib import pyplot as plt
plt.figure()
plt.plot(t_eval, y_eval, label = "Dense output", color = 'k')
plt.plot(ts, ys, '.', label = "Internal step", color = 'C1')
plt.xlim(ti, tf)
plt.xlabel('$t$')
plt.ylabel('$u(t)$')
plt.legend()
plt.show()
```
| /riccati-1.1.1.tar.gz/riccati-1.1.1/docs/quickstart.ipynb | 0.640748 | 0.916484 | quickstart.ipynb | pypi |
import numpy as np
import riccati
import scipy.special as sp
from scipy.integrate import solve_ivp
import matplotlib
from matplotlib import pyplot as plt
import math
import os
from pathlib import Path
import time
import pyoscode
from matplotlib.legend_handler import HandlerTuple
import pandas
import subprocess
from matplotlib.ticker import LogLocator
class HandlerTupleVertical(HandlerTuple):
def __init__(self, **kwargs):
HandlerTuple.__init__(self, **kwargs)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
# How many lines are there.
numlines = len(orig_handle)
handler_map = legend.get_legend_handler_map()
# divide the vertical space where the lines will go
# into equal parts based on the number of lines
height_y = (height / numlines)
leglines = []
for i, handle in enumerate(orig_handle):
handler = legend.get_legend_handler(handler_map, handle)
legline = handler.create_artists(legend, handle,
xdescent,
(2*i + 1)*height_y,
width,
2*height,
fontsize, trans)
leglines.extend(legline)
return leglines
def Bremer237(l, n, eps, epsh, outdir, rdc = True, wkbmarching = False,\
kummer = False, oscode = False, rk = False):
"""
Solves problem (237) from Bremer's "On the numerical solution of second
order ordinary differential equations in the high-frequency regime" paper.
"""
def w(x):
return l*np.sqrt(1 - x**2*np.cos(3*x))
def g(x):
return np.zeros_like(x)
# For the reference solution
def f(t, y):
yp = np.zeros_like(y)
yp[0] = y[1]
yp[1] = -l**2*(1 - t**2*np.cos(3*t))*y[0]
return yp
xi = -1.0
xf = 1.0
eps = eps
yi = 0.0
dyi = l
yi_vec = np.array([yi, dyi])
# Utility function for rounding to n significant digits
round_to_n = lambda n, x: x if x == 0 else round(x, - int(math.floor(math.log10(abs(x)))) + (n-1))
# Reference solution and its reported error
reftable = "./data/eq237.txt"
refarray = np.genfromtxt(reftable, delimiter=',')
ls = refarray[:,0]
ytrue = refarray[abs(ls -l) < 1e-8, 1]
errref = refarray[abs(ls -l) < 1e-8, 2]
if rdc:
print("riccati")
N = 1000 # Number of repetitions for timing
epsh = epsh
n = n
p = n
start = time.time_ns()
info = riccati.solversetup(w, g, n = n, p = p)
for i in range(N):
xs, ys, dys, ss, ps, stypes = riccati.solve(info, xi, xf, yi, dyi, eps = eps, epsh = epsh, hard_stop = True)
end = time.time_ns()
ys = np.array(ys)
# Compute statistics
runtime = (end - start)*1e-9/N
yerr = np.abs((ytrue - ys[-1])/ytrue)
# Write to txt file
# Create dir
outputf = outdir + "bremer237-rdc.txt"
outputpath = Path(outputf)
outputpath.touch(exist_ok = True)
lines = ""
if os.stat(outputf).st_size != 0:
with open(outputf, 'r') as f:
lines = f.readlines()
with open(outputf, 'w') as f:
if lines == "":
f.write("# method, l, eps, relerr, tsolve, errlessref, params\n")
for line in lines:
f.write(line)
f.write("{}, {}, {}, {}, {}, {}, {}".format("rdc",\
l, eps, round_to_n(3, max(yerr)), round_to_n(3, runtime),\
(yerr < errref)[0], "(n = {}; p = {}; epsh = {})".format(n, p, epsh)))
f.write("\n")
if oscode:
print("oscode")
if eps < 1e-8 and l < 1e4:
N = 10
else:
N = 100
# Time this process
start = time.time_ns()
for i in range(N):
solution = pyoscode.solve_fn(w, g, xi, xf, yi, dyi, rtol = eps)
end = time.time_ns()
ys = solution['sol']
ys = np.array(ys)
yerr = np.abs((ytrue - ys[-1])/ytrue)
runtime = (end - start)*1e-9/N
# Write to txt file
outputf = outdir + "bremer237-oscode.txt"
outputpath = Path(outputf)
outputpath.touch(exist_ok = True)
lines = ""
if os.stat(outputf).st_size != 0:
with open(outputf, 'r') as f:
lines = f.readlines()
with open(outputf, 'w') as f:
if lines == "":
f.write("# method, l, eps, relerr, tsolve, errlessref, params\n")
for line in lines:
f.write(line)
f.write("{}, {}, {}, {}, {}, {}, {}".format("oscode",\
l, eps, round_to_n(3, max(yerr)), round_to_n(3, runtime),\
(yerr < errref)[0], "(nrk = default; nwkb = default)"))
f.write("\n")
if kummer:
print("Kummer phase function method")
kummerscript = "test_eq237"
# Compile and run fortran code with command-line arguments l and eps
os.chdir("./ext-codes/Phase-functions")
subprocess.run(["make", "clean"])
subprocess.run(["make", kummerscript])
kummeroutput = subprocess.run(["./{}".format(kummerscript), str(l), str(eps)], capture_output = True)
kummerstdout = kummeroutput.stdout.decode('utf-8')
print(kummerstdout)
y, runtime, n_fevals = [float(i) for i in kummerstdout.split()]
yerr = np.abs((ytrue - y)/ytrue)
# Write to txt file
os.chdir("../../")
outputf = outdir + "bremer237-kummer.txt"
outputpath = Path(outputf)
outputpath.touch(exist_ok = True)
lines = ""
if os.stat(outputf).st_size != 0:
with open(outputf, 'r') as f:
lines = f.readlines()
with open(outputf, 'w') as f:
if lines == "":
f.write("# method, l, eps, relerr, tsolve, errlessref, params\n")
for line in lines:
f.write(line)
f.write("{}, {}, {}, {}, {}, {}, {}".format("kummer",\
l, eps, round_to_n(3, max(yerr)), round_to_n(3, runtime),\
(yerr < errref)[0], "()"))
f.write("\n")
if wkbmarching:
print("WKB marching method")
if eps < 1e-8 and l < 1e2:
N = 1000
elif eps < 1e-8 and l < 1e4:
N = 100
elif l < 1e2:
N = 100
else:
N = 100000
print("N:", N)
# Write to txt file
# Create dir
matlabscript = "bremer237"
outputf = "\\\"" + outdir + "bremer237-wkbmarching.txt\\\""
outputpath = Path(outputf)
os.chdir("./ext-codes/adaptive-WKB-marching-method")
# Run matlab script (will write file)
os.system("matlab -batch \"global aeval; {}({}, {}, {}, {}); exit\" ".format(matlabscript, l, eps, N, outputf))
os.chdir("../../")
if rk:
print("Runge--Kutta")
# We're only running this once because it's slow
atol = 1e-14
method = "DOP853"
f = lambda t, y: np.array([y[1], -l**2*(1 - t**2*np.cos(3*t))*y[0]])
time0 = time.time_ns()
sol = solve_ivp(f, [-1, 1], [0, l], method = method, rtol = eps, atol = atol)
time1 = time.time_ns()
runtime = (time1 - time0)*1e-9
err = np.abs((sol.y[0,-1]- ytrue)/ytrue)[0]
# Write to txt file
outputf = outdir + "bremer237-rk.txt"
outputpath = Path(outputf)
outputpath.touch(exist_ok = True)
lines = ""
if os.stat(outputf).st_size != 0:
with open(outputf, 'r') as f:
lines = f.readlines()
with open(outputf, 'w') as f:
if lines == "":
f.write("# method, l, eps, relerr, tsolve, errlessref, params\n")
for line in lines:
f.write(line)
f.write("{}, {}, {}, {}, {}, {}, {}".format("rk",\
l, eps, round_to_n(3, err), round_to_n(3, runtime),\
(err < errref)[0], "(atol = {}; method = {})".format(atol, method)))
f.write("\n")
def joss_fig(outdir):
# Example solution
def f(t, y):
yp = np.zeros_like(y)
yp[0] = y[1]
yp[1] = -l**2*(1 - t**2*np.cos(3*t))*y[0]
return yp
eps = 1e-4
atol = 1e-14
method = "DOP853"
l = 1e2
f = lambda t, y: np.array([y[1], -l**2*(1 - t**2*np.cos(3*t))*y[0]])
t_eval = np.linspace(-1, 1, 5000)
sol = solve_ivp(f, [-1, 1], [0, l], method = method, rtol = eps, atol = atol, t_eval = t_eval)
# Helper function
round_to_n = lambda n, x: x if x == 0 else round(x, - int(math.floor(math.log10(abs(x)))) + (n-1))
# Read in little tables and combine into one pandas dataframe
outputfs = [outdir + "bremer237-{0}.txt".format(method) for method in ["rk", "rdc", "kummer", "oscode", "wkbmarching"]]
dfs = []
for outputf in outputfs:
df = pandas.read_csv(outputf, sep = ', ')#, index_col = None)
dfs.append(df)
data = pandas.concat(dfs, axis = 0)#, ignore_index = True)
print(data)
print(data.columns)
solvernames = data['# method']
epss = data['eps']
oscodes = data.loc[solvernames == 'oscode']
rks = data.loc[solvernames == 'rk']
wkbs = data.loc[solvernames == 'wkbmarching']
rdcs = data.loc[solvernames == 'rdc']
kummers = data.loc[solvernames == 'kummer']
allosc = oscodes.loc[oscodes['eps'] == 1e-12]
allrk = rks.loc[rks['eps'] == 1e-12]
allricc = rdcs.loc[rdcs['eps'] == 1e-12]
allarn = wkbs.loc[wkbs['eps'] == 1e-12]
allkummer = kummers.loc[kummers['eps'] == 1e-12]
losc = allosc['l']
lrk = allrk['l']
lricc = allricc['l']
larn = allarn['l']
lkum = allkummer['l']
tosc = allosc['tsolve']
trk = allrk['tsolve']
tricc = allricc['tsolve']
tkum = allkummer['tsolve']
tarn = allarn['tsolve']
eosc = allosc['relerr']
erk = allrk['relerr']
ericc = allricc['relerr']
earn = allarn['relerr']
ekum = allkummer['relerr']
allosc2 = oscodes.loc[oscodes['eps'] == 1e-6]
allrk2 = rks.loc[rks['eps'] == 1e-6]
allricc2 = rdcs.loc[rdcs['eps'] == 1e-6]
allarn2 = wkbs.loc[wkbs['eps'] == 1e-6]
allkummer2 = kummers.loc[kummers['eps'] == 1e-6]
losc2 = allosc2['l']
lrk2 = allrk2['l']
lricc2 = allricc2['l']
larn2 = allarn2['l']
lkum2 = allkummer2['l']
tosc2 = allosc2['tsolve']
trk2 = allrk2['tsolve']
tricc2 = allricc2['tsolve']
tarn2 = allarn2['tsolve']
tkum2 = allkummer2['tsolve']
eosc2 = allosc2['relerr']
erk2 = allrk2['relerr']
ericc2 = allricc2['relerr']
earn2 = allarn2['relerr']
ekum2 = allkummer2['relerr']
# Bremer 'exclusion zone'
ebrem = np.array([7e-14, 5e-13, 3e-12, 5e-11, 3e-10, 5e-9, 4e-8])
# Colourmap
tab20c = matplotlib.cm.get_cmap('tab20c').colors
tab20b = matplotlib.cm.get_cmap('tab20b').colors
plt.style.use('riccatipaper')
fig, (ax1, ax0) = plt.subplots(1, 2, figsize = (6, 3))
l1, = ax0.loglog(lrk, trk, '.-', c = tab20c[0*4 + 0])
l2, = ax0.loglog(losc, tosc, 'o-', c = tab20c[1*4 + 0])
l3, = ax0.loglog(larn, tarn, '^-', c = tab20c[2*4 + 0])
l4, = ax0.loglog(lkum, tkum, 'x-', c = tab20c[3*4 + 0])
l5, = ax0.loglog(lricc, tricc, 'v-', c = tab20b[2*4 + 0])
l6, = ax0.loglog(lrk2, trk2, marker = '.', ls = '--', c = tab20c[0*4 + 1])
l7, = ax0.loglog(losc2, tosc2, marker = 'o', ls = '--', c = tab20c[1*4 + 1])
l8, = ax0.loglog(larn2, tarn2, marker = '^', ls = '--', c = tab20c[2*4 + 1])
l9, = ax0.loglog(lkum2, tkum2, marker = 'x', ls = '--', c = tab20c[3*4 + 1])
l10, = ax0.loglog(lricc2, tricc2, marker = 'v', ls = '--', c = tab20b[2*4 + 1])
# Invisible lines
l11, = ax0.loglog(lricc, tricc*1e-5, c = tab20c[0*4 + 0])
l12, = ax0.loglog(lricc, tricc*1e-5, c = tab20c[1*4 + 0])
l13, = ax0.loglog(lricc, tricc*1e-5, c = tab20c[2*4 + 0])
l14, = ax0.loglog(lricc, tricc*1e-5, c = tab20c[3*4 + 0])
l15, = ax0.loglog(lricc, tricc*1e-5, c = tab20b[2*4 + 0])
l16, = ax0.loglog(lricc, tricc*1e-5, '--', c = tab20c[0*4 + 1])
l17, = ax0.loglog(lricc, tricc*1e-5, '--', c = tab20c[1*4 + 1])
l18, = ax0.loglog(lricc, tricc*1e-5, '--', c = tab20c[2*4 + 1])
l19, = ax0.loglog(lricc, tricc*1e-5, '--', c = tab20c[3*4 + 1])
l20, = ax0.loglog(lricc, tricc*1e-5, '--', c = tab20b[2*4 + 1])
l = ax0.legend([(l1, l6), (l2, l7), (l3, l8), (l4, l9), (l5, l10), (l11, l12, l13, l14, l15), (l16, l17, l18, l19, l20)], ['DOP853 (SciPy)', '\\texttt{oscode}', 'WKB marching', "Kummer's phase function", 'ARDC (\\texttt{riccati})', '$\\varepsilon = 10^{-12}$', '$\\varepsilon = 10^{-6}$'], handler_map = {tuple: HandlerTupleVertical()})
ax0.set_ylabel('runtime/s, $t_{\mathrm{solve}}$')
ax0.set_xlim((1e1, 1e7))
ax0.set_ylim((2e-4, 2e5))
ax0.set_xlabel('$\lambda$')
ax0.yaxis.set_minor_locator(LogLocator(numticks=15, subs=np.arange(-4, 6)))
ax1.plot(t_eval, sol.y[0,:], color='black', lw = 0.5)
ax1.set_xlabel('$t$')
ax1.set_ylabel('$u(t)$')
ax1.set_xlim((-1, 1))
plt.savefig('./timing-fig-2.pdf')
outdir = os.getcwd() + "/data/"
epss, epshs, ns = [1e-12, 1e-6], [1e-13, 1e-9], [35, 20]
for m in np.logspace(1, 7, num = 7):
print("Testing solver on Bremer 2018 Eq. (237) with lambda = {}".format(m))
for eps, epsh, n in zip(epss, epshs, ns):
if m < 1e7:
Bremer237(m, n, eps, epsh, outdir, rk = True, rdc = True,\
oscode = True, wkbmarching = True, kummer = True)
else:
Bremer237(m, n, eps, epsh, outdir, rk = False, rdc = True,\
oscode = True, wkbmarching = True, kummer = True)
joss_fig(outdir) | /riccati-1.1.1.tar.gz/riccati-1.1.1/benchmarks/fig1.py | 0.503174 | 0.320077 | fig1.py | pypi |
PDF Utils
=========
The module `ricecooker.utils.pdf` contains helper functions for manipulating PDFs.
PDF splitter
------------
When importing source PDFs like books that are very long documents (100+ pages),
it is better for the Kolibri user experience to split them into multiple shorter PDF
content nodes.
The `PDFParser` class in `ricecooker.utils.pdf` is a wrapper around the `PyPDF2`
library that allows us to split long PDF documents into individual chapters,
based on either the information available in the PDF's table of contents, or user-defined page ranges.
### Split into chapters
Here is how to split a PDF document located at `pdf_path`, which can be either
a local path or a URL:
from ricecooker.utils.pdf import PDFParser
pdf_path = '/some/local/doc.pdf' or 'https://somesite.org/some/remote/doc.pdf'
with PDFParser(pdf_path) as pdfparser:
chapters = pdfparser.split_chapters()
The output `chapters` is list of dictionaries with `title` and `path` attributes:
[
{'title':'First chapter', 'path':'downloads/doc/First-chapter.pdf'},
{'title':'Second chapter', 'path':'downloads/doc/Second-chapter.pdf'},
...
]
Use this information to create an individual `DocumentNode` for each PDF and store
them in a `TopicNode` that corresponds to the book:
from ricecooker.classes import nodes, files
book_node = nodes.TopicNode(title='Book title', description='Book description')
for chapter in chapters:
chapter_node = nodes.DocumentNode(
title=chapter['title'],
files=[files.DocumentFile(chapter['path'])],
...
)
book_node.add_child(chapter_node)
By default, the split PDFs are saved in the directory `./downloads`. You can customize
where the files are saved by passing the optional argument `directory` when initializing
the `PDFParser` class, e.g., `PDFParser(pdf_path, directory='somedircustomdir')`.
The `split_chapters` method uses the internal `get_toc` method to obtain a list
of page ranges for each chapter. Use `pdfparser.get_toc()` to inspect the PDF's
table of contents. The table of contents data returned by the `get_toc` method
has the following format:
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
If the page ranges automatically detected from the PDF's table of contents are
not suitable for the document you're processing, or if the PDF document does not
contain table of contents information, you can manually create the title and
page range data and pass it as the `jsondata` argument to `split_chapters()`.
page_ranges = pdfparser.get_toc()
# possibly modify/customize page_ranges, or load from a manually created file
chapters = pdfparser.split_chapters(jsondata=page_ranges)
### Split into chapters and subchapters
By default, `get_toc` will detect only the top-level document structure,
which might not be sufficient to split the document into useful chunks.
You can pass the `subchapters=True` optional argument to the `get_toc()` method
to obtain a two-level hierarchy of chapters and subchapters from the PDF's TOC.
For example, if the table of contents of textbook PDF has the following structure:
Intro
Part I
Subchapter 1
Subchapter 2
Part II
Subchapter 21
Subchapter 22
Conclusion
then calling `pdfparser.get_toc(subchapters=True)` will return the following
chapter-subchapter tree structure:
[
{ 'title': 'Part I', 'page_start': 0, 'page_end': 10,
'children': [
{'title': 'Subchapter 1', 'page_start': 0, 'page_end': 5},
{'title': 'Subchapter 2', 'page_start': 5, 'page_end': 10}
]},
{ 'title': 'Part II', 'page_start': 10, 'page_end': 20,
'children': [
{'title': 'Subchapter 21', 'page_start': 10, 'page_end': 15},
{'title': 'Subchapter 22', 'page_start': 15, 'page_end': 20}
]},
{ 'title': 'Conclusion', 'page_start': 20, 'page_end': 25 }
]
Use the `split_subchapters` method to process this tree structure and obtain the
tree of titles and paths:
[
{ 'title': 'Part I',
'children': [
{'title': 'Subchapter 1', 'path': '/tmp/0-0-Subchapter-1.pdf'},
{'title': 'Subchapter 2', 'path': '/tmp/0-1-Subchapter-2.pdf'},
]},
{ 'title': 'Part II',
'children': [
{'title': 'Subchapter 21', 'path': '/tmp/1-0-Subchapter-21.pdf'},
{'title': 'Subchapter 22', 'path': '/tmp/1-1-Subchapter-22.pdf'},
]},
{ 'title': 'Conclusion', 'path': '/tmp/2-Conclusion.pdf'}
]
You'll need to create a `TopicNode` for each chapter that has `children` and
create a `DocumentNode` for each of the children of that chapter.
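A minimal sketch of that conversion might look like this (other required node
metadata such as `source_id` and `license` is omitted for brevity):

    from ricecooker.classes import nodes, files

    def add_subchapter_nodes(parent_node, items):
        for item in items:
            if 'children' in item:
                # Chapters with children become topic (folder) nodes
                chapter_topic = nodes.TopicNode(title=item['title'])
                parent_node.add_child(chapter_topic)
                add_subchapter_nodes(chapter_topic, item['children'])
            else:
                # Leaf items become individual PDF document nodes
                chapter_node = nodes.DocumentNode(
                    title=item['title'],
                    files=[files.DocumentFile(item['path'])],
                    # ... plus source_id, license, and other required metadata
                )
                parent_node.add_child(chapter_node)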
Accessibility notes
-------------------
Do not use `PDFParser` for tagged PDFs because splitting and processing loses
the accessibility features of the original PDF document.
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/pdfutils.md | 0.834744 | 0.710616 | pdfutils.md | pypi |
Downloading web content
=======================
### The ArchiveDownloader class
**New in 0.7**
#### Overview
The `ArchiveDownloader` class encapsulates the functionality of
downloading URLs, their related resources, and rewriting page links
to point to their downloaded location.
All the downloaded content becomes part of an archive, which can be
saved and reused on future runs of the script. After the download completes,
the script could even be run offline.
This enables many things, including the ability to update and re-run scripts even
if the original content was removed, easily and automatically create a dependency
zip of content shared by various pages in the archive, and create HTML5 zips for
pages that correctly include all necessary resources without extra code.
#### Using ArchiveDownloader
`ArchiveDownloader` has the following general workflow:
- Create an `ArchiveDownloader` instance before downloading any web content.
- Call `get_page` on that instance passing in full URLs to pages you wish to download.
- Once content has been downloaded, you need to create an HTML5 zip of the content.
* If the content does not need to be modified, call `export_page_as_zip` and use the
zip created as a file for an HTML5 app node.
* If you need to make modifications, call `create_zip_dir_for_page`, then modify
the files in the directory it returns as needed. (Not modifying the original
sources allows you to keep a clean copy at all times.) Finally, create a ZIP by
calling `ricecooker.utils.create_predictable_zip` and use the zip created
as a file for an HTML5 app node.
Usage example:
```python
from ricecooker.utils.downloader import ArchiveDownloader
sushi_url = 'https://en.wikipedia.org/wiki/Sushi'
archive = ArchiveDownloader("downloads/archive_1")
# Download and store page in the archive
archive.get_page(sushi_url)
# Convert page into a Kolibri-friendly HTML5 zip file
zip_file = archive.export_page_as_zip(sushi_url)
# ... code to add zip_file to an HTML5AppNode ...
```
Example scripts:
The [COVID 19 Simulations sushi chef](https://github.com/learningequality/sushi-chef-covid19-sim/blob/master/sushichef.py)
provides a relatively small and simple example of how to use `ArchiveDownloader`.
#### Using `get_page`
By default, `get_page` will download the page and all its related resources, including
CSS, image, and JS files. You can also pass a few optional arguments to modify its
behavior:
`refresh` [True | False]:
If `True`, this will re-download the page, even if it is already in the archive.
`run_js` [True | False]:
If `True`, the page will be loaded using `pyppeteer` and wait until page load handlers
have run before downloading the content. If `False` (default), it will use `requests`
to download the content.
`link_policy` [Dict or None]:
Defines how to handle scraping of page links. The default, `None`, indicates no scraping.
If a dictionary is passed, it may contain the following keys (see the example sketch after this list):
* `levels` [Integer]: (Required) Number of levels deep to scrape links.
* `scope` [String]: (Optional) Defaults to "internal", which only scrapes links on the
same domain. Change to "all" to scrape links from external domains as well.
* `whitelist` [List]: (Optional) A list of strings containing URLs to be whitelisted.
They will be compared against complete URLs, so the strings can be as complete as desired.
e.g. `www.mydomain.com/subdir/subdir2/` can be used to match against only URLs in that
particular subdir.
* `blacklist` [List]: (Optional) A list of strings containing URLs to be blacklisted.
URLs can be specified using the same rules as the `whitelist` argument.
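For example, a minimal sketch of a policy that follows internal links two levels
deep while skipping a hypothetical `/search/` section of the site:

```python
archive = ArchiveDownloader("downloads/archive_2")
archive.get_page(
    'https://example.com/course/index.html',   # hypothetical starting page
    link_policy={
        'levels': 2,                            # follow links two levels deep
        'scope': 'internal',                    # stay on the same domain (default)
        'blacklist': ['example.com/search/'],   # skip URLs matching this string
    },
)
```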
### downloader.py Functions
The Ricecooker module `utils/downloader.py` provides a `read` function that can
be used to read the file contents from both urls and local file paths.
Usage examples:
```python
from ricecooker.utils.downloader import read
local_file_content = read('/path/to/local/file.pdf') # Load local file
web_content = read('https://example.com/page') # Load web page contents
web_content2 = read('https://example.com/loadpage', loadjs=True) # Load js before getting contents
```
The `loadjs` option will run the JavaScript code on the webpage before reading
the contents of the page, which can be useful for scraping certain websites that
depend on JavaScript to build the page DOM tree.
If you need to use a custom session, you can also use the `session` option.
This can be useful for sites that require login.
See the [sushi-chef-firki code](https://github.com/learningequality/sushi-chef-firki/blob/master/client.py#L20-L31)
for an example of this.
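As an illustration, here is a minimal sketch of passing a pre-authenticated session
to `read` (the login URL and form fields are hypothetical and must be adapted to the actual site):

```python
import requests
from ricecooker.utils.downloader import read

session = requests.Session()
# Hypothetical login endpoint and credentials -- adjust for the real site
session.post('https://example.com/login', data={'username': 'user', 'password': 'secret'})

# Subsequent reads reuse the cookies stored on the authenticated session
members_page = read('https://example.com/members-only/page', session=session)
```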
Caching
-------
Requests made with the `read` method are cached by default, and the cache doesn't
have an expiration date. The cached files are stored in the folder `.webcache` in
the chef repository. You must manually delete this folder when the source website changes.
rm -rf .webcache
This [sample code](https://github.com/learningequality/sushi-chef-pradigi/blob/master/sushichef.py#L64-L70)
shows how to set up requests session caching that expires after one day.
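One way to get a cache that expires after a day is sketched below, using the third-party
`requests_cache` library (the linked sample code may use a different caching library):

```python
from datetime import timedelta

import requests_cache
from ricecooker.utils.downloader import read

# Cache responses on disk and expire them after one day
session = requests_cache.CachedSession('dailycache', expire_after=timedelta(days=1))
web_content = read('https://example.com/page', session=session)
```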
Further reading
---------------
- Tutorial on the Python [requests module](https://stackabuse.com/the-python-requests-module/).
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/downloader.md | 0.454472 | 0.761095 | downloader.md | pypi |
Video compression tools
=======================
Importing video files into Kolibri requires special considerations about the file
size of the video resources that will be imported.
Below are some general guidelines for importing video files:
- Use the `.mp4` file format
- Use the `H.264` (a.k.a. `x264`) video codec to ensure video will play in web browsers
- Use the `aac` audio codec
- Use compression
- Short videos (5-10 mins long) should be roughly less than 15MB
- Longer video lectures (1 hour long) should not be larger than 200MB
- High-resolution videos should be converted to lower resolution formats:
Here are some recommended choices for video vertical resolution:
- Use max height of `480` for videos that work well in low resolution (most videos)
- Use max height of `720` for high resolution videos (lectures with writing on board)
Using video compression and low resolutions is important for the context of use.
**Think of the learners and the device they will be using to view the videos.**
Consider also the overall size of the channel---**how much storage space** will be
required for the entire collection of videos?
Let's now look at compression tools that you can use to ensure a good video
experience for all Kolibri users, regardless of their device.
Automated conversion
--------------------
The `ricecooker` library can handle the video compression for you if you specify
the `--compress` command line argument to the chef script, e.g. `python chef.py ... --compress`.
Under the hood, the `ffmpeg` video conversion program will be called to compress
video files before uploading them to Kolibri Studio. Specifying `--compress` on
the command line will use the following default settings:
ffmpeg -i inputfile.mp4 \
-b:a 32k -ac 1 \
-vf scale="'w=-2:h=trunc(min(ih,480)/2)*2'" \
-crf 32 \
-profile:v baseline -level 3.0 -preset slow -v error -strict -2 -stats \
-y outputfile.mp4
This command takes the `inputfile.mp4` and outputs the file `outputfile.mp4` that
has the following transformations applied to it:
- Limits the audio codec to 32k/sec
- Scale the video to max-height of 480 pixels
- Compress the video with CRF of 32 (constant rate factor)
To override these defaults, chef authors can pass the `ffmpeg_settings` (dict) argument
when creating a `VideoFile` object, and specify these options: `crf`, `max_height`, and `max_width`.
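For example, a minimal sketch of a `VideoFile` with custom compression settings
(the source URL here is hypothetical):

    from ricecooker.classes.files import VideoFile

    video_file = VideoFile(
        path='https://example.org/videos/lecture1.mp4',  # hypothetical source
        ffmpeg_settings={
            'crf': 28,          # stronger compression than the default
            'max_height': 720,  # keep higher resolution, e.g. for writing on a board
        },
    )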
Manual conversion
-----------------
For optimal control of the compression options, users should perform the conversion
and compression steps before uploading their videos to Kolibri Studio.
We highly recommend the command line tool [`ffmpeg`](https://www.ffmpeg.org/).
You'll need to use it through the command prompt (Terminal in linux, CMD in Windows).
Any video conversion and compression operation can be performed by setting the
appropriate parameters.
### Installing ffmpeg
Before proceeding, please go and download the `ffmpeg` program for you OS:
Links:
- Homepage: https://www.ffmpeg.org/
- Downloads for windows users: https://web.archive.org/web/20200918193047/https://ffmpeg.zeranoe.com/builds/
Choose 64bit "static" version to download, unzip the archive, then go to the folder
called `bin/` inside the zip file. Copy the files `ffmpeg.exe` and `ffprobe.exe`
to the folder on your computer where your videos are stored.
To check the installation was successful you can open a command line prompt
([cmd.exe on Windows](https://www.howtogeek.com/wp-content/uploads/2017/02/Windows_106-650x300.jpg),
or terminal on mac/linux), and try typing in the command:
ffmpeg -h
which will print command help information. You can see the full list command line
options for `ffmpeg` here: [https://www.ffmpeg.org/ffmpeg.html](https://www.ffmpeg.org/ffmpeg.html).
Don't worry you won't need to use all of them.
If you see the error message "ffmpeg is not recognized as an internal or external command,
operable program or batch file," you will have to change directory to the folder where you
saved the program files `ffmpeg.exe` and `ffprobe.exe` (e.g. use `cd Desktop` if saved
on the desktop or `cd %HOMEPATH%\Documents` to go to your Documents folder).
### Looking around with ffprobe
Equally useful is the command `ffprobe` which prints detailed information for
any video files. To illustrate the usefulness, let's see what info `ffprobe`
can tells us about some video files downloaded from the internet. You can download
the same files from [here](https://archive.org/details/CM_National_Rice_Cooker_1982)
if you want to follow along (download the three different video formats available
in the sidebar: `ogv`, `mpg`, and `mp4`)
To check what's in the file `CM_National_Rice_Cooker_1982.ogv` use the command:
ffprobe CM_National_Rice_Cooker_1982.ogv
Input #0, ogg, from 'CM_National_Rice_Cooker_1982.ogv':
Duration: 00:00:15.03, start: 0.000000, bitrate: 615 kb/s
Stream #0:0: Video: theora, yuv420p,
400x300 [SAR 1:1 DAR 4:3], 29.97 fps, 29.97 tbr, 29.97 tbn, 29.97 tbc
Stream #0:1: Audio: vorbis, 44100 Hz, stereo, fltp, 128 kb/s
The video codec is `theora` and the audio codec is `vorbis`, so this video will
need to be converted before uploading to Studio.
Similarly we can check the codecs for `CM_National_Rice_Cooker_1982.mpg` using
ffprobe CM_National_Rice_Cooker_1982.mpg
Input #0, mpeg, from 'CM_National_Rice_Cooker_1982.mpg':
Duration: 00:00:15.02, start: 0.233367, bitrate: 6308 kb/s
Stream #0:0[0x1e0]: Video: mpeg2video (Main), yuv420p(tv, smpte170m, top first),
720x480 [SAR 8:9 DAR 4:3], 29.97 fps, 29.97 tbr, 90k tbn, 59.94 tbc
Stream #0:1[0x1c0]: Audio: mp2, 48000 Hz, stereo, s16p, 224 kb/s
The video codec is `mpeg2video` and the audio codec is `mp2`, so this video too
will need to be converted.
Finally, to check the codecs for `CM_National_Rice_Cooker_1982.mp4`, we use
ffprobe CM_National_Rice_Cooker_1982.mp4
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'CM_National_Rice_Cooker_1982.mp4':
Duration: 00:00:15.05, start: -0.012585, bitrate: 835 kb/s
Stream #0:0(und): Video: h264 (Constrained Baseline) (avc1 / 0x31637661), yuv420p,
640x480 [SAR 1:1 DAR 4:3], 700 kb/s, 29.97 fps, 29.97 tbr, 30k tbn, 59.94 tbc (default)
Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 129 kb/s (default)
Here we see the `h264` video codec and `aac/mp4a` audio codec so this file can
be uploaded to Studio as is. These codecs are relatively well supported by
[most browsers](https://developer.mozilla.org/en-US/docs/Web/Media/Formats).
This video can be uploaded to Kolibri.
### Converting files using ffmpeg
Recall the file `CM_National_Rice_Cooker_1982.mpg` that we downloaded above,
which uses the Kolibri-incompatible codecs `mpeg2video` and `mp2`.
Let's see how to use the `ffmpeg` command to convert it to the supported codecs:
ffmpeg -i CM_National_Rice_Cooker_1982.mpg \
-b:a 32k -ac 1 \
-vf scale="'w=-2:h=trunc(min(ih,480)/2)*2'" \
-crf 32 \
-profile:v baseline -level 3.0 -preset slow -v error -strict -2 -stats \
-y compressed.mp4
Note the `\` character denotes line-continuation and works only on UNIX.
Windows users should put the entire command on a single line:
ffmpeg -i CM_National_Rice_Cooker_1982.mpg -b:a 32k -ac 1 -vf scale="'w=-2:h=trunc(min(ih,480)/2)*2'" -crf 32 -profile:v baseline -level 3.0 -preset slow -v error -strict -2 -stats -y compressed.mp4
This command will run for some time (video transcoding takes a lot of CPU power).
In the end, if you check using `ffprobe compressed.mp4`, you'll see that the
converted output file has video codec `h264` and audio codec `aac`.
The resolution `720x480` and bitrate `534 kb/s` are also very good parameters.
Note the file size of `compressed.mp4` is 1MB, about half the size of the `.mp4`
file we obtained directly from the web (`CM_National_Rice_Cooker_1982.mp4`).
Clearly the compression option `-crf 32` had an effect.
The video `compressed.mp4` is now ready for upload to Studio!
### Using the ffmpeg helper scripts
We provide a helper script to help run the ffmpeg command. The instructions are different
depending if your operating systems is Windows or Mac/Linux:
- For Windows users, download the file [convertvideo.bat](https://raw.githubusercontent.com/learningequality/ricecooker/master/resources/scripts/convertvideo.bat)
and save it to your computer. Make sure the extension is `.bat` (Windows batch file).
Put the `convertvideo.bat` file in the same folder where you copied `ffmpeg.exe`.
To convert `inputfile.mp4` to `outputfile.mp4` using the conversion script, open a
command line prompt, navigate to the folder where `convertvideo.bat` and `ffmpeg.exe`
are stored, and type the following command:
convertvideo.bat inputfile.mp4 outputfile.mp4
- Linux and Mac users should download [convertvideo.sh](https://raw.githubusercontent.com/learningequality/ricecooker/master/resources/scripts/convertvideo.sh),
save it to the folder where all the videos are. Next open a command prompt and change
directory to that folder. Make the script executable using `chmod u+x convertvideo.sh`,
then you can start converting videos using:
./convertvideo.sh inputfile.mp4 outputfile.mp4
See [https://youtu.be/oKbCbuDlRmY](https://www.youtube.com/watch?v=oKbCbuDlRmY)
for a video walkthrough of the steps and example usage of the batch script.
<iframe width="560" height="315" src="https://www.youtube.com/embed/oKbCbuDlRmY" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<div style="height:20px;"> </div>
The conversion scripts provided are just wrappers for the `ffmpeg` command, so you
won't have to remember all the command line options. If you need
to adjust the conversion parameters, you can edit the scripts---they are ordinary text files,
so you can edit them with notepad.
Note video conversion takes a long time, so be prepared to get a coffee or two.
### HandBrake
If you don't have many videos to convert, you can use [HandBrake](https://handbrake.fr/),
which is a video conversion tool with a graphical user interface. Handbrake uses
`ffmpeg` under the hood, so the same compression results can be achieved as with
the more technical options presented above.
Here are steps for converting videos using HandBrake:
1. **Download** and install handbrake from here [https://handbrake.fr/](https://handbrake.fr/)
2. **Open** the video file you want to compress.
3. From the presets menu, choose **Web > Gmail Medium 5 Minutes 480p30**
4. Set the output filename (e.g. you could use the same as input filename,
but append `_compressed.mp4`). Make sure to use the `.mp4` extension.
5. Click the **Start Encode** button.

Here is a [video guide to using HandBrake](https://www.youtube.com/watch?v=83MdDLaFXfs) for compressing videos.
<iframe width="560" height="315" src="https://www.youtube.com/embed/83MdDLaFXfs" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<div style="height:20px;"> </div>
The **Web > Gmail Medium 5 Minutes 480p30** preset will use the `x264` video codec,
`aac` audio codec, and `480` vertical resolution, and compression rate `crf=23`.
The 480 vertical resolution is a good choice for most videos, but if you find the compressed output to be too low quality, you can try the preset
**Web > Gmail Large 3 Minutes 720p30**, which will result in larger videos files
with 720 vertical resolution.
If your channel contains many videos, or very long videos, you should consider
increasing the "Constant Rate Factor" compression parameter in the Video settings.
Using the value [RF=32](https://github.com/learningequality/ricecooker/blob/master/docs/figures/HandBrake/handbreake_screenshot_video_settings.png)
will result in highly compressed videos, with very small file sizes.
### Experimenting
Since every content source is unique, we recommend that you experiment with
different compression options. The command line tool `ffmpeg` offers a very
useful option called `crf` which stands for Constant Rate Factor.
**Setting this single parameter allows for controlling overall video quality.**
For example, setting `crf=24` produces high quality video (and possibly large file size),
`crf=28` is a mid-range quality, and values of `crf` above 30 produce highly-compressed
videos with small size.
Here are the steps to preview different compression factors in Kolibri:
- Choose a sample video from your collection, let's call it `video.mp4`
- Try different compression options for it:
- Create a CRF=24 version using `ffmpeg -i video.mp4 ... -crf 24 video_crf24.mp4`
- Create a CRF=28 version using `ffmpeg -i video.mp4 ... -crf 28 video_crf28.mp4`
- Create a CRF=30 version using `ffmpeg -i video.mp4 ... -crf 30 video_crf30.mp4`
- Upload the original and the compressed version to a Studio channel
- PUBLISH the channel and record the channel token
- Import the channel into a Kolibri instance using the channel token
- Test video playback on different devices (desktop and mobile browsers on all OSs)
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/video_compression.md | 0.899776 | 0.732735 | video_compression.md | pypi |
CSV Exercises Workflow
======================
In addition to content nodes (files) and topics (folders), we can also specify exercises
using CSV metadata files (and associated images).
Exercises nodes store the usual metadata that all content nodes have (title,
description, author, license, etc.) and contain multiple types of questions.
The currently supported question types for the CSV workflow are:
- `input_question`: Numeric input question, e.g. What is 2+2?
- `single_selection`: Multiple choice questions with a single correct answer.
- `multiple_selection`: Multiple choice questions with multiple correct answers.
To prepare a CSV content channel with exercises, you need the usual things
(A channel directory `channeldir`, `Channel.csv`, and `Content.csv`) and two
additional metadata files `Exercises.csv` and `ExerciseQuestions.csv`, the format
of which is defined below.
You can download a template here:
https://github.com/learningequality/sample-channels/tree/master/channels/csv_exercises
Exercises.csv
-------------
A CSV file that contains the following fields:
- `Path *`:
- `Title *`:
- `Source ID *`: A unique identifier for this exercise, e.g., `exrc1`
- `Description`:
- `Author`:
- `Language`:
- `License ID *`:
- `License Description`:
- `Copyright Holder`:
- `Number Correct`: (integer, optional) This field controls how many questions
students must get correct in order to complete the exercise.
- `Out of Total`: (integer, optional) This field controls how many questions
students are presented in a row; if not specified, the value will be determined
automatically based on the number of questions available (up to a maximum of 5).
- `Randomize`: (bool) True or False
- `Thumbnail`:
ExerciseQuestions.csv
---------------------
Individual questions
- `Source ID *`: This field is the link (foreign key) to the an exercise node, e.g. `exrc1`
- `Question ID *`: A unique identifier for this question within the exercise, e.g. q1
- `Question type *`: (str) Question types are defined in
[le-utils](https://github.com/learningequality/le-utils/blob/master/le_utils/constants/exercises.py#L34).
The currently supported question types for the CSV workflow are:
- `input_question`: Numeric input question, e.g. What is 2+2?
- `single_selection`: Multiple choice questions with a single correct answer.
- `multiple_selection`: Multiple choice questions with multiple correct answers.
- `Question *`: (markdown) contains the question setup and the prompt, e.g. "What is 2+2?"
- `Option A`: (markdown) The first answer option
- `Option B`: (markdown)
- `Option C`: (markdown)
- `Option D`: (markdown)
- `Option E`: (markdown) The fifth answer option
- `Options F...`: Use this field for questions with more than five possible answers.
This field can contain a list of multiple "🍣"-separated string values,
e.g., "Answer F🍣Answer G🍣Answer H"
- `Correct Answer *`: The correct answer
- `Correct Answer 2`: Another correct answer
- `Correct Answer 3`: A third correct answer
- `Hint 1`: (markdown)
- `Hint 2`:
- `Hint 3`:
- `Hint 4`:
- `Hint 5`:
- `Hint 6+`: Use this field for questions with more than five hints.
This field stores a list of "🍣"-separated string values,
e.g., "Hint 6 text🍣Hint 7 text🍣Hing 8 text"
The question, options, answers, and hints support Markdown and LaTeX formatting:
- Use two newlines to start a new paragraph
- Use the Markdown image syntax `![alt text](image.png)` to include images in text fields
- Use dollar signs as math delimiters `$\alpha\beta$`
#### Markdown image paths
Note that image paths used in Markdown will be interpreted as relative to the
location where the chef is running. For example, if the sushi chef project directory
looks like this:
csvchef.py
figures/
exercise3/
somefig.png
content/
Channel.csv
Content.csv
Exercises.csv
ExerciseQuestions.csv
channeldir/
somefile.mp4
anotherfile.pdf
Then the code for including `somefig.png` in a Markdown field of an exercise question
is `![somefig](figures/exercise3/somefig.png)`.
Ordering
--------
The order that content nodes appear in the channel is determined based on their
filenames in alphabetical order, so the choice of filenames can be used to enforce
a particular order of items within each folder.
The filename part of the `Path *` attribute of exercises specified in Exercises.csv
gives each exercise a "virtual filename" so that exercises will appear in the same
alphabetical order, intermixed with the CSV content items defined in `Content.csv`.
Implementation details
----------------------
- To add exercises to a certain channel topic, the folder corresponding to this
topic must exist inside the `channeldir` folder (even if it contains no files).
A corresponding entry must be added to `Content.csv` to describe the metadata
for the topic node containing the exercises.
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/csv_metadata/csv_exercises.md | 0.860911 | 0.866698 | csv_exercises.md | pypi |
Debugging HTML5 app rendering in Kolibri
========================================
The problem
-----------
The edit-preview loop for HTML5App nodes is very time consuming since it requires
running the sushichef script, waiting for the channel to publish in Studio, then
going through the channel UPDATE steps in Kolibri before you can see the edits.
Local HTMLZip replacement hack
------------------------------
It is possible to have a quick edit-refresh-debug loop for HTML5Apps using a local
Kolibri instance by zipping and putting the work-in-progress `webroot/` content
into an existing zip file in the local `.kolibrihome/content/storage/` directory.
Under normal operations files in `content/storage/` are stored based on md5 hash
of their contents, but if you replace a file with a different contents, Kolibri
will still load it.
We provide the script [kolibripreview.py](https://github.com/learningequality/ricecooker/blob/master/ricecooker/utils/kolibripreview.py)
to help with this file-replacement process used for HTML5App debugging and dev.
Prerequisites
-------------
1. [Install](https://kolibri.readthedocs.io/en/latest/install/index.html) Kolibri on your machine.
2. Find the location of `KOLIBRI_HOME` directory for your Kolibri instance.
By default Kolibri will use the directory `.kolibri` in your User's home folder.
3. [Import](https://kolibri.readthedocs.io/en/latest/manage/resources.html#import-with-token)
the **HTML5App Dev Channel** using the token `bilol-vivol` into Kolibri.
Note you can use any channel that contains .zip files for this purpose, but
the code examples below are given based on this channel, which contains the
placeholder file `9cf3a3ab65e771abfebfc67c95a8ce2a.zip` which we'll be replacing.
After this step, you can check the file `$KOLIBRI_HOME/content/storage/9/c/9cf3a3ab65e771abfebfc67c95a8ce2a.zip`
exists on your computer and view it at
[http://localhost:8080/en/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52](http://localhost:8080/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52)
4. Download the helper script `kolibripreview.py` and make it executable:
```bash
wget https://raw.githubusercontent.com/learningequality/ricecooker/master/ricecooker/utils/kolibripreview.py
chmod +x kolibripreview.py
```
Usage
-----
Assuming you have prepared work-in-progress draft directory `webroot`, you can
load int into Kolibri by running:
```bash
./kolibripreview.py --srcdir webroot --destzip ~/.kolibri/content/storage/9/c/9cf3a3ab65e771abfebfc67c95a8ce2a.zip
```
The script will check that the file `webroot/index.html` exists then create a zip
file from the `webroot` directory and replace the placeholder .zip file.
Opening and refreshing the page
[http://localhost:8080/en/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52](http://localhost:8080/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52)
will show you to the result of your work-in-progress `HTML5App`.
You'll need to re-run the script whenever you make changes to the `webroot` then
refresh the [Kolibri page](http://localhost:8080/en/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52).
It's not quite a webpack live dev server, but it is much faster than going through the
ricecooker uploadchannel > Studio PUBLISH > Kolibri UPDATE > Kolibri IMPORT steps.
Testing in different releases
-----------------------------
If you need to test your `HTML5App` works in a specific version of Kolibri, you
can quickly download the `.pex` file and run it as a "one off" test in temporary
`KOLIBRI_HOME` location (to avoid clobbering your main Kolibri install).
A `.pex` file is a self-contained Python EXecutable file that contains all libraries
and is easy to run without requiring setting up a virtual environment or installing
dependencies. You can download Kolibri `.pex` files from the [Kolibri releases page on github](https://github.com/learningequality/kolibri/releases).
The instructions below use the pex file `kolibri-0.13.2.pex` which is the latest
at the time of writing this, but you can easily adjust the commands to any version.
```bash
# Download the .pex file
wget https://github.com/learningequality/kolibri/releases/download/v0.13.2/kolibri-0.13.2.pex
# Create a temporary directory
mkdir -p ~/.kolibrihomes/kolibripreview
export KOLIBRI_HOME=~/.kolibrihomes/kolibripreview
# Setup Kolibri so you don't have to go through the setup wizard
python kolibri-0.13.2.pex manage provisiondevice \
--facility "$USER's Kolibri Facility" \
--preset informal \
--superusername devowner \
--superuserpassword admin123 \
--language_id en \
--verbosity 0 \
--noinput
# Import the HTML5App Dev Channel
python kolibri-0.13.2.pex manage importchannel network 0413dd5173014d33b5a98a8c00943724
python kolibri-0.13.2.pex manage importcontent network 0413dd5173014d33b5a98a8c00943724
# Start Kolibri (and leave it running)
python kolibri-0.13.2.pex start --foreground
```
After that you can use the script as usual:
1. Replace placeholder .zip with contents of `webroot`:
```bash
./kolibripreview.py --srcdir webroot --destzip=~/.kolibrihomes/kolibripreview/content/storage/9/c/9cf3a3ab65e771abfebfc67c95a8ce2a.zip
```
2. Open and refresh [http://localhost:8080/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52](http://localhost:8080/learn/#/topics/c/60fe072490394595a9d77d054f7e3b52)
Further reading
---------------
See the docs page on [HTML Apps](../htmlapps.md) for info about technical details
and best practices for packaging web content for use in the Kolibri Learning Platform.
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/developer/kolibripreview.md | 0.61855 | 0.658047 | kolibripreview.md | pypi |
Ricecooker content upload process
=================================
This page describes the "behind the scenes" operation of the `ricecooker` framework.
The goal is to give an overview of the processing steps that take place every
time you run a sushichef script. The goal of this page is to help developers know
which parts of the code to look at when debugging ricecooker issues, adding
support for new content kinds and file types, or when implement performance optimizations.
Each section below describes one of the steps in this process.
Build tree
----------
The ricecooker tree consists of `Node` and `File` objects organized into a tree
data structure. The chef script must implement the `construct_channel` method,
which gets called by the ricecooker framework:
```python
channel = chef.construct_channel(**kwargs)
```
Validation logic
----------------
Every ricecooker `Node` has a `validate` method that performs basic checks to
make sure the node's metadata is set correctly and necessary files are provided.
Each `File` subclass in turn has its own validation logic to ensure the file
provided has the appropriate extension.
The tree validation logic is initiated [here](https://github.com/learningequality/ricecooker/blob/master/ricecooker/managers/tree.py#L19-L24) when the channel's `validate_tree` method is called.
Note: the files have not been processed at this point, so the node and file
`validate` methods cannot do "deep checks" on the file contents yet.
File processing
---------------
The next step of the ricecooker run occurs when we call the `process_files`
method on each node object. The file processing is initiated [here](https://github.com/learningequality/ricecooker/blob/master/ricecooker/managers/tree.py#L26-L48) and proceeds recursively through the tree.
### Node.process_files
Each `Node` subclass implements the `process_files` method which includes the
following steps:
- call `process_file` on all files associated with the node (described below)
- if the node has children, `process_files` is called on all child nodes
- call the node's `generate_thumbnail` method if it doesn't have a thumbnail
already, and the node has `derive_thumbnail` set to True, or if the global
command line argument `--thumbnail` (config.THUMBNAILS) is set to True.
See notes section "Node.generate_thumbnail".
The result of the `node.process_file()` is a list of processed filenames, that
reference files in the content-addressable storage directory `/content/storage/`.
The list of files names can contain `None` values, which indicate that some the
file processing for a certain files has failed. These None values are filtered
out [here](https://github.com/learningequality/ricecooker/blob/master/ricecooker/managers/tree.py#L35)
before the list is passed onto the file diff and file upload steps.
### File.process_file
Each `File` subclass implements the `process_file` method that takes care of:
- downloading the `path` (a web URL or a local filepath) and possibly
performing format conversions (e.g. for videos and subtitles)
- saves the file to the content-hash based filesystem in `/storage` and keeping
track of the file saved in `.ricecookerfilecache`
- optionally runs video compression on video file and records the output
compressed version in `/storage` and `.ricecookerfilecache`
### Node.generate_thumbnail
Content Node subclasses can implement a the `generate_thumbnail` method that can
be used to automatically generate a thumbnail based on the node content.
The `generate_thumbnail` will return a `Thumbnail` object if the thumbnail
generation worked and the thumbnail will be added to the Node during inside the
`Node.process_files` method.
The actual thumbnail generation happens using one of the `pressurecooker` helper
functions, which currently support PDF, ePub, HTML5, mp3 files, and videos.
File diff
---------
Ricecooker then sends the list of filenames (using the content-hash based names)
to Studio to check which files are already present.
```python
get_file_diff(tree, files_to_diff)
tree.get_file_diff(files_to_diff)
config.SESSION.post(config.file_diff_url())
```
See [`managers/tree.py`](https://github.com/learningequality/ricecooker/blob/master/ricecooker/managers/tree.py)
for the code details. Any files that have been previously uploaded to Studio do
not need to be (re)uploaded, since Studio already has those files in storage.
Studio will reply with the "file difference" list of files that Studio does not have
and need to be uploaded, as described in the next section.
File upload
-----------
As the name suggests, this is the step where the missing files are uploaded to Studio:
```python
upload_files(tree, file_diff)
tree.upload_files(file_diff)
tree.reattempt_upload_fails()
```
At the end of this process all the files from the local `storage/` directory will
also exist in the Studio's storage directory. You can verify this by trying to
access one of the files at `https://studio.learningequality.org/content/storage/c/0/c0ntetha5h0fdaf1le0a0e.ext`
with `c0ntetha5h0fdaf1le0a0e.ext` replaced by one of the filenames you find in
your local `storage/` directory. Note path prefix `c/0/` is used for filenames
starting with `c0`.
See [managers/tree.py](https://github.com/learningequality/ricecooker/blob/master/ricecooker/managers/tree.py) for details.
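To make the content-addressable naming concrete, here is a rough sketch of how such a
storage path can be derived from a file's contents (an illustration of the convention,
not the exact ricecooker implementation):

```python
import hashlib
import os

def storage_path(filepath):
    """Return the relative storage path for a file, named after the MD5 of its contents."""
    md5 = hashlib.md5()
    with open(filepath, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    filename = md5.hexdigest() + os.path.splitext(filepath)[1]
    # e.g. a file named 'c0abc....mp4' is stored under 'c/0/c0abc....mp4'
    return os.path.join(filename[0], filename[1], filename)
```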
Structure upload
----------------
The final step happens in the function `tree.upload_tree()`, which repeatedly
calls the `add_nodes` method to upload the json metadata to Kolibri Studio,
and finally calls the `commit_channel` to finalize the process.
At the end of this chef step the complete channel (files, tree structure, and metadata)
is now on Studio. By default, the content is uploaded to a `staging` tree of the
channel, which is something like a "draft version" of the channel that is hidden
from Studio channel viewers but visible to channel editors.
The purpose of the staging tree is to allow channel editors to review the
proposed changes in the "draft version" in the Studio web interface, such as which
nodes were modified/added/removed and the total storage space required.
Deploying the channel (optional)
--------------------------------
Studio channel editors can use the `DEPLOY` button in the Studio web interface
to activate the "draft copy" and make it visible to all Studio users.
This is implemented by replacing the channel's `main` tree with the `staging` tree.
During [this step](https://github.com/learningequality/studio/blob/5564c1fc540d8a936fc2907c9d65bf0fb2bacb14/contentcuration/contentcuration/api.py#L103-L105), a "backup copy" of channel is saved, called the `previous_tree`.
Publish channel (optional)
--------------------------
The `PUBLISH` channel button on Studio is used to save and export a new version of the channel.
The PUBLISH action exports all the channel metadata to a sqlite3 DB file served
by Studio at the URL `/content/{{channel_id}}.sqlite3` and ensure the associated
files exist in `/content/storage/` which is served by a CDN.
This step is a prerequisite for getting the channel out of Studio and into Kolibri.
The combination of `{{channel_id}}.sqlite3` file and the files in `/content/storage`
define the Kolibri Channels content format. This is what gets exported to the folder
`KOLIBRI_DATA` on sdcard or external drives when you use the `EXPORT` action in Kolibri.
| /ricecooker-0.7.3.tar.gz/ricecooker-0.7.3/docs/developer/uploadprocess.md | 0.427158 | 0.914061 | uploadprocess.md | pypi |
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("rich-admonitions")
except PackageNotFoundError:
__version__ = "unknown"
# Items to "export" from this module!
__export__ = {
"Admonition",
}
from dataclasses import dataclass
from typing import Iterable, Union
from rich.segment import Segment
from rich.style import Style
from rich.text import Text
from rich.padding import Padding
from rich.console import Console, ConsoleOptions, Measurement, RenderableType
@dataclass
class Admonition:
"""
A rich-text formatted admonition, with built-in themes!
"""
content: Union[str, RenderableType]
header: Union[str, Text] = "Notice"
style: Union[str, Style] = "bold"
markup: bool = True
def __init__(
self,
content: Union[str, RenderableType] = "...",
header: Union[str, Text] = "Notice",
style: Union[str, Style] = "bold blue",
markup: bool = True,
):
self.content = content
self.header = header
self.style = style
self.markup = markup
def __str__(self) -> str:
"""Render the message as a str."""
return str(self.content)
def __repr__(self) -> str:
return f"Banner(content={self.content}, header={self.header}, style={self.style}, markup={self.markup})"
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> Iterable[Segment]:
"""Render the message as a rich string."""
from rich.segment import Segment
from rich.text import Text
yield from self.render(
content=Text(self.header, style=self.style, end="")
if isinstance(self.header, str)
else self.header,
console=console,
markup=self.markup,
style=self.style,
space=1,
)
yield self.prefix(style=console.get_style(self.style))
yield Segment.line()
yield from self.render(
content=self.content,
console=console,
markup=self.markup,
style=self.style,
space=2,
)
yield Segment.line()
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
"""Measure the message."""
from math import floor
from rich.measure import Measurement
return Measurement(
floor(len(self.header) * 1.25) if self.header else 8, options.max_width
)
@classmethod
def note(
cls,
content: RenderableType,
header: str = "Note",
style: Union[str, Style] = "bold magenta3",
**kwargs,
) -> "Admonition":
"""A note message."""
return cls(content, header, style, **kwargs)
@classmethod
def tip(
cls,
content: RenderableType,
header: str = "Tip",
style: Union[str, Style] = "bold green",
**kwargs,
) -> "Admonition":
"""A tip message."""
return cls(content, header, style, **kwargs)
@classmethod
def info(
cls,
content: RenderableType,
header: str = "Info",
style: Union[str, Style] = "bold cyan",
**kwargs,
) -> "Admonition":
"""An info message."""
return cls(content, header, style, **kwargs)
@classmethod
def warning(
cls,
content: RenderableType,
header: str = "Warning",
style: Union[str, Style] = "bold orange3",
**kwargs,
) -> "Admonition":
"""A warning message."""
return cls(content, header, style, **kwargs)
@classmethod
def danger(
cls,
content: RenderableType,
header: str = "Danger",
style: Union[str, Style] = "bold red",
**kwargs,
) -> "Admonition":
"""A danger message."""
return cls(content, header, style, **kwargs)
@classmethod
def prefix(cls, style: Style = Style(bold=True)) -> Segment:
"""Return the default message prefix character."""
from rich.segment import Segment
return Segment("│", style=style)
@classmethod
def render(
cls,
content: RenderableType,
console: Console,
markup: bool = False,
options: Union[ConsoleOptions, None] = None,
style: Union[str, Style] = "bold",
space: int = 2,
) -> Iterable[Segment]:
"""Render the message as a rich string."""
from rich.text import Text
from rich.padding import Padding
_style = console.get_style(style)
_options = options if options else console.options
if isinstance(content, str):
if markup:
_content: Union[Text, RenderableType] = Text.from_markup(content, end="")
else:
_content = Text(content, end="")
else:
_content = content
for line in console.render_lines(
Padding(_content, (0, space + 1, 0, space), expand=False),
options=_options.update_width(console.width - space - 1),
new_lines=True,
pad=False,
):
yield cls.prefix(style=_style)
yield from line
if __name__ != "__main__":
for _ in (*locals(), "_"):
if not _.startswith("__") and _ not in __export__:
del locals()[_] | /rich_admonitions-0.2.0-py3-none-any.whl/admonitions/__init__.py | 0.827724 | 0.215041 | __init__.py | pypi |
from __future__ import annotations
import sys
import rich_argparse._lazy_rich as r
_HIGHLIGHTS = [
r"(?:^|\s)(?P<args>-{1,2}[\w]+[\w-]*)", # highlight --words-with-dashes as args
r"`(?P<syntax>[^`]*)`", # highlight `text in backquotes` as syntax
]
_windows_console_fixed = None
def _rich_wrap(console: r.Console, text: r.Text, width: int) -> r.Lines:
# textwrap.wrap() equivalent for rich.text.Text
text = text.copy()
text.expand_tabs(8) # textwrap expands tabs first
whitespace_trans = dict.fromkeys(map(ord, "\t\n\x0b\x0c\r "), ord(" "))
text.plain = text.plain.translate(whitespace_trans)
return text.wrap(console, width)
def _rich_fill(console: r.Console, text: r.Text, width: int, indent: r.Text) -> r.Text:
# textwrap.fill() equivalent for rich.text.Text
lines = _rich_wrap(console, text, width)
return r.Text("\n").join(indent + line for line in lines)
def _initialize_win_colors() -> bool: # pragma: no cover
global _windows_console_fixed
assert sys.platform == "win32"
if _windows_console_fixed is None:
winver = sys.getwindowsversion() # type: ignore[attr-defined]
if winver.major < 10 or winver.build < 10586:
try:
import colorama
_windows_console_fixed = isinstance(sys.stdout, colorama.ansitowin32.StreamWrapper)
except Exception:
_windows_console_fixed = False
else:
import ctypes
kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
ENABLE_PROCESSED_OUTPUT = 0x1
ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
STD_OUTPUT_HANDLE = -11
kernel32.SetConsoleMode(
kernel32.GetStdHandle(STD_OUTPUT_HANDLE),
ENABLE_PROCESSED_OUTPUT
| ENABLE_WRAP_AT_EOL_OUTPUT
| ENABLE_VIRTUAL_TERMINAL_PROCESSING,
)
_windows_console_fixed = True
return _windows_console_fixed
def _fix_legacy_win_text(console: r.Console, text: str) -> str:
# activate legacy Windows console colors if needed (and available) or strip ANSI escape codes
if (
sys.platform == "win32"
and console.legacy_windows
and console.color_system is not None
and not _initialize_win_colors()
): # pragma: win32 cover
text = "\n".join(r.re_ansi.sub("", line) for line in text.split("\n"))
return text | /rich_argparse-1.3.0-py3-none-any.whl/rich_argparse/_common.py | 0.430866 | 0.188978 | _common.py | pypi |
import pathlib
import unicodedata
from typing import Iterator, NamedTuple
import click
from rich.console import Console
from rich.table import Table
STYLE: str = "white"
TITLE_STYLE = "green"
HEADER_STYLE = "blue"
HIGHLIGHT_STYLE = "on green"
class CodePointInfo(NamedTuple):
code_point: int
name: str
aliases: list[str] | None
def load_name_aliases() -> dict[str, list[str]]:
def non_comment(p: pathlib.Path) -> Iterator[str]:
text = p.read_text()
for line in text.split("\n"):
if not line.startswith("#"):
line = line.strip()
if line:
yield line
aliases: dict[str, list[str]] = {}
p = pathlib.Path(__file__).parent / "NameAliases.txt"
if not p.exists():
return aliases
for line in non_comment(p):
code_point, alias, _ = line.split(";")
code_point = code_point.upper()
if code_point not in aliases:
aliases[code_point] = [alias]
else:
aliases[code_point].append(alias)
return aliases
def get_code_points() -> list[CodePointInfo]:
name_aliases = load_name_aliases()
code_points: list[CodePointInfo] = []
for idx in range(256):
        if idx <= 0x1F or (0x7F <= idx <= 0x9F):  # C0/C1 control characters have no unicodedata name
name = name_aliases[f"{idx:04X}"]
else:
name = [unicodedata.name(chr(idx))]
if len(name) > 1:
aliases = name[1:]
else:
aliases = None
cp = CodePointInfo(code_point=idx, name=name[0], aliases=aliases)
code_points.append(cp)
return code_points
def formatted_aliases(aliases: list[str]) -> Iterator[str]:
for alias in aliases:
if len(alias) < 4:
yield alias
else:
yield alias.title()
def code_point_to_int(code_point: str) -> int:
try:
if code_point.startswith("0x") and len(code_point) in [3, 4]:
highlight_cp = int(code_point[2:], 16)
else:
highlight_cp = int(code_point)
if highlight_cp > 255:
highlight_cp = -1
except ValueError:
highlight_cp = -1
return highlight_cp
def display_code_points(
code_points: list[CodePointInfo],
show_aliases: bool = True,
style: str = STYLE,
title_style: str = TITLE_STYLE,
header_style: str = HEADER_STYLE,
highlight_style: str = HIGHLIGHT_STYLE,
code_point: str = "",
):
if code_point != "":
highlight_cp = code_point_to_int(code_point)
highlight = highlight_style
else:
highlight_cp = -1
highlight = ""
table = Table(
title="ASCII Code Points",
title_justify="left",
title_style=title_style,
header_style=header_style,
)
if show_aliases:
table.add_column("Dec")
table.add_column("Hex")
table.add_column("Name")
table.add_column("Aliases")
for cp in code_points:
cp_style = highlight if cp.code_point == highlight_cp else style
if cp.aliases is not None:
aliases = ", ".join(formatted_aliases(cp.aliases))
else:
aliases = ""
table.add_row(
f"{cp.code_point:02d}",
f"0x{cp.code_point:02X}",
f"{cp.name.title()}",
f"{aliases}",
style=cp_style,
)
else:
table.add_column("Dec")
table.add_column("Hex")
table.add_column("Name")
table.add_column("Dec")
table.add_column("Hex")
table.add_column("Name")
for cp1, cp2 in zip(code_points[:128], code_points[128:]):
cp1_style = highlight if cp1.code_point == highlight_cp else style
cp2_style = highlight if cp2.code_point == highlight_cp else style
table.add_row(
f"[{cp1_style}]{cp1.code_point:02d}[/]",
f"[{cp1_style}]0x{cp1.code_point:02X}[/]",
f"[{cp1_style}]{cp1.name.title()}[/]",
f"[{cp2_style}]{cp2.code_point:02d}[/]",
f"[{cp2_style}]0x{cp2.code_point:02X}[/]",
f"[{cp2_style}]{cp2.name.title()}[/]",
)
console = Console()
console.print(table)
@click.command()
@click.option(
"--aliases/--no-aliases",
default=False,
help="Show a column with aliases for the name",
)
@click.option(
"--style",
default=STYLE,
metavar="STYLE",
help=f'Set the style of the text (default: "{STYLE}")',
)
@click.option(
"--title-style",
default=TITLE_STYLE,
metavar="STYLE",
help=f'Set the style of the table title (default: "{TITLE_STYLE}")',
)
@click.option(
"--header-style",
default=HEADER_STYLE,
metavar="STYLE",
help=f'Set the style of the table header (default: "{HEADER_STYLE}")',
)
@click.option(
"--highlight-style",
default=HIGHLIGHT_STYLE,
metavar="STYLE",
help=f'Set the style of highlighted text (default: "{HIGHLIGHT_STYLE}")',
)
@click.argument("code_point", default="")
def run(
aliases: bool,
style: str,
title_style: str,
header_style: str,
highlight_style: str,
code_point: str,
):
"""Display a table of ASCII code point information.
\b
    If a CODE_POINT is provided then it will be highlighted in the output.
CODE_POINT can be either a decimal (245) or a hexadecimal (0xF5) string."""
cp = get_code_points()
display_code_points(
cp, aliases, style, title_style, header_style, highlight_style, code_point
)
run() | /rich_ascii-0.2.0-py3-none-any.whl/rich_ascii/__main__.py | 0.47244 | 0.173271 | __main__.py | pypi |
import re
import random
import datetime
from rich_base_provider import db
from flask_security import current_user
from rich_base_provider.sysadmin.sys_dict.models import SysDict
class IntegralRule(db.Document):
"""
积分规则
"""
meta = {
"collection": "integral_rule"
}
    integral_rule_id = db.StringField()  # Integral rule ID
    org_id = db.StringField()  # Organization this integral rule belongs to
    integral_rule_type = db.StringField()  # Rule type (consumption (0), recharge (1), system (2), ...)
    child_rule_type = db.StringField(default="")  # Child rule type, a sub-type of integral_rule_type
    integral_condition = db.IntField()  # Condition for the rule to apply (amount spent or recharged)
    receive_integral_count = db.IntField(default=0)  # Number of points awarded
    status = db.StringField()  # Current rule status (enabled (0), disabled (1), deleted (2))
    remarks = db.StringField()  # Remarks
    create_by = db.StringField()  # Created by
    create_time = db.DateTimeField(default=datetime.datetime.now)  # Creation time
    update_by = db.StringField()  # Updated by
    update_time = db.DateTimeField()  # Update time
@staticmethod
def get_dict_id(dict_type, dict_name):
status_value = SysDict.get_dict_by_type_and_name(dict_type=dict_type, dict_name=dict_name)
return status_value.dict_id
@classmethod
def get_integral_rule_list_by_data(cls, page=1, per_page=20, org_code="", search_data=""):
"""
根据页码、搜索条件查询指定数目记录
:param page:
:param per_page:
:param org_code:
:param search_data:
:return:
"""
start_index = (int(page) - 1) * int(per_page)
search = [
{
'$lookup': {
'from': 'sys_org',
'localField': 'org_id',
'foreignField': 'org_id',
'as': 'sys_org'
}
},
{'$unwind': "$sys_org"},
{'$lookup': {'from': 'sys_dict',
'localField': 'integral_rule_type',
'foreignField': 'dict_id',
'as': 'integral_rule_type_dict'
}},
{'$unwind': "$integral_rule_type_dict"},
{'$lookup': {'from': 'sys_dict',
'localField': 'status',
'foreignField': 'dict_id',
'as': 'status_dict'
}},
{'$unwind': "$status_dict"},
{'$match': {
"$and": [
{"sys_org.org_code": re.compile(r'^{}'.format(org_code))},
{"status_dict.dict_type": "integral_status"},
{"integral_rule_type_dict.dict_type": "integral_rule_type"},
{'status': {'$nin': [cls.get_dict_id("integral_status", "删除")]}}
],
"$or": [
{"sys_org.org_name": re.compile(search_data)},
{"integral_rule_type_dict.dict_name": re.compile(search_data)},
{"status_dict.dict_name": re.compile(search_data)},
{"integral_condition": re.compile(search_data)},
{"receive_integral_count": re.compile(search_data)},
{'remarks': re.compile(search_data)}
]
}},
{'$skip': start_index},
{'$limit': int(per_page)},
{'$sort': {"create_time": -1}},
{'$project': {'integral_rule_id': 1,
'sys_org.org_name': 1,
'integral_rule_type_dict.dict_name': 1,
'child_rule_type': 1,
'integral_condition': 1,
'receive_integral_count': 1,
'status_dict.dict_name': 1,
'remarks': 1,
'update_time': 1
}}
]
return cls.objects.aggregate(*search)
@classmethod
def get_integral_rule_total_count(cls, org_code, search_data):
"""获取积分规则记录总数"""
search = [
{
'$lookup': {
'from': 'sys_org',
'localField': 'org_id',
'foreignField': 'org_id',
'as': 'sys_org'
}
},
{'$unwind': "$sys_org"},
{'$lookup': {'from': 'sys_dict',
'localField': 'integral_rule_type',
'foreignField': 'dict_id',
'as': 'integral_rule_type_dict'
}},
{'$unwind': "$integral_rule_type_dict"},
{'$lookup': {'from': 'sys_dict',
'localField': 'status',
'foreignField': 'dict_id',
'as': 'status_dict'
}},
{'$unwind': "$status_dict"},
{'$match': {
"$and": [
{"sys_org.org_code": re.compile(r'^{}'.format(org_code))},
{"status_dict.dict_type": "integral_status"},
{"integral_rule_type_dict.dict_type": "integral_rule_type"},
{'status': {'$nin': [cls.get_dict_id("integral_status", "删除")]}}
],
"$or": [
{"sys_org.org_name": re.compile(search_data)},
{"integral_rule_type_dict.dict_name": re.compile(search_data)},
{"status_dict.dict_name": re.compile(search_data)},
{"integral_condition": re.compile(search_data)},
{"receive_integral_count": re.compile(search_data)},
{'remarks': re.compile(search_data)}
]
}},
{'$sort': {"create_time": -1}},
{'$group': {'_id': 0,
'count': {'$sum': 1}}}
]
return list(cls.objects.aggregate(*search))[0].get('count')
@classmethod
def get_org_integral_rule_list_by_data(cls, current_org_id, integral_condition):
"""
根据积分规则所属机构以及满足金额获取本机构启用状态下的积分规则
:param current_org_id:
:param integral_condition:
:return:
"""
return cls.objects(org_id=current_org_id, status__in=[cls.get_dict_id("integral_status", "启用")],
integral_rule_type=cls.get_dict_id("integral_rule_type", "充值"),
integral_condition__lte=integral_condition).order_by("-integral_condition").all()
@classmethod
def get_integral_rule_list_by_kwargs(cls, **kwargs):
"""
根据kwargs字典获取积分规则数据
:param kwargs:
:return:
"""
org_id = kwargs.get("org_id", "")
integral_rule_type = kwargs.get("integral_rule_type", "")
child_rule_type = kwargs.get("child_rule_type", "")
integral_condition = kwargs.get("integral_condition", "")
receive_integral_count = kwargs.get("receive_integral_count", "")
if org_id and integral_rule_type and child_rule_type and integral_condition and receive_integral_count:
return cls.objects(org_id=org_id, integral_rule_type=integral_rule_type, child_rule_type=child_rule_type,
integral_condition=integral_condition,
receive_integral_count=receive_integral_count,
status__nin=[cls.get_dict_id("integral_status", "删除")]).all()
elif org_id and integral_rule_type and integral_condition and receive_integral_count:
return cls.objects(org_id=org_id, integral_rule_type=integral_rule_type,
integral_condition=int(integral_condition),
receive_integral_count=int(receive_integral_count),
status__nin=[cls.get_dict_id("integral_status", "删除")]).all()
@classmethod
def insert_integral_rule_by_create_dict(cls, create_dict):
"""
根据create_dict创建积分规则
:param create_dict:
:return:
"""
integral_rule_id = "IR" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 999999))
login_user_code = current_user.user_code
new_integral_rule = IntegralRule(
integral_rule_id=integral_rule_id,
org_id=create_dict["org_id"],
integral_rule_type=create_dict["integral_rule_type"],
child_rule_type=create_dict["child_rule_type"],
integral_condition=create_dict["integral_condition"],
receive_integral_count=create_dict["receive_integral_count"],
status=create_dict["status"],
remarks=create_dict["remarks"],
create_by=login_user_code,
update_by=login_user_code,
create_time=datetime.datetime.now(),
update_time=datetime.datetime.now()
)
new_integral_rule.save()
@classmethod
def get_integral_rule_by_integral_rule_id(cls, integral_rule_id):
"""
根据积分规则ID获取本记录详细信息
:param integral_rule_id:
:return:
"""
return cls.objects(integral_rule_id=integral_rule_id,
status__nin=[cls.get_dict_id("integral_status", "删除")]).first()
@classmethod
def update_integral_rule_by_update_dict(cls, update_dict):
"""
根据update_dict字典数据编辑积分规则记录
:param update_dict:
:return:
"""
login_user_code = current_user.user_code
old_integral_rule = cls.get_integral_rule_by_integral_rule_id(update_dict["integral_rule_id"])
        # Only overwrite fields whose values have changed
if old_integral_rule.integral_rule_type != update_dict["integral_rule_type"]:
old_integral_rule.integral_rule_type = update_dict["integral_rule_type"]
if old_integral_rule.child_rule_type != update_dict["child_rule_type"]:
old_integral_rule.child_rule_type = update_dict["child_rule_type"]
if old_integral_rule.integral_condition != update_dict["integral_condition"]:
old_integral_rule.integral_condition = update_dict["integral_condition"]
if old_integral_rule.receive_integral_count != update_dict["receive_integral_count"]:
old_integral_rule.receive_integral_count = update_dict["receive_integral_count"]
if old_integral_rule.status != update_dict["status"]:
old_integral_rule.status = update_dict["status"]
if old_integral_rule.remarks != update_dict["remarks"]:
old_integral_rule.remarks = update_dict["remarks"]
old_integral_rule.update_by = login_user_code
old_integral_rule.update_time = datetime.datetime.now()
old_integral_rule.save()
@classmethod
def delete_integral_rule_by_integral_rule_id(cls, integral_rule_id):
"""
根据积分规则编号删除积分规则记录
:param integral_rule_id:
:return:
"""
login_user_code = current_user.user_code
delete_integral_rule = cls.get_integral_rule_by_integral_rule_id(integral_rule_id)
delete_integral_rule.status = cls.get_dict_id("integral_status", "删除")
delete_integral_rule.update_by = login_user_code
delete_integral_rule.update_time = datetime.datetime.now()
delete_integral_rule.save() | /rich_base_provider-1.0.1.tar.gz/rich_base_provider-1.0.1/rich_base_provider/sysadmin/integral/integral_rule/models.py | 0.400398 | 0.151529 | models.py | pypi |
from operator import itemgetter
import sys
from typing import TYPE_CHECKING, List, NoReturn, Optional, Tuple
import click
from pygments.util import ClassNotFound
from rich.console import Console, RenderableType
from rich.markup import escape
from rich.text import Text
console = Console()
error_console = Console(stderr=True)
if TYPE_CHECKING:
from rich.console import ConsoleOptions, RenderResult
from rich.measure import Measurement
BOXES = [
"none",
"ascii",
"ascii2",
"square",
"rounded",
"heavy",
"double",
]
BOX_TEXT = ", ".join(sorted(BOXES))
COMMON_LEXERS = {
"html": "html",
"py": "python",
"md": "markdown",
"js": "javascript",
"xml": "xml",
"json": "json",
"toml": "toml",
}
VERSION = "1.8.0"
AUTO = 0
SYNTAX = 1
PRINT = 2
MARKDOWN = 3
RST = 4
JSON = 5
RULE = 6
INSPECT = 7
CSV = 8
IPYNB = 9
def on_error(message: str, error: Optional[Exception] = None, code=-1) -> NoReturn:
"""Render an error message then exit the app."""
if error:
error_text = Text(message)
error_text.stylize("bold red")
error_text += ": "
error_text += error_console.highlighter(str(error))
error_console.print(error_text)
else:
error_text = Text(message, style="bold red")
error_console.print(error_text)
sys.exit(code)
def read_resource(path: str, lexer: Optional[str]) -> Tuple[str, Optional[str]]:
"""Read a resource form a file or stdin."""
if not path:
on_error("missing path or URL")
if path.startswith(("http://", "https://")):
import requests
response = requests.get(path)
text = response.text
try:
mime_type: str = response.headers["Content-Type"]
if ";" in mime_type:
mime_type = mime_type.split(";", 1)[0]
except KeyError:
pass
else:
if not lexer:
_, dot, ext = path.rpartition(".")
if dot and ext:
ext = ext.lower()
lexer = COMMON_LEXERS.get(ext, None)
if lexer is None:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(mime_type).name
except Exception:
pass
return (text, lexer)
try:
if path == "-":
return (sys.stdin.read(), None)
with open(path, "rt") as resource_file:
text = resource_file.read()
if not lexer:
_, dot, ext = path.rpartition(".")
if dot and ext:
ext = ext.lower()
lexer = COMMON_LEXERS.get(ext, None)
if not lexer:
from pygments.lexers import guess_lexer_for_filename
try:
lexer = guess_lexer_for_filename(path, text).name
except ClassNotFound:
return (text, "text")
return (text, lexer)
except Exception as error:
on_error(f"unable to read {escape(path)}", error)
class ForceWidth:
"""Force a renderable to a given width."""
def __init__(self, renderable: "RenderableType", width: int = 80) -> None:
self.renderable = renderable
self.width = width
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
child_options = options.update_width(self.width)
yield from console.render(self.renderable, child_options)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
from rich.measure import Measurement
return Measurement(self.width, self.width)
def blend_text(
message: str, color1: Tuple[int, int, int], color2: Tuple[int, int, int]
) -> Text:
"""Blend text from one color to another."""
text = Text(message)
r1, g1, b1 = color1
r2, g2, b2 = color2
dr = r2 - r1
dg = g2 - g1
db = b2 - b1
size = len(text)
for index in range(size):
blend = index / size
color = f"#{int(r1 + dr * blend):2X}{int(g1 + dg * blend):2X}{int(b1 + db * blend):2X}"
text.stylize(color, index, index + 1)
return text
class RichCommand(click.Command):
"""Override Clicks help with a Richer version."""
# TODO: Extract this in to a general tool, i.e. rich-click
def format_help(self, ctx, formatter):
from rich.highlighter import RegexHighlighter
from rich.panel import Panel
from rich.table import Table
from rich.theme import Theme
class OptionHighlighter(RegexHighlighter):
highlights = [
r"(?P<switch>\-\w)",
r"(?P<option>\-\-[\w\-]+)",
]
highlighter = OptionHighlighter()
console = Console(
theme=Theme(
{
"option": "bold cyan",
"switch": "bold green",
}
),
highlighter=highlighter,
)
console.print(
f"[b]Rich CLI[/b] [magenta]v{VERSION}[/] 🤑\n\n[dim]Rich text and formatting in the terminal\n",
justify="center",
)
console.print(
"Usage: [b]rich[/b] [b][OPTIONS][/] [b cyan]<PATH,TEXT,URL, or '-'>\n"
)
options_table = Table(highlight=True, box=None, show_header=False)
for param in self.get_params(ctx)[1:]:
if len(param.opts) == 2:
opt1 = highlighter(param.opts[1])
opt2 = highlighter(param.opts[0])
else:
opt2 = highlighter(param.opts[0])
opt1 = Text("")
if param.metavar:
opt2 += Text(f" {param.metavar}", style="bold yellow")
options = Text(" ".join(reversed(param.opts)))
help_record = param.get_help_record(ctx)
if help_record is None:
help = ""
else:
help = Text.from_markup(param.get_help_record(ctx)[-1], emoji=False)
if param.metavar:
options += f" {param.metavar}"
options_table.add_row(opt1, opt2, highlighter(help))
console.print(
Panel(
options_table, border_style="dim", title="Options", title_align="left"
)
)
from rich.color import Color
console.print(
blend_text(
"♥ https://www.textualize.io",
Color.parse("#b169dd").triplet,
Color.parse("#542c91").triplet,
),
justify="left",
style="bold",
)
@click.command(cls=RichCommand)
@click.argument("resource", metavar="<PATH or TEXT or '-'>", default="")
@click.option(
"--print",
"-p",
"_print",
is_flag=True,
help="Print [u]console markup[/u]. [dim]See https://rich.readthedocs.io/en/latest/markup.html",
)
@click.option("--rule", "-u", is_flag=True, help="Display a horizontal [u]rule[/u].")
@click.option("--json", "-j", is_flag=True, help="Display as [u]JSON[/u].")
@click.option("--markdown", "-m", is_flag=True, help="Display as [u]markdown[/u].")
@click.option("--rst", is_flag=True, help="Display [u]restructured text[/u].")
@click.option("--csv", is_flag=True, help="Display [u]CSV[/u] as a table.")
@click.option("--ipynb", is_flag=True, help="Display [u]Jupyter notebook[/u].")
@click.option("--syntax", is_flag=True, help="[u]Syntax[/u] highlighting.")
@click.option("--inspect", is_flag=True, help="[u]Inspect[/u] a python object.")
@click.option(
"--head",
"-h",
type=click.IntRange(min=1),
metavar="LINES",
default=None,
help="Display first [b]LINES[/] of the file (requires --syntax or --csv).",
)
@click.option(
"--tail",
"-t",
type=click.IntRange(min=1),
metavar="LINES",
default=None,
help="Display last [b]LINES[/] of the file (requires --syntax or --csv).",
)
@click.option(
"--emoji", "-j", is_flag=True, help="Enable emoji code. [dim]e.g. :sparkle:"
)
@click.option("--left", "-l", is_flag=True, help="Align to left.")
@click.option("--right", "-r", is_flag=True, help="Align to right.")
@click.option("--center", "-c", is_flag=True, help="Align to center.")
@click.option("--text-left", "-L", is_flag=True, help="Justify text to left.")
@click.option("--text-right", "-R", is_flag=True, help="Justify text to right.")
@click.option("--text-center", "-C", is_flag=True, help="Justify text to center.")
@click.option(
"--text-full", "-F", is_flag=True, help="Justify text to both left and right edges."
)
@click.option(
"--soft", is_flag=True, help="Enable soft wrapping of text (requires --print)."
)
@click.option(
"--expand", "-e", is_flag=True, help="Expand to full width (requires --panel)."
)
@click.option(
"--width",
"-w",
metavar="SIZE",
type=int,
help="Fit output to [b]SIZE[/] characters.",
default=-1,
)
@click.option(
"--max-width",
"-W",
metavar="SIZE",
type=int,
help="Set maximum width to [b]SIZE[/] characters.",
default=-1,
)
@click.option(
"--style", "-s", metavar="STYLE", help="Set text style to [b]STYLE[/b].", default=""
)
@click.option(
"--rule-style",
metavar="STYLE",
help="Set rule style to [b]STYLE[/b].",
default="bright_green",
)
@click.option(
"--rule-char",
metavar="CHARACTER",
default="─",
help="Use [b]CHARACTER[/b] to generate a line with --rule.",
)
@click.option(
"--padding",
"-d",
metavar="TOP,RIGHT,BOTTOM,LEFT",
help="Padding around output. [dim]1, 2 or 4 comma separated integers, e.g. 2,4",
)
@click.option(
"--panel",
"-a",
default="none",
type=click.Choice(BOXES),
metavar="BOX",
help=f"Set panel type to [b]BOX[/b]. [dim]{BOX_TEXT}",
)
@click.option(
"--panel-style",
"-S",
default="",
metavar="STYLE",
help="Set the panel style to [b]STYLE[/b] (requires --panel).",
)
@click.option(
"--theme",
metavar="THEME",
help="Set syntax theme to [b]THEME[/b]. [dim]See https://pygments.org/styles/",
default="ansi_dark",
envvar="RICH_THEME",
)
@click.option(
"--line-numbers", "-n", is_flag=True, help="Enable line number in syntax."
)
@click.option(
"--guides",
"-g",
is_flag=True,
help="Enable indentation guides in syntax highlighting",
)
@click.option(
"--lexer",
"-x",
metavar="LEXER",
default=None,
help="Use [b]LEXER[/b] for syntax highlighting. [dim]See https://pygments.org/docs/lexers/",
)
@click.option("--hyperlinks", "-y", is_flag=True, help="Render hyperlinks in markdown.")
@click.option(
"--no-wrap", is_flag=True, help="Don't word wrap syntax highlighted files."
)
@click.option(
"--title", metavar="TEXT", default="", help="Set panel title to [b]TEXT[/]."
)
@click.option(
"--caption", metavar="TEXT", default="", help="Set panel caption to [b]TEXT[/]."
)
@click.option(
"--force-terminal",
is_flag=True,
help="Force terminal output when not writing to a terminal.",
)
@click.option(
"--export-html",
"-o",
metavar="PATH",
default="",
help="Write HTML to [b]PATH[/b].",
)
@click.option(
"--export-svg", metavar="PATH", default="", help="Write SVG to [b]PATH[/b]."
)
@click.option("--pager", is_flag=True, help="Display in an interactive pager.")
@click.option("--version", "-v", is_flag=True, help="Print version and exit.")
def main(
resource: str,
version: bool = False,
_print: bool = False,
syntax: bool = False,
rule: bool = False,
rule_char: Optional[str] = None,
json: bool = False,
markdown: bool = False,
rst: bool = False,
csv: bool = False,
ipynb: bool = False,
inspect: bool = True,
emoji: bool = False,
left: bool = False,
right: bool = False,
center: bool = False,
text_left: bool = False,
text_right: bool = False,
text_center: bool = False,
soft: bool = False,
head: Optional[int] = None,
tail: Optional[int] = None,
text_full: bool = False,
expand: bool = False,
width: int = -1,
max_width: int = -1,
style: str = "",
rule_style: str = "",
no_wrap: bool = True,
padding: str = "",
panel: str = "",
panel_style: str = "",
title: str = "",
caption: str = "",
theme: str = "",
line_numbers: bool = False,
guides: bool = False,
lexer: str = "",
hyperlinks: bool = False,
force_terminal: bool = False,
export_html: str = "",
export_svg: str = "",
pager: bool = False,
):
"""Rich toolbox for console output."""
if version:
sys.stdout.write(f"{VERSION}\n")
return
console = Console(
emoji=emoji,
record=bool(export_html or export_svg),
force_terminal=force_terminal if force_terminal else None,
)
def print_usage() -> None:
console.print(
r"Usage: [b]rich [OPTIONS][/b] [b cyan]<PATH,TEXT,URL, or '-'>[/]"
)
console.print("See [bold green]rich --help[/] for options")
console.print()
sys.exit(0)
if width > 0:
expand = True
print_padding: List[int] = []
if padding:
try:
print_padding = [int(pad) for pad in padding.split(",")]
except Exception:
on_error(f"padding should be 1, 2 or 4 integers separated by commas")
else:
if len(print_padding) not in (1, 2, 4):
on_error(f"padding should be 1, 2 or 4 integers separated by commas")
renderable: RenderableType = ""
resource_format = AUTO
if _print:
resource_format = PRINT
elif syntax:
resource_format = SYNTAX
elif json:
resource_format = JSON
elif markdown:
resource_format = MARKDOWN
elif rule:
resource_format = RULE
elif inspect:
resource_format = INSPECT
elif csv:
resource_format = CSV
elif rst:
resource_format = RST
elif ipynb:
resource_format = IPYNB
if resource_format == AUTO and "." in resource:
import os.path
ext = ""
if resource.startswith(("http://", "https://")):
from urllib.parse import urlparse
try:
path = urlparse(resource).path
except Exception:
pass
else:
ext = os.path.splitext(path)[-1].lower()
else:
ext = os.path.splitext(resource)[-1].lower()
if ext == ".md":
resource_format = MARKDOWN
elif ext == ".json":
resource_format = JSON
elif ext in (".csv", ".tsv"):
resource_format = CSV
elif ext == ".rst":
resource_format = RST
elif ext == ".ipynb":
resource_format = IPYNB
if resource_format == AUTO:
resource_format = SYNTAX
if resource_format in (PRINT, RULE):
from rich.text import Text
justify = "default"
if text_left:
justify = "left"
elif text_right:
justify = "right"
elif text_center:
justify = "center"
elif text_full:
justify = "full"
try:
if resource == "-":
renderable = Text.from_markup(
sys.stdin.read(), justify=justify, emoji=emoji
)
else:
renderable = Text.from_markup(resource, justify=justify, emoji=emoji)
renderable.no_wrap = no_wrap
except Exception as error:
on_error(f"unable to parse console markup", error)
if rule:
from rich.rule import Rule
from rich.style import Style
try:
render_rule_style = Style.parse(rule_style)
except Exception as error:
on_error("unable to parse rule style", error)
renderable = Rule(
resource,
style=render_rule_style,
characters=rule_char or "─",
align="center" if justify in ("full", "default") else justify,
)
elif resource_format == JSON:
from rich.json import JSON as RichJSON
json_data, _lexer = read_resource(resource, lexer)
try:
renderable = RichJSON(json_data)
except Exception as error:
on_error("unable to read json", error)
elif resource_format == MARKDOWN:
from .markdown import Markdown
markdown_data, lexer = read_resource(resource, lexer)
renderable = Markdown(markdown_data, code_theme=theme, hyperlinks=hyperlinks)
elif resource_format == RST:
from rich_rst import RestructuredText
rst_data, _ = read_resource(resource, lexer)
renderable = RestructuredText(
rst_data,
code_theme=theme,
default_lexer=lexer or "python",
show_errors=False,
)
elif resource_format == INSPECT:
try:
inspect_data = eval(resource)
except Exception:
console.print_exception()
on_error(f"unable to eval {resource!r}")
from rich._inspect import Inspect
renderable = Inspect(
inspect_data, help=False, dunder=False, all=False, methods=True
)
elif resource_format == CSV:
renderable = render_csv(resource, head, tail, title, caption)
elif resource_format == IPYNB:
renderable = render_ipynb(
resource,
theme,
hyperlinks,
lexer,
head,
tail,
line_numbers,
guides,
no_wrap,
)
else:
if not resource:
print_usage()
from rich.syntax import Syntax
try:
if resource == "-":
code = sys.stdin.read()
else:
code, lexer = read_resource(resource, lexer)
num_lines = len(code.splitlines())
line_range = _line_range(head, tail, num_lines)
renderable = Syntax(
code,
lexer,
theme=theme,
line_numbers=line_numbers,
indent_guides=guides,
word_wrap=not no_wrap,
line_range=line_range,
)
except Exception as error:
on_error("unable to read file", error)
if print_padding:
from rich.padding import Padding
renderable = Padding(renderable, tuple(print_padding), expand=expand)
if panel != "none":
from rich import box
from rich.panel import Panel
from rich.style import Style
try:
render_border_style = Style.parse(panel_style)
except Exception as error:
on_error("unable to parse panel style", error)
renderable = Panel(
renderable,
getattr(box, panel.upper()),
expand=expand,
title=title,
subtitle=caption,
border_style=render_border_style,
)
if style:
from rich.style import Style
from rich.styled import Styled
try:
text_style = Style.parse(style)
except Exception as error:
on_error("unable to parse style", error)
else:
renderable = Styled(renderable, text_style)
if width > 0 and not pager:
renderable = ForceWidth(renderable, width=width)
justify = "default"
if left:
justify = "left"
elif right:
justify = "right"
elif center:
justify = "center"
if pager:
if justify != "default":
from rich.align import Align
renderable = Align(renderable, justify)
from .pager import PagerApp, PagerRenderable
if width < 0:
width = console.width
render_options = console.options.update(width=width - 1)
lines = console.render_lines(renderable, render_options, new_lines=True)
PagerApp.run(title=resource, content=PagerRenderable(lines, width=width))
else:
try:
console.print(
renderable,
width=None if max_width <= 0 else max_width,
soft_wrap=soft,
justify=justify,
)
except Exception as error:
on_error("failed to print resource", error)
if export_html:
try:
console.save_html(export_html, clear=False)
except Exception as error:
on_error("failed to save HTML", error)
if export_svg:
try:
console.save_svg(export_svg, clear=False)
except Exception as error:
on_error("failed to save SVG", error)
def render_csv(
resource: str,
head: Optional[int] = None,
tail: Optional[int] = None,
title: Optional[str] = None,
caption: Optional[str] = None,
) -> RenderableType:
"""Render resource as CSV.
Args:
resource (str): Resource string.
Returns:
RenderableType: Table renderable.
"""
import io
import csv
import re
from rich import box
from rich.table import Table
from operator import itemgetter
is_number = re.compile(r"\-?[0-9]*?\.?[0-9]*?").fullmatch
csv_data, _ = read_resource(resource, "csv")
sniffer = csv.Sniffer()
try:
dialect = sniffer.sniff(csv_data[:1024], delimiters=",\t|;")
has_header = sniffer.has_header(csv_data[:1024])
except csv.Error as error:
if resource.lower().endswith(".csv"):
dialect = csv.get_dialect("excel")
has_header = True
elif resource.lower().endswith(".tsv"):
dialect = csv.get_dialect("excel-tab")
has_header = True
else:
on_error(str(error))
csv_file = io.StringIO(csv_data)
reader = csv.reader(csv_file, dialect=dialect)
table = Table(
show_header=has_header,
box=box.HEAVY_HEAD if has_header else box.SQUARE,
border_style="blue",
title=title,
caption=caption,
caption_justify="right",
)
rows = iter(reader)
if has_header:
header = next(rows)
for column in header:
table.add_column(column)
table_rows = [row for row in rows if row]
if head is not None:
table_rows = table_rows[:head]
elif tail is not None:
table_rows = table_rows[-tail:]
for row in table_rows:
if row:
table.add_row(*row)
for index, table_column in enumerate(table.columns):
get_index = itemgetter(index)
for row in table_rows:
try:
value = get_index(row)
if value and not is_number(value):
break
except Exception:
break
else:
table_column.justify = "right"
table_column.style = "bold green"
table_column.header_style = "bold green"
return table
def render_ipynb(
resource: str,
theme: str,
hyperlinks: bool,
lexer: str,
head: Optional[int],
tail: Optional[int],
line_numbers: bool,
guides: bool,
no_wrap: bool,
) -> RenderableType:
"""Render resource as Jupyter notebook.
Args:
resource (str): Resource string.
theme (str): Syntax theme for code cells.
hyperlinks (bool): Whether to render hyperlinks in Markdown cells.
lexer (str): Lexer for code cell syntax highlighting (if no language set in notebook).
head (int): Display first `head` lines of each cell.
tail (int): Display last `tail` lines of each cell.
line_numbers (bool): Enable line number in code cells.
guides (bool): Enable indentation guides in code cell syntax highlighting.
no_wrap (bool): Don't word wrap syntax highlighted cells.
Returns:
RenderableType: Notebook as Markdown renderable.
"""
import json
from rich.syntax import Syntax
from rich.console import Group
from rich.panel import Panel
from .markdown import Markdown
notebook_str, _ = read_resource(resource, None)
notebook_dict = json.loads(notebook_str)
lexer = lexer or notebook_dict.get("metadata", {}).get("kernelspec", {}).get(
"language", ""
)
renderable: RenderableType
new_line = True
cells: List[RenderableType] = []
for cell in notebook_dict["cells"]:
if new_line:
cells.append("")
if "execution_count" in cell:
execution_count = cell["execution_count"] or " "
cells.append(f"[green]In [[#66ff00]{execution_count}[/#66ff00]]:[/green]")
source = "".join(cell["source"])
if cell["cell_type"] == "code":
num_lines = len(source.splitlines())
line_range = _line_range(head, tail, num_lines)
renderable = Panel(
Syntax(
source,
lexer,
theme=theme,
line_numbers=line_numbers,
indent_guides=guides,
word_wrap=not no_wrap,
line_range=line_range,
),
border_style="dim",
)
elif cell["cell_type"] == "markdown":
renderable = Markdown(source, code_theme=theme, hyperlinks=hyperlinks)
else:
renderable = Text(source)
new_line = True
cells.append(renderable)
for output in cell.get("outputs", []):
output_type = output["output_type"]
if output_type == "stream":
renderable = Text.from_ansi("".join(output["text"]))
new_line = False
elif output_type == "error":
renderable = Text.from_ansi("\n".join(output["traceback"]).rstrip())
new_line = True
elif output_type == "execute_result":
execution_count = output.get("execution_count", " ") or " "
renderable = Text.from_markup(
f"[red]Out[[#ee4b2b]{execution_count}[/#ee4b2b]]:[/red]\n"
)
data = output["data"].get("text/plain", "")
if isinstance(data, list):
renderable += Text.from_ansi("".join(data))
else:
renderable += Text.from_ansi(data)
new_line = True
else:
continue
cells.append(renderable)
renderable = Group(*cells)
return renderable
def _line_range(
head: Optional[int], tail: Optional[int], num_lines: int
) -> Optional[Tuple[int, int]]:
if head and tail:
on_error("cannot specify both head and tail")
if head:
line_range = (1, head)
elif tail:
start_line = num_lines - tail + 2
finish_line = num_lines + 1
line_range = (start_line, finish_line)
else:
line_range = None
return line_range
def run():
main()
if __name__ == "__main__":
run() | /rich_cli-1.8.0-py3-none-any.whl/rich_cli/__main__.py | 0.553747 | 0.171651 | __main__.py | pypi |
from enum import auto
from typing import Iterable, List
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from rich.segment import Segment
from textual import events
from textual.app import App
from textual.widgets import ScrollView
class PagerRenderable:
def __init__(
self, lines: Iterable[List[Segment]], new_lines: bool = False, width: int = 80
) -> None:
"""A simple renderable containing a number of lines of segments. May be used as an intermediate
in rendering process.
Args:
lines (Iterable[List[Segment]]): Lists of segments forming lines.
new_lines (bool, optional): Insert new lines after each line. Defaults to False.
"""
self.lines = list(lines)
self.new_lines = new_lines
self.width = width
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
new_line = Segment.line()
for line in self.lines:
yield from line
yield new_line
else:
for line in self.lines:
yield from line
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
return Measurement(self.width, self.width)
class PagerApp(App):
"""App to scroll renderable"""
def __init__(
self,
*args,
content=None,
**kwargs,
) -> None:
self.content = content
super().__init__(*args, **kwargs)
async def on_load(self, event: events.Load) -> None:
await self.bind("q", "quit", "Quit")
async def on_key(self, event: events.Key) -> None:
if event.key == "j":
self.body.scroll_up()
elif event.key == "k":
self.body.scroll_down()
elif event.key == " ":
self.body.page_down()
elif event.key == "ctrl+u":
self.body.target_y -= self.body.size.height // 2
self.body.animate("y", self.body.target_y, easing="out_cubic")
elif event.key == "ctrl+d":
self.body.target_y += self.body.size.height // 2
self.body.animate("y", self.body.target_y, easing="out_cubic")
async def on_mount(self, event: events.Mount) -> None:
self.body = body = ScrollView(auto_width=True)
await self.view.dock(body)
await body.focus()
await body.update(self.content) | /rich_cli-1.8.0-py3-none-any.whl/rich_cli/pager.py | 0.84869 | 0.224406 | pager.py | pypi |
# rich-click
**Format [click](https://click.palletsprojects.com/) help output nicely with [Rich](https://github.com/Textualize/rich).**
- Click is a _"Python package for creating beautiful command line interfaces"_.
- Rich is a _"Python library for rich text and beautiful formatting in the terminal"_.
The intention of `rich-click` is to provide attractive help output from
click, formatted with rich, with minimal customisation required.
## Features
- 🌈 Rich command-line formatting of click help and error messages
- 💫 Nice styles by default; usage is simply `import rich_click as click`
- 💻 CLI tool to run on _other people's_ tools (prefix the command with `rich-click`)
- 🎁 Group commands and options into named panels
- ❌ Well formatted error messages
- 🔢 Easily give custom sort order for options and commands
- 🎨 Extensive customisation of styling and behaviour possible

_Screenshot from [`examples/03_groups_sorting.py`](examples/03_groups_sorting.py)_
## Installation
You can install `rich-click` from the [Python Package Index (PyPI)](https://pypi.org/project/rich-click/) with `pip` or equivalent.
```bash
python -m pip install rich-click
```
Conda users can find `rich-click` on [conda forge](https://anaconda.org/conda-forge/rich-click).
Just set up conda to use conda-forge (see [docs](https://conda-forge.org/docs/user/introduction.html#how-can-i-install-packages-from-conda-forge)) then run:
```bash
conda install rich-click
```
Users on macOS can install `rich-click` via [MacPorts](https://ports.macports.org/port/py-rich-click/).
```bash
sudo port install py-rich-click
```
Note that rich-click requires `click>=7` but formatted subcommands (groups) only work with `click>=8`. With v7 the output simply reverts to default click output.
## Usage
### Import as click
To use `rich-click`, switch out your normal `click` import with `rich-click`, using the same namespace:
```python
import rich_click as click
```
That's it ✨ Then continue to use `click` as you would normally.
> See [`examples/01_simple.py`](examples/01_simple.py) for an example.
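As a rough sketch (the bundled examples are the canonical versions), a plain click command works unchanged after swapping the import:

```python
import rich_click as click


@click.command()
@click.option("--name", default="World", help="Who to greet.")
def hello(name):
    """Print a friendly greeting."""
    click.echo(f"Hello, {name}!")


if __name__ == "__main__":
    hello()
```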
The intention is to maintain most / all of the normal click functionality and arguments.
If you spot something that breaks or is missing once you start using the plugin, please create an issue about it.
### Declarative
If you prefer, you can use `RichGroup` or `RichCommand` with the `cls` argument in your click usage instead.
This means that you can continue to use the unmodified `click` package in parallel.
> See [`examples/02_declarative.py`](examples/02_declarative.py) for an example.
### Command-line usage
`rich-click` comes with a CLI tool that allows you to format the click help output from _any_ package.
As long as that tool is using click and isn't already passing custom `cls` objects, it should work.
However, please consider it an experimental feature at this point.
To use it, simply prefix `rich-click` to your normal command.
For example, to get richified click help text from a package called `awesometool`, you could run:
```console
$ rich-click awesometool --help
Usage: awesometool [OPTIONS]
..more richified output below..
```
### Patching
In some situations, you might be registering a command from another Click CLI that does not use Rich-Click:
```python
import rich_click as click
from some_library import another_cli
@click.group("my-cli")
def cli():
pass
# `another_cli` will NOT have Rich-Click markup. :(
cli.add_command(another_cli)
```
In this situation, `another_cli` retains its original behavior. In order to make `another_cli` work with Rich-Click, you need to patch `click` before you import `another_cli`. You can patch Click with `rich_click.cli.patch` like this:
```python
import rich_click as click
from rich_click.cli import patch
patch()
from some_library import another_cli # noqa: E402
@click.group("my-cli")
def cli():
pass
# `another_cli` will have Rich-Click markup. :)
cli.add_command(another_cli)
```
## Customisation
There are a large number of customisation options in rich-click.
These can be modified by changing variables in the `click.rich_click` namespace.
Note that most normal click options should still work, such as `show_default=True`, `required=True` and `hidden=True`.
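For instance (a hypothetical sketch, not one of the bundled examples), these keyword arguments pass straight through to click as usual:

```python
import rich_click as click


@click.command()
@click.option("--env", default="production", show_default=True, help="Target environment.")
@click.option("--token", required=True, help="API token.")
@click.option("--debug", is_flag=True, hidden=True, help="Enable verbose output.")
def deploy(env, token, debug):
    """Deploy the application."""
```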
> Note: All images below are auto-generated using another side-project of mine: [rich-codex](https://github.com/ewels/rich-codex). Pretty cool!
### Using rich markup
In order to be as widely compatible as possible with a simple import, rich-click does _not_ parse rich formatting markup (eg. `[red]`) by default. You need to opt-in to this behaviour.
To use rich markup in your help texts, add the following:
```python
click.rich_click.USE_RICH_MARKUP = True
```
Remember that you'll need to escape any regular square brackets using a back slash in your help texts,
for example: `[dim]\[my-default: foo][/]`

> See [`examples/04_rich_markup.py`](examples/04_rich_markup.py) for an example.
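As a rough sketch (hypothetical command and option names; see the example above for the full version), markup can appear in both the command help and the option help strings:

```python
import rich_click as click

click.rich_click.USE_RICH_MARKUP = True


@click.command()
@click.option("--force", is_flag=True, help="[red]Danger![/] Skip the confirmation prompt.")
def clean(force):
    """Remove generated files from the [b]build[/] directory."""
```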
### Using Markdown
If you prefer, you can use Markdown text.
You must choose either Markdown or rich markup. If you specify both, Markdown takes preference.
```python
click.rich_click.USE_MARKDOWN = True
```

> See [`examples/05_markdown.py`](examples/05_markdown.py) for an example.
### Positional arguments
The default click behaviour is to only show positional arguments in the top usage string,
and not in the list below with the options.
If you prefer, you can tell rich-click to show arguments with `SHOW_ARGUMENTS`.
By default, they will get their own panel but you can tell rich-click to bundle them together with `GROUP_ARGUMENTS_OPTIONS`:
```python
click.rich_click.SHOW_ARGUMENTS = True
click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
```

> See [`examples/06_arguments.py`](examples/06_arguments.py) for an example.
### Metavars and option choices
Metavars are click's way of showing expected input types.
For example, if you have an option that must be an integer, the metavar is `INTEGER`.
If you have a choice, the metavar is a list of the possible values.
By default, rich-click shows metavars in their own column.
However, if you have a long list of choices, this column can be quite wide and result in a lot of white space:

It may look better to show metavars appended to the help text, instead of in their own column.
For this, use the following:
```python
click.rich_click.SHOW_METAVARS_COLUMN = False
click.rich_click.APPEND_METAVARS_HELP = True
```

> See [`examples/08_metavars.py`](examples/08_metavars.py) for an example.
### Error messages
By default, rich-click gives some nice formatting to error messages:

You can customise the _Try 'command --help' for help._ message with `ERRORS_SUGGESTION`,
and add some text after the error with `ERRORS_EPILOGUE`.
For example, from [`examples/07_custom_errors.py`](examples/07_custom_errors.py):
```python
click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
click.rich_click.ERRORS_SUGGESTION = "Try running the '--help' flag for more information."
click.rich_click.ERRORS_EPILOGUE = "To find out more, visit [link=https://mytool.com]https://mytool.com[/link]"
```

> See [`examples/07_custom_errors.py`](examples/07_custom_errors.py) for an example.
### Help width
The default behaviour of rich-click is to use the full width of the terminal for output.
However, if you've carefully crafted your help texts for the default narrow click output, you may find that you now have a lot of whitespace at the side of the panels.
To limit the maximum width of the help output, set `MAX_WIDTH` in characters, as follows:
```python
click.rich_click.MAX_WIDTH = 100
```
### Styling
Most aspects of rich-click formatting can be customised, from colours to alignment.
For example, to print the option flags in a different colour, you can use:
```python
click.rich_click.STYLE_OPTION = "magenta"
```
To add a blank line between rows of options, you can use:
```python
click.rich_click.STYLE_OPTIONS_TABLE_LEADING = 1
click.rich_click.STYLE_OPTIONS_TABLE_BOX = "SIMPLE"
```
You can make some really ~horrible~ _colourful_ solutions using these styles if you wish:
<!-- RICH-CODEX
extra_env:
TERMINAL_WIDTH: 160
-->

> See [`examples/10_table_styles.py`](examples/10_table_styles.py) for an example.
See the [_Configuration options_](#configuration-options) section below for the full list of available options.
## Groups and sorting
`rich-click` gives functionality to list options and subcommands in groups, printed as separate panels.
It accepts a list of options / commands which means you can also choose a custom sorting order.
- For options (flags), set `click.rich_click.OPTION_GROUPS`
- For subcommands (groups), set `click.rich_click.COMMAND_GROUPS`

> See [`examples/03_groups_sorting.py`](examples/03_groups_sorting.py) for a full example.
### Options
To group option flags into two sections with custom names, see the following example:
```python
click.rich_click.OPTION_GROUPS = {
"mytool": [
{
"name": "Simple options",
"options": ["--name", "--description", "--version", "--help"],
},
{
"name": "Advanced options",
"options": ["--force", "--yes", "--delete"],
},
]
}
```
If you omit `name` it will use `Options` (can be configured with `OPTIONS_PANEL_TITLE`).
### Commands
Here we create two groups of commands for the base command of `mytool`.
Any subcommands not listed will automatically be printed in a panel at the end labelled "Commands" as usual.
```python
click.rich_click.COMMAND_GROUPS = {
"mytool": [
{
"name": "Commands for uploading",
"commands": ["sync", "upload"],
},
{
"name": "Download data",
"commands": ["get", "fetch", "download"],
},
]
}
```
If you omit `name` it will use `Commands` (can be configured with `COMMANDS_PANEL_TITLE`).
### Multiple commands
If you use multiple nested subcommands, you can specify their commands using the top-level dictionary keys:
```python
click.rich_click.COMMAND_GROUPS = {
"mytool": [{"commands": ["sync", "auth"]}],
"mytool sync": [
{
"name": "Commands for uploading",
"commands": ["sync", "upload"],
},
{
"name": "Download data",
"commands": ["get", "fetch", "download"],
},
],
"mytool auth":[{"commands": ["login", "logout"]}],
}
```
### Table styling
Typically you would style the option / command tables using the global config options.
However, if you wish you may style tables on a per-group basis using the `table_styles` key:
```python
click.rich_click.COMMAND_GROUPS = {
"mytool": [
{
"commands": ["sync", "auth"],
"table_styles": {
"show_lines": True,
"row_styles": ["magenta", "yellow", "cyan", "green"],
"border_style": "red",
"box": "DOUBLE",
},
},
],
}
```
The available keys are: `show_lines`, `leading`, `box`, `border_style`, `row_styles`, `pad_edge`, `padding`.
## Configuration options
Here is the full list of config options:
```python
# Default styles
STYLE_OPTION = "bold cyan"
STYLE_ARGUMENT = "bold cyan"
STYLE_SWITCH = "bold green"
STYLE_METAVAR = "bold yellow"
STYLE_METAVAR_APPEND = "dim yellow"
STYLE_METAVAR_SEPARATOR = "dim"
STYLE_HEADER_TEXT = ""
STYLE_FOOTER_TEXT = ""
STYLE_USAGE = "yellow"
STYLE_USAGE_COMMAND = "bold"
STYLE_DEPRECATED = "red"
STYLE_HELPTEXT_FIRST_LINE = ""
STYLE_HELPTEXT = "dim"
STYLE_OPTION_HELP = ""
STYLE_OPTION_DEFAULT = "dim"
STYLE_OPTION_ENVVAR = "dim yellow"
STYLE_REQUIRED_SHORT = "red"
STYLE_REQUIRED_LONG = "dim red"
STYLE_OPTIONS_PANEL_BORDER = "dim"
ALIGN_OPTIONS_PANEL = "left"
STYLE_OPTIONS_TABLE_SHOW_LINES = False
STYLE_OPTIONS_TABLE_LEADING = 0
STYLE_OPTIONS_TABLE_PAD_EDGE = False
STYLE_OPTIONS_TABLE_PADDING = (0, 1)
STYLE_OPTIONS_TABLE_BOX = ""
STYLE_OPTIONS_TABLE_ROW_STYLES = None
STYLE_OPTIONS_TABLE_BORDER_STYLE = None
STYLE_COMMANDS_PANEL_BORDER = "dim"
ALIGN_COMMANDS_PANEL = "left"
STYLE_COMMANDS_TABLE_SHOW_LINES = False
STYLE_COMMANDS_TABLE_LEADING = 0
STYLE_COMMANDS_TABLE_PAD_EDGE = False
STYLE_COMMANDS_TABLE_PADDING = (0, 1)
STYLE_COMMANDS_TABLE_BOX = ""
STYLE_COMMANDS_TABLE_ROW_STYLES = None
STYLE_COMMANDS_TABLE_BORDER_STYLE = None
STYLE_ERRORS_PANEL_BORDER = "red"
ALIGN_ERRORS_PANEL = "left"
STYLE_ERRORS_SUGGESTION = "dim"
STYLE_ABORTED = "red"
MAX_WIDTH = None # Set to an int to limit to that many characters
COLOR_SYSTEM = "auto" # Set to None to disable colors
# Fixed strings
HEADER_TEXT = None
FOOTER_TEXT = None
DEPRECATED_STRING = "(Deprecated) "
DEFAULT_STRING = "[default: {}]"
ENVVAR_STRING = "[env var: {}]"
REQUIRED_SHORT_STRING = "*"
REQUIRED_LONG_STRING = "[required]"
RANGE_STRING = " [{}]"
APPEND_METAVARS_HELP_STRING = "({})"
ARGUMENTS_PANEL_TITLE = "Arguments"
OPTIONS_PANEL_TITLE = "Options"
COMMANDS_PANEL_TITLE = "Commands"
ERRORS_PANEL_TITLE = "Error"
ERRORS_SUGGESTION = None # Default: Try 'cmd -h' for help. Set to False to disable.
ERRORS_EPILOGUE = None
ABORTED_TEXT = "Aborted."
# Behaviours
SHOW_ARGUMENTS = False # Show positional arguments
SHOW_METAVARS_COLUMN = True # Show a column with the option metavar (eg. INTEGER)
APPEND_METAVARS_HELP = False # Append metavar (eg. [TEXT]) after the help text
GROUP_ARGUMENTS_OPTIONS = False # Show arguments with options instead of in own panel
USE_MARKDOWN = False # Parse help strings as markdown
USE_MARKDOWN_EMOJI = True # Parse emoji codes in markdown :smile:
USE_RICH_MARKUP = False # Parse help strings for rich markup (eg. [red]my text[/])
COMMAND_GROUPS = {} # Define sorted groups of panels to display subcommands
OPTION_GROUPS = {} # Define sorted groups of panels to display options and arguments
USE_CLICK_SHORT_HELP = False # Use click's default function to truncate help text
```
## Contributing
Contributions and suggestions for new features are welcome, as are bug reports!
Please create a new [issue](https://github.com/ewels/rich-click/issues)
or better still, dive right in with a pull-request.
### Local setup
1. Create a new venv with a python3.7+ interpreter using `python3 -m venv venv`
2. Activate the venv with `source venv/bin/activate`
3. Install the package as an editable install, including all dev dependencies, with `pip3 install -e ."[dev]"`
4. Install pre-commit with `pre-commit install`
#### Pre-commit
Our pre-commit configuration contains the following hooks:
- [Prettier](https://prettier.io/): formats our markdown and yaml files nicely.
- no relative imports: prevents you from using relative imports.
- [iSort](https://pycqa.github.io/isort/): will automatically sort the imports alphabetically.
- [black](https://black.readthedocs.io/): will automatically format your code to be according to standardized python format.
- [flake8](https://flake8.pycqa.org/): will do linting checks to make sure all your code is correctly styled and used.
- [mypy](http://mypy-lang.org/): static type checker which verifies you are not using objects incorrectly.
As mentioned, some of these tools automatically fix your code while others only highlight potential issues.
Sometimes it will be enough to try to commit a second time and it will pass, while other times it may require
manual changes to your code.
In rare cases it may be difficult or undesirable to change to code to pass the linting rules.
If this happens, it's ok to add a flake8 `# noqa` or mypy `# type: ignore` comment to skip that line.
For details of how to do this, please see the [flake8 docs](https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html#in-line-ignoring-errors)
and [mypy docs](https://mypy.readthedocs.io/en/stable/common_issues.html#spurious-errors-and-locally-silencing-the-checker).
## Credits
This package was written by Phil Ewels ([@ewels](http://github.com/ewels/)),
based on initial code by Will McGugan ([@willmcgugan](https://github.com/willmcgugan)).
Furthermore, these contributors helped make the package what it is today:
- [@harens](http://github.com/harens/)
- [@fridex](http://github.com/fridex/)
- [@pawamoy](http://github.com/pawamoy/)
- [@jorrick](http://github.com/jorrick/)
| /rich-click-1.6.1.tar.gz/rich-click-1.6.1/README.md | 0.453504 | 0.929792 | README.md | pypi |
import time
from contextlib import contextmanager
import pandas as pd
from rich import print
from rich.box import MINIMAL, SIMPLE, SIMPLE_HEAD, SQUARE
from rich.columns import Columns
from rich.console import Console
from rich.live import Live
from rich.measure import Measurement
from rich.table import Table
console = Console()
BEAT_TIME = 0.008
COLORS = ["cyan", "magenta", "red", "green", "blue", "purple"]
@contextmanager
def beat(length: int = 1) -> None:
with console:
yield
time.sleep(length * BEAT_TIME)
class DataFramePrettify:
"""Create animated and pretty Pandas DataFrame
Parameters
----------
df : pd.DataFrame
The data you want to prettify
row_limit : int, optional
Number of rows to show, by default 20
col_limit : int, optional
Number of columns to show, by default 10
first_rows : bool, optional
Whether to show first n rows or last n rows, by default True. If this is set to False, show last n rows.
first_cols : bool, optional
Whether to show first n columns or last n columns, by default True. If this is set to False, show last n columns.
delay_time : int, optional
How fast is the animation, by default 5. Increase this to have slower animation.
clear_console: bool, optional
Clear the console before printing the table, by default True. If this is set to False the previous console input/output is maintained
"""
def __init__(
self,
df: pd.DataFrame,
row_limit: int = 20,
col_limit: int = 10,
first_rows: bool = True,
first_cols: bool = True,
delay_time: int = 5,
clear_console: bool = True,
) -> None:
self.df = df.reset_index().rename(columns={"index": ""})
self.table = Table(show_footer=False)
self.table_centered = Columns(
(self.table,), align="center", expand=True
)
self.num_colors = len(COLORS)
self.delay_time = delay_time
self.row_limit = row_limit
self.first_rows = first_rows
self.col_limit = col_limit
self.first_cols = first_cols
self.clear_console = clear_console
if first_cols:
self.columns = self.df.columns[:col_limit]
else:
self.columns = list(self.df.columns[-col_limit:])
self.columns.insert(0, "index")
if first_rows:
self.rows = self.df.values[:row_limit]
else:
self.rows = self.df.values[-row_limit:]
if self.clear_console:
console.clear()
def _add_columns(self):
for col in self.columns:
with beat(self.delay_time):
self.table.add_column(str(col))
def _add_rows(self):
for row in self.rows:
with beat(self.delay_time):
if self.first_cols:
row = row[: self.col_limit]
else:
row = row[-self.col_limit :]
row = [str(item) for item in row]
self.table.add_row(*list(row))
def _move_text_to_right(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].justify = "right"
def _add_random_color(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].header_style = COLORS[
i % self.num_colors
]
def _add_style(self):
for i in range(len(self.table.columns)):
with beat(self.delay_time):
self.table.columns[i].style = (
"bold " + COLORS[i % self.num_colors]
)
def _adjust_box(self):
for box in [SIMPLE_HEAD, SIMPLE, MINIMAL, SQUARE]:
with beat(self.delay_time):
self.table.box = box
def _dim_row(self):
with beat(self.delay_time):
self.table.row_styles = ["none", "dim"]
def _adjust_border_color(self):
with beat(self.delay_time):
self.table.border_style = "bright_yellow"
def _change_width(self):
original_width = Measurement.get(console, self.table).maximum
width_ranges = [
[original_width, console.width, 2],
[console.width, original_width, -2],
[original_width, 90, -2],
[90, original_width + 1, 2],
]
for width_range in width_ranges:
for width in range(*width_range):
with beat(self.delay_time):
self.table.width = width
with beat(self.delay_time):
self.table.width = None
def _add_caption(self):
if self.first_rows:
row_text = "first"
else:
row_text = "last"
if self.first_cols:
col_text = "first"
else:
col_text = "last"
with beat(self.delay_time):
self.table.caption = f"Only the {row_text} {self.row_limit} rows and the {col_text} {self.col_limit} columns is shown here."
with beat(self.delay_time):
self.table.caption = f"Only the [bold green] {row_text} {self.row_limit} rows[/bold green] and the [bold red]{self.col_limit} {col_text} columns[/bold red] is shown here."
with beat(self.delay_time):
self.table.caption = f"Only the [bold magenta not dim] {row_text} {self.row_limit} rows[/bold magenta not dim] and the [bold green not dim]{col_text} {self.col_limit} columns[/bold green not dim] are shown here."
def prettify(self):
with Live(
self.table_centered,
console=console,
refresh_per_second=self.delay_time,
vertical_overflow="ellipsis",
):
self._add_columns()
self._add_rows()
self._move_text_to_right()
self._add_random_color()
self._add_style()
# self._adjust_box()
self._adjust_border_color()
# self._change_width()
self._add_caption()
return self.table
def prettify(
df: pd.DataFrame,
row_limit: int = 20,
col_limit: int = 10,
first_rows: bool = True,
first_cols: bool = True,
delay_time: int = 5,
clear_console: bool = True,
):
"""Create animated and pretty Pandas DataFrame
Parameters
----------
df : pd.DataFrame
The data you want to prettify
row_limit : int, optional
Number of rows to show, by default 20
col_limit : int, optional
Number of columns to show, by default 10
first_rows : bool, optional
Whether to show first n rows or last n rows, by default True. If this is set to False, show last n rows.
first_cols : bool, optional
Whether to show first n columns or last n columns, by default True. If this is set to False, show last n columns.
delay_time : int, optional
How fast is the animation, by default 5. Increase this to have slower animation.
clear_console: bool, optional
Clear the console before printing the table, by default True. If this is set to False, the previous console input/output is maintained
"""
if isinstance(df, pd.DataFrame):
DataFramePrettify(
df, row_limit, col_limit, first_rows, first_cols, delay_time,clear_console
).prettify()
else:
# In case users accidentally pass a non-dataframe input, use rich's print instead
print(df) | /rich-dataframe-0.2.0.tar.gz/rich-dataframe-0.2.0/rich_dataframe/rich_dataframe.py | 0.618435 | 0.378172 | rich_dataframe.py | pypi |
import logging
from dataclasses import dataclass
from itertools import islice
from queue import Queue
from signal import SIGWINCH
from typing import Generic, Iterable, List, Optional, Set, TypeVar
from more_itertools import mark_ends
from prompt_toolkit.key_binding import KeyPress
from prompt_toolkit.keys import Keys
from returns.result import safe
from rich.console import Console, ConsoleOptions, RenderResult
from rich.style import Style
from rich.table import Column, Table
from rich_elm import events
from rich_elm.events import Signal
logger = logging.getLogger(__name__)
T = TypeVar("T")
def saturating_add(i: int, a: int, max: int) -> int:
if (sum := i + a) > max:
return max
return sum
def saturating_sub(i: int, s: int, min: int) -> int:
if (sum := i - s) < min:
return min
return sum
def max_index(l: List):
return len(l) - 1
@dataclass
class Select(Generic[T]):
item: T
selected: bool = False
def toggle(self):
self.selected = not self.selected
@dataclass
class Cursor(Generic[T]):
items: List[T]
cursor: int = 0
@classmethod
def from_iterable(cls, i: Iterable[T]) -> "Cursor":
return Cursor(items=list(i))
def bump_up(self):
self.cursor = saturating_sub(self.cursor, 1, 0)
def bump_down(self):
self.cursor = saturating_add(self.cursor, 1, max_index(self.items))
def jump_to_top(self):
self.cursor = 0
def jump_to_bottom(self):
self.cursor = max_index(self.items)
def current(self) -> T:
return self.items[self.cursor]
@dataclass
class ListSelectRender:
data: Cursor[Select[str]]
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
logger.info(f"{len(self.data.items)=}, {self.data.cursor=}")
table = Table(
*(
Column(header=name, no_wrap=True, min_width=1)
for name in ["scrollbar", "toggle", "text"]
),
box=None,
show_header=False,
)
if self.data.cursor >= options.max_height:
# v O ...
# v O ...
# v O ... max_height = 3
# O ...
# X ... cursor = 4
start = (self.data.cursor - options.max_height) + 1
else:
start = 0
for is_first, is_last, (i, candidate) in mark_ends(
islice(enumerate(self.data.items), start, start + options.max_height)
):
if is_first and not is_last:
if i == 0:
scrollbar = "■"
else:
scrollbar = "▲"
elif is_first and is_last:
scrollbar = "■"
elif is_last:
if i == max_index(self.data.items):
scrollbar = "■"
else:
scrollbar = "▼"
else:
scrollbar = "|"
if candidate.selected:
toggled = "+"
else:
toggled = " "
if i == self.data.cursor:
style = Style(bgcolor="white", color="black")
else:
style = None
table.add_row(scrollbar, toggled, candidate.item, style=style)
return table.__rich_console__(console, options)
if __name__ == "__main__":
from logging import FileHandler
logger.addHandler(FileHandler("list-view.log", mode="w"))
logger.setLevel(logging.DEBUG)
@safe(exceptions=(KeyboardInterrupt,)) # type: ignore
def list_viewer_safe(candidates: Iterable[str]) -> Set[str]:
queue: "Queue[KeyPress | Signal]" = Queue()
with Console(stderr=True).screen() as ctx, events.for_signals(
SIGWINCH, queue=queue
), events.for_stdin(queue=queue):
console: Console = ctx.console
state: Cursor[Select[str]] = Cursor.from_iterable(
Select(c) for c in candidates
)
console.update_screen(ListSelectRender(state)) # Initial display
while event := queue.get():
if isinstance(event, Signal):
console.update_screen(ListSelectRender(state)) # Redraw on resize
elif isinstance(event.key, Keys):
if event.key == Keys.Up or event.key == Keys.Left:
state.bump_up()
elif event.key == Keys.Down or event.key == Keys.Right:
state.bump_down()
elif event.key == Keys.Tab:
state.items[state.cursor].toggle()
elif event.key == Keys.Home:
state.jump_to_top()
elif event.key == Keys.End:
state.jump_to_bottom()
elif event.key == Keys.Enter:
return set(
candidate.item
for candidate in state.items
if candidate.selected
)
else:
raise NotImplementedError(event)
console.update_screen(ListSelectRender(state))
elif isinstance(event.key, str):
if event.key == "q":
return set(
candidate.item
for candidate in state.items
if candidate.selected
)
def list_viewer(candidates: Iterable[str]) -> Optional[Set[str]]:
return list_viewer_safe(candidates).value_or(None)
print(
list_viewer(
[
"The Zen of Python, by Tim Peters",
"Beautiful is better than ugly.",
"Explicit is better than implicit.",
"Simple is better than complex.",
"Complex is better than complicated.",
"Flat is better than nested.",
"Sparse is better than dense.",
"Readability counts.",
"Special cases aren't special enough to break the rules.",
"Although practicality beats purity.",
"Errors should never pass silently.",
"Unless explicitly silenced.",
"In the face of ambiguity, refuse the temptation to guess.",
"There should be one-- and preferably only one --obvious way to do it.",
"Although that way may not be obvious at first unless you're Dutch.",
"Now is better than never.",
"Although never is often better than *right* now.",
"If the implementation is hard to explain, it's a bad idea.",
"If the implementation is easy to explain, it may be a good idea.",
"Namespaces are one honking great idea -- let's do more of those!",
]
)
) | /rich_elm-0.2.0-py3-none-any.whl/rich_elm/list_select.py | 0.696371 | 0.193547 | list_select.py | pypi |
import sys
from typing import IO, Any, Callable, Dict, Optional, Tuple
import spacy
from spacy import Errors, registry
from tqdm import tqdm
from .table_printer import RichTablePrinter
@registry.loggers("rich-logger")
def rich_logger(
progress_bar: bool = False,
) -> Callable[
[spacy.Language],
Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]],
]:
"""
A rich based logger that renders nicely in Jupyter notebooks and console
Parameters
----------
progress_bar: bool
Whether to show a training progress bar or not
Returns
-------
Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]
"""
def setup_printer(
nlp: spacy.Language, stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
# ensure that only trainable components are logged
logged_pipes = [
name
for name, proc in nlp.pipeline
if hasattr(proc, "is_trainable") and proc.is_trainable
]
eval_frequency = nlp.config["training"]["eval_frequency"]
score_weights = nlp.config["training"]["score_weights"]
score_cols = [
col for col, value in score_weights.items() if value is not None
] + ["speed"]
fields = {"epoch": {}, "step": {}}
for pipe in logged_pipes:
fields[f"loss_{pipe}"] = {
"format": "{0:.2f}",
"name": f"Loss {pipe}".upper(),
"goal": "lower_is_better",
}
for score, weight in score_weights.items():
if score != "speed" and weight is not None:
fields[score] = {
"format": "{0:.2f}",
"name": score.upper(),
"goal": "higher_is_better",
}
fields["speed"] = {"name": "WPS"}
fields["duration"] = {"name": "DURATION"}
table_printer = RichTablePrinter(fields=fields)
table_printer.hijack_tqdm()
progress: Optional[tqdm] = None
last_seconds = 0
def log_step(info: Optional[Dict[str, Any]]) -> None:
nonlocal progress, last_seconds
if info is None:
# If we don't have a new checkpoint, just return.
if progress is not None:
progress.update(1)
return
data = {
"epoch": info["epoch"],
"step": info["step"],
}
for pipe in logged_pipes:
data[f"loss_{pipe}"] = float(info["losses"][pipe])
for col in score_cols:
score = info["other_scores"].get(col, 0.0)
try:
score = float(score)
except TypeError:
err = Errors.E916.format(name=col, score_type=type(score))
raise ValueError(err) from None
if col != "speed":
score *= 100
data[col] = score
data["duration"] = info["seconds"] - last_seconds
last_seconds = info["seconds"]
table_printer.log(data)
if progress_bar:
# Set disable=None, so that it disables on non-TTY
progress = tqdm(
total=eval_frequency, disable=None, leave=False, file=stderr
)
progress.set_description(f"Epoch {info['epoch'] + 1}")
def finalize() -> None:
table_printer.finalize()
return log_step, finalize
return setup_printer | /rich_logger-0.3.1-py3-none-any.whl/rich_logger/spacy_logger.py | 0.680454 | 0.305529 | spacy_logger.py | pypi |
import sys
import os
from typing import Dict, List, Optional
import rich
from rich.padding import Padding, PaddingDimensions
from rich.table import Table
from rich.style import Style
from rich.styled import Styled
from rich.text import Text
from rich.console import Console, ConsoleOptions, RenderResult
__version__ = "0.1.0"
__author__ = "Martin Larralde <martin.larralde@embl.de>"
__license__ = "MIT"
@rich.repr.auto
class RichAlignment:
"""A `rich` renderable object to display a multiple sequence alignment."""
_DEFAULT = Style(color="gray30", bold=True)
_STYLES = {
l: Style(color=c, bold=True)
for ls, c in [
("AVFPMILW", "red"),
("DE", "blue"),
("RK", "purple"),
("STYHCNGQ", "green"),
]
for l in ls
}
def __init__(
self,
names: List[str],
sequences: List[str],
*,
gap_character: str = "-",
max_name_width: int = 10,
padding: PaddingDimensions = (1, 2, 1, 2),
styles: Optional[Dict[str, Style]] = None,
default_style: Optional[Style] = None,
) -> None:
"""Create a new `RichAlignment` object.
Arguments:
names (`list` of `str`): A list of sequence names to show.
sequences (`list` of `str`): A list of aligned sequences to show.
Keyword Arguments:
gap_character (`str`): The character to treat as a gap in the
alignment, used for counting the offset in the coordinates
column.
max_name_width (`int`): The maximum number of characters to
display in the sequence name column.
padding (`rich.padding.PaddingDimensions`): The padding for the
sequence blocks. Vertical padding will be used between
blocks, and horizontal padding will be used between columns.
styles (`dict` of `rich.style.Style`): A dictionary mapping
individual sequence characters to the style they should be
rendered with.
default_style (`rich.style.Style`): The default style to use
for unknown characters.
"""
if len(names) != len(sequences):
raise ValueError("`names` and `sequences` must have the same length")
if max_name_width <= 0:
raise ValueError("`max_name_width` must be strictly positive")
if sequences and not all(len(seq) == len(sequences[0]) for seq in sequences):
raise ValueError("All strings in `sequences` must have the same length")
self.names = names
self.sequences = sequences
self.sequence_length = len(self.sequences[0]) if sequences else 0
self.gap_character = gap_character
self.max_name_width = max_name_width
self.padding = Padding.unpack(padding)
self.styles = self._STYLES.copy() if styles is None else styles
self.default_style = self._DEFAULT if default_style is None else default_style
def __rich_console__(
self,
console: Console,
options: ConsoleOptions,
) -> RenderResult:
# compute width of the columns so that we know how to wrap the sequences
length_width = len(str(self.sequence_length))
name_width = min(self.max_name_width, max(map(len, self.names)))
if options.no_wrap:
block_length = self.sequence_length
else:
block_length = (
options.max_width - name_width - length_width - self.padding[1] * 3
)
# create a grid to store the different blocks of wrapped sequences
grid = Table.grid(padding=(self.padding[0], 0, self.padding[2], 0), pad_edge=False)
grid.add_column(width=options.max_width, no_wrap=True)
for block_pos in range(0, self.sequence_length, block_length):
# create the grid with the current sequence block
table = Table.grid(
padding=(0, self.padding[1], 0, self.padding[3]),
pad_edge=False,
)
table.add_column(width=name_width, no_wrap=True, overflow="ellipsis")
table.add_column(width=length_width, no_wrap=True, justify="right")
table.add_column(width=block_length, no_wrap=True)
# add each sequence to the block
for name, characters in zip(self.names, self.sequences):
offset = (
block_pos - characters[:block_pos].count(self.gap_character) + 1
)
letters = [
(letter, self.styles.get(letter, self.default_style))
for letter in characters[block_pos : block_pos + block_length]
]
cell_name = name[:name_width-1] + "…" if len(name) > self.max_name_width else name
table.add_row(
cell_name,
Styled(str(offset), rich.style.Style(bold=True, color="cyan")),
Text.assemble(*letters, no_wrap=True),
)
# add the block to the grid
grid.add_row(table)
# render the grid
yield grid | /rich-msa-0.1.0.tar.gz/rich-msa-0.1.0/rich_msa/__init__.py | 0.798265 | 0.308236 | __init__.py | pypi |
from __future__ import annotations
from pathlib import Path, PurePath
from typing import Iterable, Mapping, Tuple, Union, Optional, List
from PIL import Image as PILImageModule
from PIL.Image import Image
from PIL.Image import Resampling
from rich.console import Console, ConsoleOptions, RenderResult
from rich.segment import Segment, Segments
from rich.style import Style
class Pixels:
def __init__(self) -> None:
self._segments: Segments | None = None
@staticmethod
def from_image(
image: Image,
):
segments = Pixels._segments_from_image(image)
return Pixels.from_segments(segments)
@staticmethod
def from_image_path(
path: Union[PurePath, str],
resize: Optional[Tuple[int, int]] = None,
) -> Pixels:
"""Create a Pixels object from an image. Requires 'image' extra dependencies.
Args:
path: The path to the image file.
resize: A tuple of (width, height) to resize the image to.
"""
with PILImageModule.open(Path(path)) as image:
segments = Pixels._segments_from_image(image, resize)
return Pixels.from_segments(segments)
@staticmethod
def _segments_from_image(
image: Image, resize: Optional[Tuple[int, int]] = None
) -> list[Segment]:
if resize:
image = image.resize(resize, resample=Resampling.NEAREST)
width, height = image.width, image.height
rgba_image = image.convert("RGBA")
get_pixel = rgba_image.getpixel
parse_style = Style.parse
null_style = Style.null()
segments = []
for y in range(height):
this_row: List[Segment] = []
row_append = this_row.append
for x in range(width):
r, g, b, a = get_pixel((x, y))
style = parse_style(f"on rgb({r},{g},{b})") if a > 0 else null_style
row_append(Segment(" ", style))
row_append(Segment("\n", null_style))
# TODO: Double-check if this is required - I've forgotten...
if not all(t[1] == "" for t in this_row[:-1]):
segments += this_row
return segments
@staticmethod
def from_segments(
segments: Iterable[Segment],
) -> Pixels:
"""Create a Pixels object from an Iterable of Segments instance."""
pixels = Pixels()
pixels._segments = Segments(segments)
return pixels
@staticmethod
def from_ascii(
grid: str, mapping: Optional[Mapping[str, Segment]] = None
) -> Pixels:
"""
Create a Pixels object from a 2D-grid of ASCII characters.
Each ASCII character can be mapped to a Segment (a character and style combo),
allowing you to add a splash of colour to your grid.
Args:
grid: A 2D grid of characters (a multi-line string).
mapping: Maps ASCII characters to Segments. Occurrences of a character
will be replaced with the corresponding Segment.
"""
if mapping is None:
mapping = {}
if not grid:
return Pixels.from_segments([])
segments = []
for character in grid:
segment = mapping.get(character, Segment(character))
segments.append(segment)
return Pixels.from_segments(segments)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
yield self._segments or ""
if __name__ == "__main__":
console = Console()
images_path = Path(__file__).parent / "../tests/.sample_data/images"
pixels = Pixels.from_image_path(images_path / "bulbasaur.png")
console.print(pixels)
grid = """\
xx xx
ox ox
Ox Ox
xx xx
xxxxxxxxxxxxxxxxx
"""
mapping = {
"x": Segment(" ", Style.parse("yellow on yellow")),
"o": Segment(" ", Style.parse("on white")),
"O": Segment("O", Style.parse("white on blue")),
}
pixels = Pixels.from_ascii(grid, mapping)
console.print(pixels) | /rich_pixels-2.1.1-py3-none-any.whl/rich_pixels/_pixel.py | 0.866557 | 0.343727 | _pixel.py | pypi |
import argparse
import logging
import os
import sys
from rich.progress import track
import pygount.analysis
import pygount.common
import pygount.write
#: Valid formats for option --format.
VALID_OUTPUT_FORMATS = ("cloc-xml", "sloccount", "summary", "rich")
_DEFAULT_ENCODING = "automatic"
_DEFAULT_OUTPUT_FORMAT = "sloccount"
_DEFAULT_OUTPUT = "STDOUT"
_DEFAULT_SOURCE_PATTERNS = os.curdir
_DEFAULT_SUFFIXES = "*"
_HELP_ENCODING = '''encoding to use when reading source code; use "automatic"
to take BOMs, XML prolog and magic headers into account and fall back to
UTF-8 or CP1252 if none fits; use "automatic;<fallback>" to specify a
different fallback encoding than CP1252; use "chardet" to let the chardet
package determine the encoding; default: "%(default)s"'''
_HELP_EPILOG = """SHELL-PATTERN is a pattern using *, ? and ranges like [a-z]
as placeholders. PATTERNS is a comma separated list of SHELL-PATTERN. The
prefix [regex] indicates that the PATTERNS use regular expression syntax. If
default values are available, [...] indicates that the PATTERNS extend the
existing default values."""
_HELP_FORMAT = 'output format, one of: {0}; default: "%(default)s"'.format(
", ".join(['"' + format + '"' for format in VALID_OUTPUT_FORMATS])
)
_HELP_GENERATED = """comma separated list of regular expressions to detect
generated code; default: %(default)s"""
_HELP_FOLDERS_TO_SKIP = """comma separated list of glob patterns for folder
names not to analyze. Use "..." as first entry to append patterns to the
default patterns; default: %(default)s"""
_HELP_NAMES_TO_SKIP = """comma separated list of glob patterns for file names
not to analyze. Use "..." as first entry to append patterns to the default
patterns; default: %(default)s"""
_HELP_SUFFIX = '''limit analysis on files matching any suffix in comma
separated LIST; shell patterns are possible; example: "py,sql"; default:
"%(default)s"'''
_OUTPUT_FORMAT_TO_WRITER_CLASS_MAP = {
"cloc-xml": pygount.write.ClocXmlWriter,
"sloccount": pygount.write.LineWriter,
"summary": pygount.write.SummaryWriter,
"rich": pygount.write.RichWriter,
}
assert set(VALID_OUTPUT_FORMATS) == set(_OUTPUT_FORMAT_TO_WRITER_CLASS_MAP.keys())
_log = logging.getLogger("pygount")
def _check_encoding(name, encoding_to_check, alternative_encoding, source=None):
"""
Check that ``encoding_to_check`` is a valid Python encoding.
:param name: name under which the encoding is known to the user, e.g. 'default encoding'
:param encoding_to_check: name of the encoding to check, e.g. 'utf-8'
:param alternative_encoding: additional name to accept even though it is not a real Python encoding, e.g. 'automatic'
:param source: source where the encoding has been set, e.g. option name
:raise pygount.common.OptionError: if ``encoding_to_check`` is neither ``alternative_encoding`` nor a known Python encoding
"""
assert name is not None
if encoding_to_check not in (alternative_encoding, "chardet", None):
try:
"".encode(encoding_to_check)
except LookupError:
raise pygount.common.OptionError(
'{0} is "{1}" but must be "{2}" or a known Python encoding'.format(
name, encoding_to_check, alternative_encoding
),
source,
)
class Command:
"""
Command interface for pygount: options start out with default values, can be
set gradually and the command is finally run with :py:meth:`execute()`.
"""
def __init__(self):
self.set_encodings(_DEFAULT_ENCODING)
self._folders_to_skip = pygount.common.regexes_from(pygount.analysis.DEFAULT_FOLDER_PATTERNS_TO_SKIP_TEXT)
self._generated_regexs = pygount.common.regexes_from(pygount.analysis.DEFAULT_GENERATED_PATTERNS_TEXT)
self._has_duplicates = False
self._has_summary = False
self._is_verbose = False
self._names_to_skip = pygount.common.regexes_from(pygount.analysis.DEFAULT_NAME_PATTERNS_TO_SKIP_TEXT)
self._output = _DEFAULT_OUTPUT
self._output_format = _DEFAULT_OUTPUT_FORMAT
self._source_patterns = _DEFAULT_SOURCE_PATTERNS
self._suffixes = pygount.common.regexes_from(_DEFAULT_SUFFIXES)
def set_encodings(self, encoding, source=None):
encoding_is_chardet = (encoding == "chardet") or (encoding.startswith("chardet;"))
if encoding_is_chardet and not pygount.analysis.has_chardet: # pragma: no cover
raise pygount.common.OptionError('chardet must be installed to set default encoding to "chardet"')
if encoding in ("automatic", "chardet"):
default_encoding = encoding
fallback_encoding = None
else:
if encoding.startswith("automatic;") or encoding.startswith("chardet;"):
first_encoding_semicolon_index = encoding.find(";")
default_encoding = encoding[:first_encoding_semicolon_index]
fallback_encoding = encoding[first_encoding_semicolon_index + 1 :]
else:
default_encoding = encoding
fallback_encoding = pygount.analysis.DEFAULT_FALLBACK_ENCODING
self.set_default_encoding(default_encoding, source)
self.set_fallback_encoding(fallback_encoding, source)
@property
def default_encoding(self):
return self._default_encoding
def set_default_encoding(self, default_encoding, source=None):
_check_encoding("default encoding", default_encoding, "automatic", source)
self._default_encoding = default_encoding
@property
def fallback_encoding(self):
return self._fallback_encoding
def set_fallback_encoding(self, fallback_encoding, source=None):
_check_encoding("fallback encoding", fallback_encoding, "automatic", source)
self._fallback_encoding = fallback_encoding
@property
def folders_to_skip(self):
return self._folders_to_skip
def set_folders_to_skip(self, regexes_or_patterns_text, source=None):
self._folders_to_skip = pygount.common.regexes_from(
regexes_or_patterns_text, pygount.analysis.DEFAULT_FOLDER_PATTERNS_TO_SKIP_TEXT, source
)
@property
def generated_regexps(self):
return self._generated_regexs
def set_generated_regexps(self, regexes_or_patterns_text, source=None):
self._generated_regexs = pygount.common.regexes_from(
regexes_or_patterns_text, pygount.analysis.DEFAULT_GENERATED_PATTERNS_TEXT, source
)
@property
def has_duplicates(self):
return self._has_duplicates
def set_has_duplicates(self, has_duplicates, source=None):
self._has_duplicates = bool(has_duplicates)
@property
def is_verbose(self):
return self._is_verbose
def set_is_verbose(self, is_verbose, source=None):
self._is_verbose = bool(is_verbose)
@property
def names_to_skip(self):
return self._names_to_skip
def set_names_to_skip(self, regexes_or_pattern_text, source=None):
self._names_to_skip = pygount.common.regexes_from(
regexes_or_pattern_text, pygount.analysis.DEFAULT_NAME_PATTERNS_TO_SKIP_TEXT, source
)
@property
def output(self):
return self._output
def set_output(self, output, source=None):
assert output is not None
self._output = output
@property
def output_format(self):
return self._output_format
def set_output_format(self, output_format, source=None):
assert output_format is not None
if output_format not in VALID_OUTPUT_FORMATS:
raise pygount.common.OptionError(
"format is {0} but must be one of: {1}".format(output_format, VALID_OUTPUT_FORMATS), source
)
self._output_format = output_format
@property
def source_patterns(self):
return self._source_patterns
def set_source_patterns(self, glob_patterns_or_text, source=None):
assert glob_patterns_or_text is not None
self._source_patterns = pygount.common.as_list(glob_patterns_or_text)
assert len(self._source_patterns) >= 0
@property
def suffixes(self):
return self._suffixes
def set_suffixes(self, regexes_or_patterns_text, source=None):
assert regexes_or_patterns_text is not None
self._suffixes = pygount.common.regexes_from(regexes_or_patterns_text, _DEFAULT_SUFFIXES, source)
def argument_parser(self):
parser = argparse.ArgumentParser(description="count source lines of code", epilog=_HELP_EPILOG)
parser.add_argument("--duplicates", "-d", action="store_true", help="analyze duplicate files")
parser.add_argument("--encoding", "-e", default=_DEFAULT_ENCODING, help=_HELP_ENCODING)
parser.add_argument(
"--folders-to-skip",
"-F",
metavar="PATTERNS",
default=pygount.analysis.DEFAULT_FOLDER_PATTERNS_TO_SKIP_TEXT,
help=_HELP_FOLDERS_TO_SKIP,
)
parser.add_argument(
"--format",
"-f",
metavar="FORMAT",
choices=VALID_OUTPUT_FORMATS,
default=_DEFAULT_OUTPUT_FORMAT,
help=_HELP_FORMAT,
)
parser.add_argument(
"--generated",
"-g",
metavar="PATTERNS",
default=pygount.analysis.DEFAULT_GENERATED_PATTERNS_TEXT,
help=_HELP_GENERATED,
)
parser.add_argument(
"--names-to-skip",
"-N",
metavar="PATTERNS",
default=pygount.analysis.DEFAULT_NAME_PATTERNS_TO_SKIP_TEXT,
help=_HELP_NAMES_TO_SKIP,
)
parser.add_argument(
"--out",
"-o",
metavar="FILE",
default=_DEFAULT_OUTPUT,
help='file to write results to; use "STDOUT" for standard output; default: "%(default)s"',
)
parser.add_argument("--suffix", "-s", metavar="PATTERNS", default=_DEFAULT_SUFFIXES, help=_HELP_SUFFIX)
parser.add_argument(
"source_patterns",
metavar="SHELL-PATTERN",
nargs="*",
default=[os.getcwd()],
help="source files and directories to scan; can use glob patterns; default: current directory",
)
parser.add_argument("--verbose", "-v", action="store_true", help="explain what is being done")
parser.add_argument("--version", action="version", version="%(prog)s " + pygount.common.__version__)
return parser
def parsed_args(self, arguments):
assert arguments is not None
parser = self.argument_parser()
args = parser.parse_args(arguments)
if args.encoding == "automatic":
default_encoding = args.encoding
fallback_encoding = None
elif args.encoding == "chardet":
if not pygount.analysis.has_chardet: # pragma: no cover
parser.error("chardet must be installed in order to specify --encoding=chardet")
default_encoding = args.encoding
fallback_encoding = None
else:
if args.encoding.startswith("automatic;"):
first_encoding_semicolon_index = args.encoding.find(";")
default_encoding = args.encoding[:first_encoding_semicolon_index]
fallback_encoding = args.encoding[first_encoding_semicolon_index + 1 :]
encoding_to_check = ("fallback encoding", fallback_encoding)
else:
default_encoding = args.encoding
fallback_encoding = None
encoding_to_check = ("encoding", default_encoding)
if encoding_to_check is not None:
name, encoding = encoding_to_check
try:
"".encode(encoding)
except LookupError:
parser.error(
"{0} specified with --encoding must be a known Python encoding: {1}".format(name, encoding)
)
return args, default_encoding, fallback_encoding
def apply_arguments(self, arguments=None):
if arguments is None: # pragma: no cover
arguments = sys.argv[1:]
args, default_encoding, fallback_encoding = self.parsed_args(arguments)
self.set_default_encoding(default_encoding, "option --encoding")
self.set_fallback_encoding(fallback_encoding, "option --encoding")
self.set_folders_to_skip(args.folders_to_skip, "option --folders-to-skip")
self.set_generated_regexps(args.generated, "option --generated")
self.set_has_duplicates(args.duplicates, "option --duplicates")
self.set_is_verbose(args.verbose, "option --verbose")
self.set_names_to_skip(args.names_to_skip, "option --folders-to-skip")
self.set_output(args.out, "option --out")
self.set_output_format(args.format, "option --format")
self.set_source_patterns(args.source_patterns, "option PATTERNS")
self.set_suffixes(args.suffix, "option --suffix")
def execute(self):
_log.setLevel(logging.INFO if self.is_verbose else logging.WARNING)
source_scanner = pygount.analysis.SourceScanner(
self.source_patterns, self.suffixes, self.folders_to_skip, self.names_to_skip
)
source_paths_and_groups_to_analyze = list(source_scanner.source_paths())
duplicate_pool = pygount.analysis.DuplicatePool() if not self.has_duplicates else None
if self.output == "STDOUT":
target_file = sys.stdout
has_target_file_to_close = False
else:
target_file = open(self.output, "w", encoding="utf-8", newline="")
has_target_file_to_close = True
try:
writer_class = _OUTPUT_FORMAT_TO_WRITER_CLASS_MAP[self.output_format]
if issubclass(writer_class, pygount.write.RichWriter):
source_paths_and_groups_to_analyze = track(source_paths_and_groups_to_analyze)
with writer_class(target_file) as writer:
for source_path, group in source_paths_and_groups_to_analyze:
statistics = pygount.analysis.SourceAnalysis.from_file(
source_path,
group,
self.default_encoding,
self.fallback_encoding,
generated_regexes=self._generated_regexs,
duplicate_pool=duplicate_pool,
)
writer.add(statistics)
finally:
if has_target_file_to_close:
try:
target_file.close()
except Exception as error:
raise OSError('cannot write output to "{0}": {1}'.format(self.output, error))
def pygount_command(arguments=None):
result = 1
command = Command()
try:
command.apply_arguments(arguments)
command.execute()
result = 0
except KeyboardInterrupt: # pragma: no cover
_log.error("interrupted as requested by user")
except (pygount.common.OptionError, OSError) as error:
_log.error(error)
except Exception as error:
_log.exception(error)
return result
def main(): # pragma: no cover
logging.basicConfig(level=logging.WARNING)
sys.exit(pygount_command())
if __name__ == "__main__": # pragma: no cover
main() | /rich-pygount-1.3.4.tar.gz/rich-pygount-1.3.4/pygount/command.py | 0.572484 | 0.180359 | command.py | pypi |
import functools
import re
from typing import Dict, Hashable
from .analysis import SourceAnalysis
_PSEUDO_LANGUAGE_REGEX = re.compile("^__[a-z]+__$")
@functools.total_ordering
class LanguageSummary:
"""
Summary of a source code counts from multiple files of the same language.
"""
def __init__(self, language: str):
self._language = language
self._code_count = 0
self._documentation_count = 0
self._empty_count = 0
self._file_count = 0
self._string_count = 0
self._is_pseudo_language = _PSEUDO_LANGUAGE_REGEX.match(self.language) is not None
@property
def language(self) -> str:
"""the language to be summarized"""
return self._language
@property
def code_count(self) -> int:
"""sum lines of code for this language"""
return self._code_count
@property
def documentation_count(self) -> int:
"""sum lines of documentation for this language"""
return self._documentation_count
@property
def empty_count(self) -> int:
"""sum empty lines for this language"""
return self._empty_count
@property
def file_count(self) -> int:
"""number of source code files for this language"""
return self._file_count
@property
def string_count(self) -> int:
"""sum number of lines containing only strings for this language"""
return self._string_count
@property
def is_pseudo_language(self) -> bool:
"""``True`` if the language is not a real programming language"""
return self._is_pseudo_language
def sort_key(self) -> Hashable:
"""sort key to sort multiple languages by importance"""
return self.code_count, self.documentation_count, self.string_count, self.empty_count, self.language
def __eq__(self, other):
return self.sort_key() == other.sort_key()
def __lt__(self, other):
return self.sort_key() < other.sort_key()
def add(self, source_analysis: SourceAnalysis) -> None:
"""
Add counts from ``source_analysis`` to total counts for this language.
"""
assert source_analysis is not None
assert source_analysis.language == self.language
self._file_count += 1
if source_analysis.is_countable:
self._code_count += source_analysis.code_count
self._documentation_count += source_analysis.documentation_count
self._empty_count += source_analysis.empty_count
self._string_count += source_analysis.string_count
def __repr__(self):
result = "{0}(language={1!r}, file_count={2}".format(self.__class__.__name__, self.language, self.file_count)
if not self.is_pseudo_language:
result += ", code_count={0}, documentation_count={1}, empty_count={2}, string_count={3}".format(
self.code_count, self.documentation_count, self.empty_count, self.string_count
)
result += ")"
return result
class ProjectSummary:
"""
Summary of source code counts for several languages and files.
"""
def __init__(self):
self._language_to_language_summary_map = {}
self._total_code_count = 0
self._total_documentation_count = 0
self._total_empty_count = 0
self._total_string_count = 0
self._total_file_count = 0
self._total_line_count = 0
@property
def language_to_language_summary_map(self) -> Dict[str, LanguageSummary]:
"""
A map containing summarized counts for each language added with :py:meth:`add()` so far.
"""
return self._language_to_language_summary_map
@property
def total_code_count(self) -> int:
return self._total_code_count
@property
def total_documentation_count(self) -> int:
return self._total_documentation_count
@property
def total_empty_count(self) -> int:
return self._total_empty_count
@property
def total_string_count(self) -> int:
return self._total_string_count
@property
def total_file_count(self) -> int:
return self._total_file_count
@property
def total_line_count(self) -> int:
return self._total_line_count
def add(self, source_analysis: SourceAnalysis) -> None:
"""
Add counts from ``source_analysis`` to total counts.
"""
self._total_file_count += 1
language_summary = self.language_to_language_summary_map.get(source_analysis.language)
if language_summary is None:
language_summary = LanguageSummary(source_analysis.language)
self.language_to_language_summary_map[source_analysis.language] = language_summary
language_summary.add(source_analysis)
if source_analysis.is_countable:
self._total_code_count += source_analysis.code_count
self._total_documentation_count += source_analysis.documentation_count
self._total_empty_count += source_analysis.empty_count
self._total_line_count += (
source_analysis.code_count
+ source_analysis.documentation_count
+ source_analysis.empty_count
+ source_analysis.string_count
)
self._total_string_count += source_analysis.string_count
def __repr__(self):
return "{0}(total_file_count={1}, total_line_count={2}, " "languages={3}".format(
self.__class__.__name__,
self.total_file_count,
self.total_line_count,
sorted(self.language_to_language_summary_map.keys()),
) | /rich-pygount-1.3.4.tar.gz/rich-pygount-1.3.4/pygount/summary.py | 0.872347 | 0.278099 | summary.py | pypi |
import codecs
import collections
import glob
import hashlib
import itertools
import logging
import os
import re
from enum import Enum
from typing import Dict, Generator, List, Optional, Pattern, Sequence, Set, Tuple, Union
import pygments.lexer
import pygments.lexers
import pygments.token
import pygments.util
import pygount.common
import pygount.lexers
import pygount.xmldialect
from pygount.common import deprecated
# Attempt to import chardet.
try:
import chardet.universaldetector
_detector = chardet.universaldetector.UniversalDetector()
except ImportError:
_detector = None
has_chardet = bool(_detector)
#: Fallback encoding to use if no encoding is specified
DEFAULT_FALLBACK_ENCODING = "cp1252"
#: Default glob patterns for folders not to analyze.
DEFAULT_FOLDER_PATTERNS_TO_SKIP_TEXT = ", ".join(
[".?*", "_svn", "__pycache__"] # Subversion hack for Windows # Python byte code
)
#: Pygments token type; we need to define our own type because pygments' ``_TokenType`` is internal.
TokenType = type(pygments.token.Token)
class SourceState(Enum):
"""
Possible values for :py:attr:`SourceAnalysis.state`.
"""
#: successfully analyzed
analyzed = 1
#: source code is a binary
binary = 2
#: source code is an identical copy of another
duplicate = 3
#: source code is empty (file size = 0)
empty = 4
#: source could not be parsed
error = 5
#: source code has been generated
generated = 6
# TODO: 'huge' = auto() # source code exceeds size limit
#: pygments does not offer any lexer to analyze the source
unknown = 7
#: Default patterns for regular expressions to detect generated code.
#: The '(?i)' indicates that the patterns are case insensitive.
DEFAULT_GENERATED_PATTERNS_TEXT = pygount.common.REGEX_PATTERN_PREFIX + ", ".join(
[
r"(?i).*automatically generated",
r"(?i).*do not edit",
r"(?i).*generated with the .+ utility",
r"(?i).*this is a generated file",
r"(?i).*generated automatically",
]
)
#: Default glob patterns for file names not to analyze.
DEFAULT_NAME_PATTERNS_TO_SKIP_TEXT = ", ".join([".*", "*~"])
_log = logging.getLogger("pygount")
_MARK_TO_NAME_MAP = (("c", "code"), ("d", "documentation"), ("e", "empty"), ("s", "string"))
_BOM_TO_ENCODING_MAP = collections.OrderedDict(
(
# NOTE: We need an ordered dict due to the overlap between utf-32-le and utf-16-be.
(codecs.BOM_UTF8, "utf-8-sig"),
(codecs.BOM_UTF32_LE, "utf-32-le"),
(codecs.BOM_UTF16_BE, "utf-16-be"),
(codecs.BOM_UTF16_LE, "utf-16-le"),
(codecs.BOM_UTF32_BE, "utf-32-be"),
)
)
_XML_PROLOG_REGEX = re.compile(r'<\?xml\s+.*encoding="(?P<encoding>[-_.a-zA-Z0-9]+)".*\?>')
_CODING_MAGIC_REGEX = re.compile(r".+coding[:=][ \t]*(?P<encoding>[-_.a-zA-Z0-9]+)\b", re.DOTALL)
_STANDARD_PLAIN_TEXT_NAME_PATTERNS = (
# Text files for (moribund) gnits standards.
"authors",
"bugs",
"changelog",
"copying",
"install",
"license",
"news",
"readme",
"thanks",
# Other common text files.
"changes",
"faq",
"readme\\.1st",
"read\\.me",
"todo",
)
_PLAIN_TEXT_PATTERN = "(^" + "$)|(^".join(_STANDARD_PLAIN_TEXT_NAME_PATTERNS) + "$)"
#: Regular expression to detect plain text files by name.
_PLAIN_TEXT_NAME_REGEX = re.compile(_PLAIN_TEXT_PATTERN, re.IGNORECASE)
#: Mapping for file suffixes to lexers for which pygments offers no official one.
_SUFFIX_TO_FALLBACK_LEXER_MAP = {
"fex": pygount.lexers.MinimalisticWebFocusLexer(),
"idl": pygount.lexers.IdlLexer(),
"m4": pygount.lexers.MinimalisticM4Lexer(),
"txt": pygount.lexers.PlainTextLexer(),
"vbe": pygount.lexers.MinimalisticVBScriptLexer(),
"vbs": pygount.lexers.MinimalisticVBScriptLexer(),
}
for _oracle_suffix in ("pck", "pkb", "pks", "pls"):
_SUFFIX_TO_FALLBACK_LEXER_MAP[_oracle_suffix] = pygments.lexers.get_lexer_by_name("plpgsql")
class DuplicatePool:
"""
A pool that collects information about potential duplicate files.
"""
def __init__(self):
self._size_to_paths_map = {}
self._size_and_hash_to_path_map = {}
@staticmethod
def _hash_for(path_to_hash):
buffer_size = 1024 * 1024
md5_hash = hashlib.md5()
with open(path_to_hash, "rb", buffer_size) as file_to_hash:
data = file_to_hash.read(buffer_size)
while len(data) >= 1:
md5_hash.update(data)
data = file_to_hash.read(buffer_size)
return md5_hash.digest()
def duplicate_path(self, source_path: str) -> Optional[str]:
"""
Path to a duplicate for ``source_path`` or ``None`` if no duplicate exists.
Internally information is stored to identify possible future duplicates of
``source_path``.
"""
result = None
source_size = os.path.getsize(source_path)
paths_with_same_size = self._size_to_paths_map.get(source_size)
if paths_with_same_size is None:
self._size_to_paths_map[source_size] = [source_path]
else:
source_hash = DuplicatePool._hash_for(source_path)
if len(paths_with_same_size) == 1:
# Retrofit the initial path with the same size and its hash.
initial_path_with_same_size = paths_with_same_size[0]
initial_hash = DuplicatePool._hash_for(initial_path_with_same_size)
self._size_and_hash_to_path_map[(source_size, initial_hash)] = initial_path_with_same_size
result = self._size_and_hash_to_path_map.get((source_size, source_hash))
self._size_and_hash_to_path_map[(source_size, source_hash)] = source_path
return result
class SourceAnalysis:
"""
Results from analyzing a source path.
Prefer the factory methods :py:meth:`from_file()` and :py:meth:`from_state` to
calling the constructor.
"""
def __init__(
self,
path: str,
language: str,
group: str,
code: int,
documentation: int,
empty: int,
string: int,
state: SourceState,
state_info: Optional[str] = None,
):
SourceAnalysis._check_state_info(state, state_info)
self._path = path
self._language = language
self._group = group
self._code = code
self._documentation = documentation
self._empty = empty
self._string = string
self._state = state
self._state_info = state_info
@staticmethod
def from_state(
source_path: str, group: str, state: SourceState, state_info: Optional[str] = None
) -> "SourceAnalysis":
"""
Factory method to create a :py:class:`SourceAnalysis` with all counts
set to 0 and everything else according to the specified parameters.
"""
assert source_path is not None
assert group is not None
assert state != SourceState.analyzed, "use from() for analyzable sources"
SourceAnalysis._check_state_info(state, state_info)
return SourceAnalysis(
path=source_path,
language="__{0}__".format(state.name),
group=group,
code=0,
documentation=0,
empty=0,
string=0,
state=state,
state_info=state_info,
)
@staticmethod
def _check_state_info(state: SourceState, state_info: Optional[str]):
states_that_require_state_info = [SourceState.duplicate, SourceState.error, SourceState.generated]
assert (state in states_that_require_state_info) == (
state_info is not None
), "state=%s and state_info=%s but state_info must be specified for the following states: %s" % (
state,
state_info,
states_that_require_state_info,
)
@staticmethod
def from_file(
source_path: str,
group: str,
encoding: str = "automatic",
fallback_encoding: str = "cp1252",
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool: Optional[DuplicatePool] = None,
) -> "SourceAnalysis":
"""
Factory method to create a :py:class:`SourceAnalysis` by analyzing
the source code in ``source_path``.
:param source_path: path to source code to analyze
:param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:param generated_regexes: list of regular expressions that, if found within the first few lines
of a source file, identify it as generated source code for which SLOC should not be counted
:param duplicate_pool: a :class:`DuplicatePool` where information about possible duplicates is
collected, or ``None`` if possible duplicates should be counted multiple times.
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info("%s: is empty", source_path)
result = SourceAnalysis.from_state(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info("%s: is binary", source_path)
result = SourceAnalysis.from_state(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info("%s: unknown language", source_path)
result = SourceAnalysis.from_state(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info("%s: is a duplicate of %s", source_path, duplicate_path)
result = SourceAnalysis.from_state(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ("automatic", "chardet"):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, "r", encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning("cannot read %s using encoding %s: %s", source_path, encoding, error)
result = SourceAnalysis.from_state(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(pygount.common.lines(source_code), generated_regexes)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = "line {0} matches {1}".format(number, regex)
_log.info("%s: is generated code because %s", source_path, message)
result = SourceAnalysis.from_state(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ("xml" in language.lower()) or (language == "Genshi"):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info("%s: analyze as %s using encoding %s", source_path, language, encoding)
mark_to_count_map = {"c": 0, "d": 0, "e": 0, "s": 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = "e"
for mark_to_check in ("d", "s", "c"):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map["c"],
documentation=mark_to_count_map["d"],
empty=mark_to_count_map["e"],
string=mark_to_count_map["s"],
state=SourceState.analyzed,
state_info=None,
)
assert result is not None
return result
@property
def path(self) -> str:
return self._path
@property
def language(self) -> str:
"""
The programming language the analyzed source code is written in; if
:py:attr:`state` does not equal :py:attr:`SourceState.analyzed` this
will be a pseudo language.
"""
return self._language
@property
def group(self) -> str:
"""
Group the source code belongs to; this can be any text useful to group
the files later on. It is perfectly valid to put all files in the same
group.
(Note: this property is mostly there for compatibility with the
original SLOCCount.)
"""
return self._group
@property
def code_count(self) -> int:
"""number of lines containing code"""
return self._code
@property
def documentation_count(self) -> int:
"""number of lines containing documentation (resp. comments)"""
return self._documentation
@property
def empty_count(self) -> int:
"""
number of empty lines, including lines containing only white space,
white characters or white code words
See also: :py:func:`white_characters`, :py:func:`white_code_words`
"""
return self._empty
@property
def string_count(self) -> int:
"""number of lines containing only strings but no other code"""
return self._string
@property
def code(self) -> int:
# TODO #47: Remove deprecated property.
return self._code
@property
def documentation(self) -> int:
# TODO #47: Remove deprecated property.
return self._documentation
@property
def empty(self) -> int:
# TODO #47: Remove deprecated property.
return self._empty
@property
def string(self) -> int:
# TODO #47: Remove deprecated property.
return self._string
@property
def state(self) -> SourceState:
"""
The state of the analysis after parsing the source file.
"""
return self._state
@property
def state_info(self) -> Optional[Union[str, Exception]]:
"""
Possible additional information about :py:attr:`state`:
* :py:attr:`SourceState.duplicate`: path to the original source file
the :py:attr:`path` is a duplicate of
* :py:attr:`SourceState.error`: the :py:exc:`Exception` causing the
error
* :py:attr:`SourceState.generated`: a human readable explanation why
the file is considered to be generated
"""
return self._state_info
@property
def is_countable(self) -> bool:
"""
``True`` if source counts can be counted towards a total.
"""
return self.state in (SourceState.analyzed, SourceState.duplicate)
def __repr__(self):
result = "{0}(path={1!r}, language={2!r}, group={3!r}, state={4}".format(
self.__class__.__name__, self.path, self.language, self.group, self.state.name
)
if self.state == SourceState.analyzed:
result += ", code_count={0}, documentation_count={1}, empty_count={2}, string_count={3}".format(
self.code_count, self.documentation_count, self.empty_count, self.string_count
)
if self.state_info is not None:
result += ", state_info={0!r}".format(self.state_info)
result += ")"
return result
class SourceScanner:
"""
Scanner for source code files matching certain conditions.
"""
def __init__(
self,
source_patterns,
suffixes="*",
folders_to_skip=pygount.common.regexes_from(DEFAULT_FOLDER_PATTERNS_TO_SKIP_TEXT),
name_to_skip=pygount.common.regexes_from(DEFAULT_NAME_PATTERNS_TO_SKIP_TEXT),
):
self._source_patterns = source_patterns
self._suffixes = pygount.common.regexes_from(suffixes)
self._folder_regexps_to_skip = folders_to_skip
self._name_regexps_to_skip = name_to_skip
@property
def source_patterns(self):
return self._source_patterns
@property
def suffixes(self):
return self._suffixes
@property
def folder_regexps_to_skip(self):
return self._folder_regexps_to_skip
@folder_regexps_to_skip.setter
def folder_regexps_to_skip(self, regexps_or_pattern_text):
self._folder_regexps_to_skip = pygount.common.regexes_from(
regexps_or_pattern_text, self.folder_regexps_to_skip
)
@property
def name_regexps_to_skip(self):
return self._name_regexps_to_skip
@name_regexps_to_skip.setter
def name_regexps_to_skip(self, regexps_or_pattern_text):
self._name_regexps_to_skip = pygount.common.regexes_from(regexps_or_pattern_text, self.name_regexps_to_skip)
def _is_path_to_skip(self, name, is_folder):
assert os.sep not in name, "name=%r" % name
regexps_to_skip = self._folder_regexps_to_skip if is_folder else self._name_regexps_to_skip
return any(path_name_to_skip_regex.match(name) is not None for path_name_to_skip_regex in regexps_to_skip)
def _paths_and_group_to_analyze_in(self, folder, group):
assert folder is not None
assert group is not None
for name in os.listdir(folder):
path = os.path.join(folder, name)
if not os.path.islink(path):
is_folder = os.path.isdir(path)
if self._is_path_to_skip(os.path.basename(path), is_folder):
_log.debug("skip due to matching skip pattern: %s", path)
elif is_folder:
yield from self._paths_and_group_to_analyze_in(path, group)
else:
yield path, group
def _paths_and_group_to_analyze(self, path_to_analyse_pattern, group=None):
for path_to_analyse in glob.glob(path_to_analyse_pattern):
if os.path.islink(path_to_analyse):
_log.debug("skip link: %s", path_to_analyse)
else:
is_folder = os.path.isdir(path_to_analyse)
if self._is_path_to_skip(os.path.basename(path_to_analyse), is_folder):
_log.debug("skip due to matching skip pattern: %s", path_to_analyse)
else:
actual_group = group
if is_folder:
if actual_group is None:
actual_group = os.path.basename(path_to_analyse)
if actual_group == "":
# Compensate for trailing path separator.
actual_group = os.path.basename(os.path.dirname(path_to_analyse))
yield from self._paths_and_group_to_analyze_in(path_to_analyse, actual_group)
else:
if actual_group is None:
actual_group = os.path.dirname(path_to_analyse)
if actual_group == "":
actual_group = os.path.basename(os.path.dirname(os.path.abspath(path_to_analyse)))
yield path_to_analyse, actual_group
def _source_paths_and_groups_to_analyze(self, patterns_to_analyze):
assert patterns_to_analyze is not None
result = []
for pattern in patterns_to_analyze:
try:
result.extend(self._paths_and_group_to_analyze(pattern))
except OSError as error:
raise OSError('cannot scan "{0}" for source files: {1}'.format(pattern, error))
result = sorted(set(result))
return result
def source_paths(self) -> Generator[Tuple[str, str], None, None]:
"""
Tuples of (source path, group) for source code files matching all the conditions of this scanner.
"""
source_paths_and_groups_to_analyze = self._source_paths_and_groups_to_analyze(self.source_patterns)
for source_path, group in source_paths_and_groups_to_analyze:
suffix = os.path.splitext(source_path)[1].lstrip(".")
is_suffix_to_analyze = any(suffix_regexp.match(suffix) for suffix_regexp in self.suffixes)
if is_suffix_to_analyze:
yield source_path, group
else:
_log.info("skip due to suffix: %s", source_path)
_LANGUAGE_TO_WHITE_WORDS_MAP = {"batchfile": {"@"}, "python": {"pass"}, "sql": {"begin", "end"}}
for _language in _LANGUAGE_TO_WHITE_WORDS_MAP.keys():
assert _language.islower()
def matching_number_line_and_regex(
source_lines: Sequence[str], generated_regexes: Sequence[Pattern], max_line_count: int = 15
) -> Optional[Tuple[int, str, Pattern]]:
"""
The first line and its number (starting with 0) in the source code that
indicates that the source code is generated.
:param source_lines: lines of text to scan
:param generated_regexes: regular expressions a line must match to indicate
the source code is generated.
:param max_line_count: maximum number of lines to scan
:return: a tuple of the form ``(number, line, regex)`` or ``None`` if the
source lines do not match any ``generated_regexes``.
"""
initial_numbers_and_lines = enumerate(itertools.islice(source_lines, max_line_count))
matching_number_line_and_regexps = (
(number, line, matching_regex)
for number, line in initial_numbers_and_lines
for matching_regex in generated_regexes
if matching_regex.match(line)
)
possible_first_matching_number_line_and_regexp = list(itertools.islice(matching_number_line_and_regexps, 1))
result = (
possible_first_matching_number_line_and_regexp[0] if possible_first_matching_number_line_and_regexp else None
)
return result
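# Illustrative usage (sketch, not part of the original module; the regex list mirrors
# what pygount.common.regexes_from() produces):
#     generated_regexes = [re.compile(r".*generated by.*", re.IGNORECASE)]
#     matching_number_line_and_regex(["# generated by protoc", "x = 1"], generated_regexes)
#     # -> (0, "# generated by protoc", generated_regexes[0])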
def white_characters(language_id: str) -> str:
"""
Characters that count as white space if they are the only characters in a
line.
"""
assert language_id is not None
assert language_id.islower()
return "(),:;[]{}"
def white_code_words(language_id: str) -> Set[str]:
"""
Words that do not count as code if they are the only word in a line.
"""
assert language_id is not None
assert language_id.islower()
return _LANGUAGE_TO_WHITE_WORDS_MAP.get(language_id, set())
def _delined_tokens(tokens: Sequence[Tuple[TokenType, str]]) -> Generator[Tuple[TokenType, str], None, None]:
for token_type, token_text in tokens:
newline_index = token_text.find("\n")
while newline_index != -1:
yield token_type, token_text[: newline_index + 1]
token_text = token_text[newline_index + 1 :]
newline_index = token_text.find("\n")
if token_text != "":
yield token_type, token_text
def _pythonized_comments(tokens: Sequence[Tuple[TokenType, str]]) -> Generator[Tuple[TokenType, str], None, None]:
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ":":
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(" \f\n\r\t")) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text
def _line_parts(lexer: pygments.lexer.Lexer, text: str) -> Generator[Set[str], None, None]:
line_marks = set()
tokens = _delined_tokens(lexer.get_tokens(text))
if lexer.name == "Python":
tokens = _pythonized_comments(tokens)
language_id = lexer.name.lower()
white_text = " \f\n\r\t" + white_characters(language_id)
white_words = white_code_words(language_id)
for token_type, token_text in tokens:
# NOTE: Pygments treats preprocessor statements as special comments.
is_actual_comment = token_type in pygments.token.Comment and token_type not in (
pygments.token.Comment.Preproc,
pygments.token.Comment.PreprocFile,
)
if is_actual_comment:
line_marks.add("d") # 'documentation'
elif token_type in pygments.token.String:
line_marks.add("s") # 'string'
else:
is_white_text = (token_text.strip() in white_words) or (token_text.rstrip(white_text) == "")
if not is_white_text:
line_marks.add("c") # 'code'
if token_text.endswith("\n"):
yield line_marks
line_marks = set()
if len(line_marks) >= 1:
yield line_marks
def encoding_for(source_path: str, encoding: str = "automatic", fallback_encoding: Optional[str] = None) -> str:
"""
The encoding used by the text file stored in ``source_path``.
The algorithm used is:
* If ``encoding`` is ``'automatic'``, attempt the following:
1. Check BOM for UTF-8, UTF-16 and UTF-32.
2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
3. Read the file using UTF-8.
4. If all this fails, use the ``fallback_encoding`` and ignore any
further encoding errors.
* If ``encoding`` is ``'chardet'``, use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value.
"""
assert encoding is not None
if encoding == "automatic":
with open(source_path, "rb") as source_file:
heading = source_file.read(128)
result = None
if len(heading) == 0:
# File is empty, assume a dummy encoding.
result = "utf-8"
if result is None:
# Check for known BOMs.
for bom, encoding in _BOM_TO_ENCODING_MAP.items():
if heading[: len(bom)] == bom:
result = encoding
break
if result is None:
# Look for common headings that indicate the encoding.
ascii_heading = heading.decode("ascii", errors="replace")
ascii_heading = ascii_heading.replace("\r\n", "\n")
ascii_heading = ascii_heading.replace("\r", "\n")
ascii_heading = "\n".join(ascii_heading.split("\n")[:2]) + "\n"
coding_magic_match = _CODING_MAGIC_REGEX.match(ascii_heading)
if coding_magic_match is not None:
result = coding_magic_match.group("encoding")
else:
first_line = ascii_heading.split("\n")[0]
xml_prolog_match = _XML_PROLOG_REGEX.match(first_line)
if xml_prolog_match is not None:
result = xml_prolog_match.group("encoding")
elif encoding == "chardet":
assert (
_detector is not None
), 'without chardet installed, encoding="chardet" must be rejected before calling encoding_for()'
_detector.reset()
with open(source_path, "rb") as source_file:
for line in source_file.readlines():
_detector.feed(line)
if _detector.done:
break
result = _detector.result["encoding"]
if result is None:
_log.warning(
"%s: chardet cannot determine encoding, assuming fallback encoding %s", source_path, fallback_encoding
)
result = fallback_encoding
else:
# Simply use the specified encoding.
result = encoding
if result is None:
# Encoding 'automatic' or 'chardet' failed to detect anything.
if fallback_encoding is not None:
# If defined, use the fallback encoding.
result = fallback_encoding
else:
try:
# Attempt to read the file as UTF-8.
with open(source_path, "r", encoding="utf-8") as source_file:
source_file.read()
result = "utf-8"
except UnicodeDecodeError:
# UTF-8 did not work out, use the default as last resort.
result = DEFAULT_FALLBACK_ENCODING
_log.debug("%s: no fallback encoding specified, using %s", source_path, result)
assert result is not None
return result
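# Illustrative usage (sketch; the file names below are hypothetical):
#     encoding_for("script.py")                                       # BOM / magic comment / UTF-8 probing
#     encoding_for("legacy.txt", "automatic", fallback_encoding="cp1252")
#     encoding_for("data.csv", encoding="cp850")                      # use the specified encoding as-is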
@deprecated("use {0}.{1}".format(SourceAnalysis.__name__, SourceAnalysis.from_state.__name__))
def pseudo_source_analysis(source_path, group, state, state_info=None):
return SourceAnalysis.from_state(source_path, group, state, state_info)
#: BOMs to indicate that a file is a text file even if it contains zero bytes.
_TEXT_BOMS = (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE, codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE, codecs.BOM_UTF8)
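# Heuristic: treat a file as binary if its first 8 KiB contain a NUL byte and do not
# start with one of the known text BOMs above.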
def is_binary_file(source_path: str) -> bool:
with open(source_path, "rb") as source_file:
initial_bytes = source_file.read(8192)
return not any(initial_bytes.startswith(bom) for bom in _TEXT_BOMS) and b"\0" in initial_bytes
def is_plain_text(source_path):
return _PLAIN_TEXT_NAME_REGEX.match(os.path.basename(source_path))
def has_lexer(source_path: str) -> bool:
"""
Initial quick check if there is a lexer for ``source_path``. This removes
the need for calling :py:func:`pygments.lexers.guess_lexer_for_filename()`
which fully reads the source file.
"""
result = bool(pygments.lexers.find_lexer_class_for_filename(source_path))
if not result:
suffix = os.path.splitext(os.path.basename(source_path))[1].lstrip(".")
result = suffix in _SUFFIX_TO_FALLBACK_LEXER_MAP
return result
def guess_lexer(source_path: str, text: str) -> pygments.lexer.Lexer:
if is_plain_text(source_path):
result = pygount.lexers.PlainTextLexer()
else:
try:
result = pygments.lexers.guess_lexer_for_filename(source_path, text)
except pygments.util.ClassNotFound:
suffix = os.path.splitext(os.path.basename(source_path))[1].lstrip(".")
result = _SUFFIX_TO_FALLBACK_LEXER_MAP.get(suffix)
return result
@deprecated("use {0}.{1}".format(SourceAnalysis.__name__, SourceAnalysis.from_file.__name__))
def source_analysis(
source_path,
group,
encoding="automatic",
fallback_encoding="cp1252",
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool: Optional[DuplicatePool] = None,
):
return SourceAnalysis.from_file(source_path, group, encoding, fallback_encoding, generated_regexes, duplicate_pool) | /rich-pygount-1.3.4.tar.gz/rich-pygount-1.3.4/pygount/analysis.py | 0.584745 | 0.196248 | analysis.py | pypi |
import typing as t
from collections import defaultdict
from dataclasses import dataclass
from rich import box
from rich.columns import Columns
from rich.console import ConsoleRenderable
from rich.panel import Panel
from rich.syntax import Syntax
from rich.table import Table
from rich_tables.utils import (
FIELDS_MAP,
JSONDict,
border_panel,
colored_with_bg,
format_with_color,
get_val,
list_table,
md_panel,
new_table,
predictably_random_color,
simple_panel,
time2human,
wrap,
)
class Diff(t.TypedDict):
additions: int
deletions: int
class File(Diff):
path: str
class Commit(Diff):
committedDate: str
statusCheckRollup: str
message: str
class Reaction(t.TypedDict):
user: str
content: str
class Content(t.TypedDict):
createdAt: str
author: str
body: str
class IssueComment(Content):
reactions: t.List[Reaction]
class ReviewComment(IssueComment):
outdated: bool
path: str
diffHunk: str
pullRequestReview: str
class ReviewThread(t.TypedDict):
path: str
isResolved: bool
isOutdated: bool
resolvedBy: str
comments: t.List[ReviewComment]
class Review(Content):
id: str
state: str
threads: t.List[ReviewThread]
def make(self):
return self["id"]
@dataclass
class PullRequest:
id: str
additions: int
author: str
body: str
comments: t.List[IssueComment]
commits: t.List[Commit]
createdAt: str
deletions: int
files: t.List[File]
labels: t.List[str]
participants: t.List[str]
repository: str
reviewDecision: str
reviewRequests: t.List[str]
reviewThreads: t.List[ReviewThread]
reviews: t.List[Review]
state: str
title: str
updatedAt: str
url: str
def fmt_add_del(file: JSONDict) -> t.List[str]:
added, deleted = file["additions"], file["deletions"]
additions = f"+{added}" if added else ""
deletions = f"-{deleted}" if deleted else ""
return [wrap(additions.rjust(5), "b green"), wrap(deletions.rjust(3), "b red")]
def state_color(state: str) -> str:
return {
"True": "green",
True: "green",
"APPROVED": "green",
"RESOLVED": "s green",
"OPEN": "green",
"MERGED": "magenta",
"PENDING": "yellow",
"OUTDATED": "yellow",
"COMMENTED": "yellow",
"CHANGES_REQUESTED": "yellow",
"REVIEW_REQUIRED": "red",
"DISMISSED": "gray42",
"False": "red",
}.get(state, "default")
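# Illustrative mapping (sketch): state_color("MERGED") -> "magenta", state_color("OPEN") -> "green";
# unknown states fall back to "default".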
def fmt_state(state: str) -> str:
return wrap(state, f"b {state_color(state)}")
def resolved_border_style(resolved: bool) -> str:
return {True: "green", False: "yellow"}.get(resolved, "")
def top_level_comment_title(comment: Content) -> str:
return " ".join(get_val(comment, f) for f in ["state", "author", "createdAt"])
def comment_panel(comment: IssueComment, **kwargs: t.Any) -> Panel:
reactions = [
wrap(f":{r['content'].lower()}:", "bold") + " " + get_val(r, "user")
for r in comment.get("reactions", [])
]
return md_panel(
comment["body"].replace("suggestion", "python"),
title=top_level_comment_title(comment),
subtitle="\n".join(reactions) + "\n",
**kwargs,
)
def resolved_title(thread: ReviewThread) -> str:
if thread["isResolved"]:
resolved = (
fmt_state("RESOLVED")
+ wrap(" by ", "white")
+ format_with_color(thread["resolvedBy"])
)
else:
resolved = fmt_state("PENDING")
return " ".join(
[
wrap(thread["path"], "b magenta"),
resolved,
fmt_state("OUTDATED") if thread["isOutdated"] else "",
]
)
def diff_panel(title: str, rows: t.List[t.List]) -> Panel:
return border_panel(
new_table(rows=rows),
title=title,
border_style=f"dim {predictably_random_color(title)}",
)
def make_thread(thread: ReviewThread) -> Panel:
comments = thread["comments"][-1:] if thread["isResolved"] else thread["comments"]
comments_col = list_table(map(comment_panel, comments), padding=(1, 0, 0, 0))
diff = Syntax(
thread["comments"][0]["diffHunk"],
"diff",
theme="paraiso-dark",
background_color="black",
)
return border_panel(
new_table(rows=[[diff, simple_panel(comments_col)]], highlight=False),
highlight=False,
border_style=resolved_border_style(thread["isResolved"]),
title=resolved_title(thread),
)
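# Renderers for PullRequest-specific fields; pulls_table() below merges this mapping
# into the shared FIELDS_MAP before rendering.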
PR_FIELDS_MAP = {
"state": lambda x: wrap(fmt_state(x), "b"),
"reviewDecision": lambda x: wrap(fmt_state(x), "b"),
"dates": lambda x: new_table(
rows=[
[wrap(r" ⬤ ", "b green"), time2human(x[0])],
[wrap(r" ◯ ", "b yellow"), time2human(x[1])],
]
),
"path": lambda x: wrap(x, "b"),
"message": lambda x: wrap(x, "i"),
"files": lambda files: diff_panel(
"files", [[*fmt_add_del(f), get_val(f, "path")] for f in files]
),
"commits": lambda commits: diff_panel(
"commits",
[
[
*fmt_add_del(commit),
get_val(commit, "message"),
get_val(commit, "committedDate"),
]
for commit in commits
],
),
"reviewRequests": lambda x: " ".join(map(colored_with_bg, x)),
"participants": lambda x: "\n".join(
map(format_with_color, map("{:^20}".format, x)) # noqa
),
}
class PullRequestTable(PullRequest):
def make_info_subpanel(self, attr: str):
return simple_panel(
get_val(self, attr),
title=wrap(attr, "b"),
title_align="center",
expand=True,
align="center",
)
@property
def repo(self) -> str:
return wrap(self.repository, f"b {predictably_random_color(self.repository)}")
@property
def dates(self) -> t.Tuple[str, str]:
return self.createdAt, self.updatedAt
@property
def pr_state(self) -> str:
return "MERGED" if self.state == "MERGED" else self.reviewDecision
@property
def info(self) -> Panel:
fields = "author", "dates", "participants", "reviewRequests"
return border_panel(
new_table(
rows=[
[
Columns(
map(self.make_info_subpanel, fields),
align="center",
expand=True,
equal=True,
)
],
[md_panel(self.body)],
]
),
title=self.repo,
box=box.DOUBLE_EDGE,
border_style=state_color(self.pr_state),
subtitle=(
f"[b]{fmt_state(self.reviewDecision)}[white] // "
+ f"{fmt_state(self.state)}[/]"
),
align="center",
title_align="center",
subtitle_align="center",
)
@property
def files_commits(self) -> Table:
return new_table(rows=[[get_val(self, "files"), get_val(self, "commits")]])
@staticmethod
def format_comment(comment: t.Union[Review, IssueComment]) -> Panel:
if "id" in comment:
comment["threads"].sort(key=lambda t: t["isResolved"])
resolved_count = sum((t["isResolved"] for t in comment["threads"]))
total_count = len(comment["threads"])
status = wrap(" ⬤ " * resolved_count, "b green") + wrap(
" ◯ " * (total_count - resolved_count), "b red"
)
return border_panel(
list_table(
[
simple_panel(md_panel(comment["body"])),
*map(make_thread, comment["threads"]),
]
),
subtitle=comment["state"],
border_style=state_color(comment["state"]),
title=top_level_comment_title(comment) + f" {status} resolved",
box=box.HEAVY,
)
return comment_panel(comment, border_style="yellow", box=box.HEAVY)
@property
def reviews_and_comments(self) -> t.List[t.Union[Review, IssueComment]]:
return self.reviews + self.comments
@property
def top_level_comments(self) -> t.Iterator[Panel]:
comments = sorted(self.reviews_and_comments, key=lambda c: c["createdAt"])
for comment in comments:
yield self.format_comment(comment)
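# pulls_table() is the entry point: it renders the first pull request in `data` as an
# info panel, a files/commits diff table, and the chronologically sorted reviews and comments.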
def pulls_table(
data: t.List[PullRequest],
) -> t.Iterable[t.Union[str, ConsoleRenderable]]:
FIELDS_MAP.update(PR_FIELDS_MAP)
pr = data[0]
pr_table = PullRequestTable(**pr)
pr_table.reviews = [
r for r in pr_table.reviews if r["state"] != "COMMENTED" or r["body"]
]
yield pr_table.info
yield pr_table.files_commits
review_id_to_threads = defaultdict(list)
for thread in pr_table.reviewThreads:
review_id_to_threads[thread["comments"][0]["pullRequestReview"]].append(thread)
for review in pr_table.reviews:
review["threads"] = review_id_to_threads[review["id"]]
yield from pr_table.top_level_comments | /rich_tables-0.3.0.tar.gz/rich_tables-0.3.0/src/rich_tables/github.py | 0.636918 | 0.329958 | github.py | pypi |
import itertools as it
import operator as op
import re
from collections import defaultdict
from functools import lru_cache, partial
from typing import Any, Dict, Iterable, List, Tuple
from rich import box
from rich.align import Align
from rich.console import ConsoleRenderable, Group
from rich.panel import Panel
from rich.table import Table
from rich_tables.utils import (
DISPLAY_HEADER,
FIELDS_MAP,
border_panel,
new_table,
predictably_random_color,
simple_panel,
wrap,
)
JSONDict = Dict[str, Any]
TRACK_FIELDS = [
"track",
"length",
"artist",
"title",
"bpm",
"last_played",
"plays",
"skips",
"helicopta",
"hidden",
]
ALBUM_IGNORE = set(TRACK_FIELDS) | {
"album_color",
"albumartist_color",
"album",
"album_title",
"comments",
"genre",
"tracktotal",
"albumartist",
}
new_table = partial(new_table, collapse_padding=True, expand=True, padding=0)
def get_header(key: str) -> str:
return DISPLAY_HEADER.get(key, key)
@lru_cache(maxsize=128)
def get_val(track: Tuple[Tuple[str, Any], ...], field: str) -> Any:
trackdict = dict(track)
return FIELDS_MAP[field](trackdict[field]) if trackdict.get(field) else ""
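# get_val() is cached with lru_cache, so callers pass the track as a hashable tuple of
# (key, value) items rather than a dict (see get_vals() below).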
def get_vals(
fields: Iterable[str], tracks: Iterable[JSONDict]
) -> Iterable[Iterable[str]]:
return [[get_val(tuple(t.items()), f) for f in fields] for t in tracks]
def tracks_table(tracks, fields, color, sort):
# type: (List[JSONDict], Iterable[str], str, bool) -> Table
return new_table(
*map(get_header, fields),
rows=get_vals(
fields,
sorted(tracks, key=op.methodcaller("get", "track", "")) if sort else tracks,
),
border_style=color,
padding=(0, 0, 0, 1),
)
def album_stats(tracks: List[JSONDict]) -> JSONDict:
def agg(field: str, default: int = 0) -> Iterable[int]:
return ((x.get(field) or default) for x in tracks)
stats: JSONDict = dict(
bpm=round(sum(agg("bpm")) / len(tracks)),
rating=round(sum(agg("rating")) / len(tracks), 2),
plays=sum(agg("plays")),
skips=sum(agg("skips")),
mtime=max(agg("mtime")),
last_played=max(agg("last_played")),
tracktotal=(str(len(tracks)), str(tracks[0].get("tracktotal") or 0)),
comments="\n---\n---\n".join(set(agg("comments", ""))),
)
return stats
def add_colors(album: JSONDict) -> None:
for field in "album", "albumartist":
val = (album.get(field) or "").replace("Various Artists", "VA")
color = predictably_random_color(val)
album[f"{field}_color"] = color
val = album.get(field)
album[field] = wrap(val, f"b i {color}") if val else ""
def format_title(title: str) -> str:
return wrap(f" {title} ", "i white on grey3")
def album_title(album: JSONDict) -> Table:
name = album["album"]
artist = album.get("albumartist") or album.get("artist")
genre = album.get("genre") or ""
released = album.get("released", "")
return new_table(
rows=[[format_title(f"{name} by {artist}"), format_title(released), genre]]
)
def album_info(tracks: List[JSONDict]) -> JSONDict:
first = tracks[0]
fields = sorted([f for f in tracks[0] if f not in TRACK_FIELDS])
album = defaultdict(str, zip(fields, op.itemgetter(*fields)(first)))
album.update(**album_stats(tracks))
add_colors(album)
for field, val in filter(op.truth, sorted(album.items())):
album[field] = get_val(tuple(album.items()), field)
album["album_title"] = album_title(album)
return album
def album_info_table(album: JSONDict) -> Table:
def should_display(keyval: Tuple[str, Any]) -> bool:
return keyval[1] and keyval[0] not in ALBUM_IGNORE
items = filter(should_display, sorted(album.items()))
table = new_table(rows=map(lambda x: (get_header(x[0]), x[1]), items))
table.columns[0].style = "b " + album["album_color"]
return table
def album_panel(tracks: List[JSONDict]) -> Panel:
album = album_info(tracks)
# ignore the artist field if only one distinct artist is found
track_fields = [
t
for t in TRACK_FIELDS
if t != "artist" or len(set(x.get("artist") for x in tracks)) > 1
]
tracklist = tracks_table(tracks, track_fields, album["album_color"], sort=True)
_, track = max(map(op.itemgetter("last_played", "track"), tracks))
if int(re.sub(r"\D", "", str(track).replace("A", "1"))) > 0:
row_no = tracklist.columns[0]._cells.index(str(track))
tracklist.rows[row_no].style = "b white on #000000"
tracklist.add_row(
*[album.get(k) or "" for k in ["tracktotal", *track_fields[1:]]],
style="d white on grey11",
)
comments = album.get("comments")
return border_panel(
Group(
album["album_title"],
Align.center(
simple_panel(comments, style="grey54", expand=True, align="center")
)
if comments
else "",
new_table(rows=[[album_info_table(album), tracklist]]),
),
box=box.DOUBLE_EDGE,
style=album["albumartist_color"],
)
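# albums_table() groups the flat track list by album (treating single releases as a
# per-label "singles" album) and yields one bordered panel per album.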
def albums_table(all_tracks: List[JSONDict]) -> Iterable[ConsoleRenderable]:
def get_album(track: JSONDict) -> str:
return track.get("album") or ""
for track in all_tracks:
if not track["album"] and "single" in track.get("albumtype", ""):
track["album"] = "singles"
track["albumartist"] = track["label"]
for album_name, tracks in it.groupby(sorted(all_tracks, key=get_album), get_album):
yield album_panel(list(tracks)) | /rich_tables-0.3.0.tar.gz/rich_tables-0.3.0/src/rich_tables/music.py | 0.618896 | 0.262549 | music.py | pypi |
from __future__ import annotations
import logging
import re
from dataclasses import dataclass
from queue import Queue
from signal import SIGWINCH
from typing import Iterator, List, Optional
from prompt_toolkit.key_binding import KeyPress
from prompt_toolkit.keys import Keys
from returns.result import safe
from rich.console import Console, ConsoleOptions, ConsoleRenderable, RenderResult
from rich.style import Style
from rich.text import Span, Text
from rich_tea import events
from rich_tea.events import Signal
from rich_tea.util import saturating_add, saturating_sub
logger = logging.getLogger(__name__)
@dataclass
class TextCursor:
text: str
cursor: int
@classmethod
def empty(cls) -> TextCursor:
return TextCursor(text="", cursor=0)
def bump_left(self):
self.cursor = saturating_sub(self.cursor, 1, 0)
def bump_right(self):
self.cursor = saturating_add(self.cursor, 1, len(self.text))
def jump_to_left(self):
self.cursor = 0
def jump_to_right(self):
self.cursor = len(self.text)
@property
def before_cursor(self) -> str:
"""Excluding cursor"""
return self.text[: self.cursor]
@property
def after_cursor(self) -> str:
"""Including cursor"""
return self.text[self.cursor :]
def insert(self, char: str):
if len(char) != 1:
raise RuntimeError(f"{char} must have length 1")
self.text = f"{self.before_cursor}{char}{self.after_cursor}"
self.cursor += 1
def remove_left(self):
self.text = f"{self.before_cursor[:-1]}{self.after_cursor}"
self.cursor = saturating_sub(self.cursor, 1, 0)
def remove_right(self):
self.text = f"{self.before_cursor}{self.after_cursor[1:]}"
@property
def word_boundaries(self) -> List[int]:
matches: Iterator[re.Match] = re.finditer(r"\b", self.text)
return [match.span()[0] for match in matches]
def bump_left_by_word(self):
self.cursor = next(
filter(lambda i: i < self.cursor, reversed(self.word_boundaries)),
self.cursor,
)
def bump_right_by_word(self):
self.cursor = next(
filter(lambda i: i > self.cursor, self.word_boundaries), self.cursor
)
def remove_left_word(self):
cached_after = self.after_cursor
self.bump_left_by_word()
self.text = f"{self.before_cursor}{cached_after}"
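# Illustrative sketch of the cursor semantics (not part of the original module):
#     tc = TextCursor.empty()
#     tc.insert("h"); tc.insert("i")   # text == "hi", cursor == 2
#     tc.bump_left_by_word()           # cursor jumps back to the previous word boundary (0)
#     tc.remove_right()                # text == "i"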
@dataclass
class TextCursorRender(ConsoleRenderable):
data: TextCursor
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield Text(
text=f"{self.data.text} ",
spans=[
Span(
start=self.data.cursor,
end=self.data.cursor + 1,
style=Style(reverse=True),
)
],
)
if __name__ == "__main__":
from logging import FileHandler
logger.addHandler(FileHandler("text-box.log", mode="w"))
logger.setLevel(logging.DEBUG)
@safe(exceptions=(KeyboardInterrupt,)) # type: ignore
def text_cursor_safe() -> str:
queue: "Queue[KeyPress | Signal]" = Queue()
with Console(stderr=True).screen() as ctx, events.for_signals(
SIGWINCH, queue=queue
), events.for_stdin(queue=queue):
console: Console = ctx.console
state = TextCursor.empty()
console.update_screen(TextCursorRender(state)) # Initial display
while event := queue.get():
logger.debug(event)
if isinstance(event, Signal):
console.update_screen(TextCursorRender(state)) # Redraw on resize
elif isinstance(event.key, Keys):
if event.key == Keys.Left:
state.bump_left()
elif event.key == Keys.Right:
state.bump_right()
elif event.key == Keys.ControlA:
state.jump_to_left()
elif event.key == Keys.ControlE:
state.jump_to_right()
elif event.key == Keys.ControlU:
state = TextCursor.empty()
elif event.key == Keys.Enter:
return state.text
elif event.key == Keys.Backspace:
state.remove_left()
elif event.key == Keys.Delete:
state.remove_right()
elif event.key == Keys.ControlW: # ControlBackspace
state.remove_left_word()
elif event.key == Keys.ControlLeft:
state.bump_left_by_word()
elif event.key == Keys.ControlRight:
state.bump_right_by_word()
else:
raise NotImplementedError(event)
console.update_screen(TextCursorRender(state))
elif isinstance(event.key, str):
state.insert(event.key)
console.update_screen(TextCursorRender(state))
def text_cursor() -> Optional[str]:
return text_cursor_safe().value_or(None)
print(text_cursor()) | /rich-tea-0.3.0.tar.gz/rich-tea-0.3.0/rich_tea/text_box.py | 0.803598 | 0.16099 | text_box.py | pypi |
import logging
from dataclasses import dataclass
from itertools import islice
from queue import Queue
from signal import SIGWINCH
from typing import Generic, Iterable, List, Optional, Set, TypeVar
from more_itertools import mark_ends
from prompt_toolkit.key_binding import KeyPress
from prompt_toolkit.keys import Keys
from returns.result import safe
from rich.console import Console, ConsoleOptions, ConsoleRenderable, RenderResult
from rich.style import Style
from rich.table import Column, Table
from rich_tea import events
from rich_tea.events import Signal
from rich_tea.util import max_index, saturating_add, saturating_sub
logger = logging.getLogger(__name__)
T = TypeVar("T")
@dataclass
class Select(Generic[T]):
item: T
selected: bool = False
def toggle(self):
self.selected = not self.selected
@dataclass
class Cursor(Generic[T]):
items: List[T]
cursor: int = 0
@classmethod
def from_iterable(cls, i: Iterable[T]) -> "Cursor":
return Cursor(items=list(i))
def bump_up(self):
self.cursor = saturating_sub(self.cursor, 1, 0)
def bump_down(self):
self.cursor = saturating_add(self.cursor, 1, max_index(self.items))
def jump_to_top(self):
self.cursor = 0
def jump_to_bottom(self):
self.cursor = max_index(self.items)
def current(self) -> T:
return self.items[self.cursor]
def scrollbar_character(
is_first: bool, is_last: bool, index: int, max_index: int
) -> str:
if is_first and not is_last:
if index == 0:
scrollbar = "■"
else:
scrollbar = "▲"
elif is_first and is_last:
scrollbar = "■"
elif is_last:
if index == max_index:
scrollbar = "■"
else:
scrollbar = "▼"
else:
scrollbar = "|"
return scrollbar
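# The scrollbar column shows "■" at either end of the full list, "▲"/"▼" when more
# items are hidden above/below the visible window, and "|" for rows in between.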
@dataclass
class ListRender(ConsoleRenderable):
data: Cursor[str]
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
table = Table(
*(
Column(header=name, no_wrap=True, min_width=1)
for name in ["scrollbar", "text"]
),
box=None,
show_header=False,
)
if self.data.cursor >= options.max_height:
start = (self.data.cursor - options.max_height) + 1
else:
start = 0
for is_first, is_last, (i, s) in mark_ends(
islice(enumerate(self.data.items), start, start + options.max_height)
):
scrollbar = scrollbar_character(
is_first, is_last, i, max_index(self.data.items)
)
if i == self.data.cursor:
style = Style(bgcolor="white", color="black")
else:
style = None
table.add_row(scrollbar, s, style=style)
return table.__rich_console__(console, options)
@dataclass
class ListSelectRender(ConsoleRenderable):
data: Cursor[Select[str]]
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
logger.info(f"{len(self.data.items)=}, {self.data.cursor=}")
table = Table(
*(
Column(header=name, no_wrap=True, min_width=1)
for name in ["scrollbar", "toggle", "text"]
),
box=None,
show_header=False,
)
if self.data.cursor >= options.max_height:
# v O ...
# v O ...
# v O ... max_height = 3
# O ...
# X ... cursor = 4
start = (self.data.cursor - options.max_height) + 1
else:
start = 0
for is_first, is_last, (i, candidate) in mark_ends(
islice(enumerate(self.data.items), start, start + options.max_height)
):
scrollbar = scrollbar_character(
is_first, is_last, i, max_index(self.data.items)
)
if candidate.selected:
toggled = "+"
else:
toggled = " "
if i == self.data.cursor:
style = Style(bgcolor="white", color="black")
else:
style = None
table.add_row(scrollbar, toggled, candidate.item, style=style)
return table.__rich_console__(console, options)
if __name__ == "__main__":
from logging import FileHandler
logger.addHandler(FileHandler("list-select.log", mode="w"))
logger.setLevel(logging.DEBUG)
@safe(exceptions=(KeyboardInterrupt,)) # type: ignore
def list_viewer_safe(candidates: Iterable[str]) -> Set[str]:
queue: "Queue[KeyPress | Signal]" = Queue()
with Console(stderr=True).screen() as ctx, events.for_signals(
SIGWINCH, queue=queue
), events.for_stdin(queue=queue):
console: Console = ctx.console
state: Cursor[Select[str]] = Cursor.from_iterable(
Select(c) for c in candidates
)
console.update_screen(ListSelectRender(state)) # Initial display
while event := queue.get():
if isinstance(event, Signal):
console.update_screen(ListSelectRender(state)) # Redraw on resize
elif isinstance(event.key, Keys):
if event.key == Keys.Up or event.key == Keys.Left:
state.bump_up()
elif event.key == Keys.Down or event.key == Keys.Right:
state.bump_down()
elif event.key == Keys.Tab:
state.items[state.cursor].toggle()
elif event.key == Keys.Home:
state.jump_to_top()
elif event.key == Keys.End:
state.jump_to_bottom()
elif event.key == Keys.Enter:
return set(
candidate.item
for candidate in state.items
if candidate.selected
)
else:
raise NotImplementedError(event)
console.update_screen(ListSelectRender(state))
elif isinstance(event.key, str):
if event.key == "q":
return set(
candidate.item
for candidate in state.items
if candidate.selected
)
def list_viewer(candidates: Iterable[str]) -> Optional[Set[str]]:
return list_viewer_safe(candidates).value_or(None)
print(
list_viewer(
[
"The Zen of Python, by Tim Peters",
"Beautiful is better than ugly.",
"Explicit is better than implicit.",
"Simple is better than complex.",
"Complex is better than complicated.",
"Flat is better than nested.",
"Sparse is better than dense.",
"Readability counts.",
"Special cases aren't special enough to break the rules.",
"Although practicality beats purity.",
"Errors should never pass silently.",
"Unless explicitly silenced.",
"In the face of ambiguity, refuse the temptation to guess.",
"There should be one-- and preferably only one --obvious way to do it.",
"Although that way may not be obvious at first unless you're Dutch.",
"Now is better than never.",
"Although never is often better than *right* now.",
"If the implementation is hard to explain, it's a bad idea.",
"If the implementation is easy to explain, it may be a good idea.",
"Namespaces are one honking great idea -- let's do more of those!",
]
)
) | /rich-tea-0.3.0.tar.gz/rich-tea-0.3.0/rich_tea/list_select.py | 0.652352 | 0.169063 | list_select.py | pypi |
from __future__ import unicode_literals
from .base_node_renderer import BaseNodeRenderer
class BaseBlockRenderer(BaseNodeRenderer):
def render(self, node):
return "<{0}>{1}</{0}>".format(self._render_tag, self._render_content(node))
def _render_content(self, node):
result = []
for c in node["content"]:
renderer = self._find_renderer(c)
if renderer is None:
continue
result.append(renderer.render(c))
return "".join(result)
@property
def _render_tag(self):
return "div"
class HeadingOneRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h1"
class HeadingTwoRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h2"
class HeadingThreeRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h3"
class HeadingFourRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h4"
class HeadingFiveRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h5"
class HeadingSixRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "h6"
class ParagraphRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "p"
class OrderedListRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "ol"
class UnorderedListRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "ul"
class ListItemRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "li"
class BlockQuoteRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "blockquote"
class HrRenderer(BaseNodeRenderer):
def render(self, _node):
return "<hr />"
class HyperlinkRenderer(BaseBlockRenderer):
def render(self, node):
return '<a href="{0}">{1}</a>'.format(
node["data"]["uri"], self._render_content(node)
)
class EntryBlockRenderer(BaseNodeRenderer):
def render(self, node):
return "<div>{0}</div>".format(node["data"]["target"])
class AssetHyperlinkRenderer(BaseBlockRenderer):
ANCHOR_HTML = '<a href="{0}">{1}</a>'
def render(self, node):
asset = node["data"]["target"]
# Check by class name instead of instance type to
# avoid depending on the Contentful SDK.
if asset.__class__.__name__ == "Asset":
return self._render_asset(asset, node)
elif isinstance(asset, dict):
if "fields" not in asset and "file" not in asset.get("fields", {}):
raise Exception("Node target is not an asset - Node: {0}".format(node))
return self._render_hash(asset, node)
else:
raise Exception("Node target is not an asset - Node: {0}".format(node))
def _render_asset(self, asset, node=None):
return self._render(
self.__class__.ANCHOR_HTML,
asset.url(),
node if node is not None else asset.title,
bool(node),
)
def _render_hash(self, asset, node=None):
return self._render(
self.__class__.ANCHOR_HTML,
asset["fields"]["file"]["url"],
node if node is not None else asset["fields"]["title"],
bool(node),
)
def _render(self, markup, url, text, formatted=True):
if formatted:
text = self._render_content(text)
return markup.format(url, text)
class AssetBlockRenderer(AssetHyperlinkRenderer):
IMAGE_HTML = '<img src="{0}" alt="{1}" />'
def _render_asset(self, asset, node=None):
if "contentType" in asset.file and "image" in asset.file["contentType"]:
return self._render(
self.__class__.IMAGE_HTML, asset.url(), asset.title, False
)
return super(AssetBlockRenderer, self)._render_asset(asset)
def _render_hash(self, asset, node=None):
if (
"contentType" in asset["fields"]["file"]
and "image" in asset["fields"]["file"]["contentType"]
):
return self._render(
self.__class__.IMAGE_HTML,
asset["fields"]["file"]["url"],
asset["fields"]["title"],
False,
)
return super(AssetBlockRenderer, self)._render_hash(asset)
class TableCellRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "td"
class TableHeaderCellRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "th"
class TableRowRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "tr"
class TableRenderer(BaseBlockRenderer):
@property
def _render_tag(self):
return "table" | /rich_text_renderer-0.2.7.tar.gz/rich_text_renderer-0.2.7/rich_text_renderer/block_renderers.py | 0.788094 | 0.251119 | block_renderers.py | pypi |
from typing import Optional, Iterator, Dict, Any
import pandas as pd
from rich.table import Table
from rich_tools.text import _strip_tags
def df_to_table(
pandas_dataframe: pd.DataFrame,
rich_table: Optional[Table] = None,
show_index: bool = True,
index_name: Optional[str] = None,
) -> Table:
"""Convert a pandas.DataFrame obj into a rich.Table obj.
Args:
pandas_dataframe (DataFrame): A Pandas DataFrame to be converted to a rich Table.
rich_table (Table, optional): A rich Table to populate with the DataFrame values; a new Table is created if omitted.
show_index (bool): Add a column with a row count to the table. Defaults to True.
index_name (str, optional): The column name to give to the index column. Defaults to None, showing no value.
Returns:
Table: The rich Table instance, populated with the DataFrame values."""
if rich_table is None:
rich_table = Table()
if show_index:
index_name = str(index_name) if index_name else ""
rich_table.add_column(index_name)
for column in pandas_dataframe.columns:
rich_table.add_column(str(column))
for index, value_list in enumerate(pandas_dataframe.values.tolist()):
row = [str(index)] if show_index else []
row += [str(x) for x in value_list]
rich_table.add_row(*row)
return rich_table
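# Illustrative round-trip (sketch, assuming pandas is installed):
#     df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#     table = df_to_table(df)     # rich Table with an index column plus "a" and "b"
#     df2 = table_to_df(table)    # back to a DataFrame (all cell values as strings)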
def table_to_df(rich_table: Table, remove_markup: bool = True) -> pd.DataFrame:
"""Convert a rich.Table obj into a pandas.DataFrame obj with any rich formatting removed from the values.
Args:
rich_table (Table): A rich Table to convert into a DataFrame.
remove_markup (bool): Removes rich markup from the keys and values in the table if True.
Returns:
DataFrame: A pandas DataFrame with the Table data as its values."""
return pd.DataFrame(
{
_strip_tags(x.header, remove_markup): [
_strip_tags(y, remove_markup) for y in x.cells
]
for x in rich_table.columns
}
)
def table_to_dicts(
rich_table: Table, remove_markup: bool = True
) -> Iterator[Dict[str, Any]]:
"""Convert a rich.Table obj into a list of dictionary's with keys set as column names.
Args:
rich_table (Table): A rich Table instance containing data to be converted into dictionaries.
remove_markup (bool): Removes rich markup from the keys and values in the table if True.
Raises:
ValueError: Raised in cases where the Table contains keys that do not make sense converted to a dict.
For example if the Table has a header with an empty string or duplicate keys.
Returns:
Iterator: An iterator over the input Table's rows, each as a dictionary."""
column_keys = [_strip_tags(c.header, remove_markup) for c in rich_table.columns]
if "" in column_keys:
raise ValueError("You cannot convert a Table instance that has blank header")
if len(column_keys) != len(set(column_keys)):
raise ValueError(
"You cannot convert a Table instance that has duplicate headers"
)
column_values = [
[_strip_tags(v, remove_markup) for v in c._cells] for c in rich_table.columns
]
return (dict(zip(column_keys, row_values)) for row_values in zip(*column_values)) | /rich_tools-0.5.0-py3-none-any.whl/rich_tools/table.py | 0.949983 | 0.655715 | table.py | pypi |
from typing import *
from tqdm import tqdm
from rich_torndb import BaseConn
class TableJoiner(object):
@classmethod
def join_table(cls, base_datas: List[Dict[str, Any]],
table: str, fields: List[str], base_join_key: str,
table_join_key: str, db: BaseConn,
extra_conditions: Optional[List[str]] = None,
left_join: bool = False, shard_size: int = 800):
"""
Only inner joins and left joins are supported; inner join is the default.
extra_conditions must not be specified when using a left join.
:param base_datas: [dict(), dict(), dict()], base data; after the join the fetched fields are merged into every item
:param table: str, the table to join against
:param fields: list, the fields to fetch and merge, e.g.
["account", "user_id as nick_user_id"]
:param base_join_key: str, the field taken from base_datas used for the join, e.g. user_id
:param table_join_key: str, the field taken from the joined table used for the join, e.g. user_id
:param extra_conditions: list, additional query conditions,
e.g. ["name in ('dola', 'tom')", "age > 2"]
:param db: torndb.Connection, database connection object
:param left_join: bool, whether to perform a left join
:param shard_size: int, number of join values per sharded query
:return:
"""
extra_conditions = extra_conditions if extra_conditions else []
def __separate_query(join_values) -> List[Dict[str, Any]]:
def __get_join_values_condition_sql():
return "{} in ('{}')".format(table_join_key,
"','".join(join_values))
if isinstance(join_values[0], str):
join_values_condition_sql = __get_join_values_condition_sql()
else:
join_values = [str(value) for value in join_values]
join_values_condition_sql = __get_join_values_condition_sql()
conditions = [join_values_condition_sql, ] + extra_conditions
# Build the SQL
condition_sql = " AND ".join(conditions)
sql = f"SELECT {fields_sql} FROM {table} WHERE {condition_sql}"
# logging.info(sql)
return db.query(sql)
if not base_datas:
return
if left_join and extra_conditions:
raise Exception("Not support extra_conditions when using left join")
base_join_values = [base_data[base_join_key] for base_data in
base_datas]
# Join values may be None; skip empty ones
base_join_values = list(filter(lambda value: value, base_join_values))
# Turn e.g. "user_id as c2_user_id" into c2_user_id, used below when extracting fields from the query results
real_fields = cls._get_real_fields(fields)
results_map = {}
if base_join_values:
fields_sql = ", ".join(fields + [table_join_key, ])
results = list()
# Query in shards for efficiency; show a progress bar when there is more than one join value
indexes = range(0, len(base_join_values), shard_size)
if len(base_join_values) > 1:
indexes = tqdm(indexes, desc=f"Joining table:{table}")
for index in indexes:
temp_join_values = base_join_values[index: index + shard_size]
results.extend(__separate_query(temp_join_values))
results_map = {
result[table_join_key]: {field: result[field] for field in
real_fields}
for result in results}
index = 0
while index < len(base_datas):
data = base_datas[index]
if data[base_join_key] in results_map:
data.update(results_map[data[base_join_key]])
elif left_join:
data.update({field: None for field in real_fields})
else:
base_datas.pop(index)
index -= 1
index += 1
@classmethod
def _merge_datas(cls, datas1, datas2, merge_key1="user_id",
merge_key2="user_id"):
datas1_map = {data[merge_key1]: data for data in datas1}
for data in datas2:
if data[merge_key2] not in datas1_map:
datas1.append(data)
return datas1
@classmethod
def _get_real_fields(cls, fields):
real_fields = list()
for field in fields:
if " as " in field:
real_field = field.split(" as ")[1]
elif " AS " in field:
real_field = field.split(" AS ")[1]
else:
real_field = field
real_fields.append(real_field)
return real_fields | /rich-torndb-0.0.1.tar.gz/rich-torndb-0.0.1/rich_torndb/table_joiner.py | 0.533154 | 0.450359 | table_joiner.py | pypi |
from typing import *
import logging as logger
from tqdm import tqdm
from rich_torndb.utils.changes_log import info_changes
from rich_torndb.utils.torndb import Connection, Row
def gen_condition_sql_n_vals(keys, data, is_update=False):
conditions, values = [], []
for key in keys:
value = data[key]
if value is None:
conditions.append(
f"{key} {'is null' if not is_update else '= NULL'}")
else:
conditions.append("{} = %s".format(key))
values.append(value)
condition_sql = (" AND ".join(conditions)
if not is_update else ", ".join(conditions))
return condition_sql, values
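# Illustrative behaviour (sketch): for data = {"day": "2024-01-01", "note": None}
#     gen_condition_sql_n_vals(["day", "note"], data)
# returns ("day = %s AND note is null", ["2024-01-01"]); with is_update=True the
# fragments are joined by ", " and the None column becomes "note = NULL".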
class BaseConn(Connection):
__TABLE_NAME__ = None
def __init__(self, host, database, user=None, password=None,
max_idle_time=250, connect_timeout=10,
time_zone="+0:00", charset="utf8", sql_mode="TRADITIONAL"):
"""
:rtype: object
"""
super().__init__(host=host, database=database, user=user,
password=password, max_idle_time=max_idle_time,
connect_timeout=connect_timeout,
time_zone=time_zone, charset=charset,
sql_mode=sql_mode)
def get_exist_row(self, table: str, id_keys: List[str],
data: Dict[str, Any]) -> Row:
condition_sql, id_key_values = gen_condition_sql_n_vals(id_keys, data)
check_exist_sql = f"SELECT id FROM {table} WHERE {condition_sql}"
exist_row = self.get(check_exist_sql, *id_key_values)
return exist_row
@classmethod
def _gen_exec_sql_and_values(cls, table: str, exist: Row,
data: Dict) -> Tuple[str, List[str]]:
keys = list(data.keys())
if exist:
if "id" in keys:
keys.remove("id")
condition_sql, values = gen_condition_sql_n_vals(keys, data,
is_update=True)
update_sql = f"UPDATE {table} SET {condition_sql} WHERE id = %s"
return update_sql, values + [exist["id"], ]
else:
keys = list(data.keys())
values = [data[key] for key in keys]
fields_sql = "`{}`".format("`, `".join(keys))
values_sql = ", ".join(["%s" for _ in keys])
insert_sql = (f"INSERT INTO {table} ({fields_sql}) "
f"VALUES ({values_sql})")
return insert_sql, values
@classmethod
def _gen_delete_sql_and_values(cls, table: str, exist_row: Row,
data: Dict) -> Tuple[str, List[str]]:
if not exist_row:
raise Exception("Exist row is None")
keys = list(data.keys())
if "id" in keys:
keys.remove("id")
condition_sql, values = gen_condition_sql_n_vals(keys, data)
update_sql = f"DELETE FROM {table} WHERE id = %s"
return update_sql, values + [exist_row["id"], ]
def insert_or_update_datas(self, id_keys: List[str],
datas: List[Dict[str, Any]],
table: str = None):
"""
:param id_keys: the column keys that make record unique.
such as ["id"] or ["day", "user_id"]
this method uses {id_keys} to check whether the record already exists;
the existence check raises an exception if more than
one record matches
:param datas: format [dict(), dict()],
NOTE: each dict inside must contain the columns specified in
id_keys, such as
[
{"user_id" : 1324, "gender" : "0", .....}
{"user_id" : 1323, "gender" : "1", .....}
{"user_id" : 1325, "gender" : "0", .....}
{"user_id" : 1326, "gender" : "1", .....}
]
:param table: optional, use __TABLE_NAME__ if table is not specified
:return:
"""
table = self.__TABLE_NAME__ if table is None else table
insert_num, update_num = 0, 0
with TransactionHelper(self):
for data in tqdm(datas, desc=f"Inserting into table:{table}"):
try:
exist_row = self.get_exist_row(table, id_keys, data)
exec_sql, values = self._gen_exec_sql_and_values(
table, exist_row, data)
if not exist_row:
self.execute(exec_sql, *values)
insert_num += 1
elif info_changes(table=table,
id_kvs={id_key: data[id_key]
for id_key in id_keys},
new_data=data, db=self):
self.execute(exec_sql, *values)
update_num += 1
except Exception as exp:
warning_msg = (f"{exp}\n"
f"Something happened when insert date into:"
f"{table}\n"
f"id_keys:{id_keys}, data:{data}")
logger.warning(warning_msg)
raise Exception(warning_msg)
logger.info(f"Insert successfully!!! new insert num:{insert_num},"
f" updated num:{update_num}")
return insert_num, update_num
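# Illustrative call (sketch; the `orders` table and its columns are hypothetical):
#     db.insert_or_update_datas(
#         id_keys=["day", "user_id"],
#         datas=[{"day": "2024-01-01", "user_id": 1324, "amount": 10}],
#         table="orders",
#     )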
def delete_datas(self, id_keys: List[str],
datas: List[Dict[str, Any]],
table: str = None):
"""
:param id_keys:
:param datas:
:param table:
:return:
"""
table = self.__TABLE_NAME__ if table is None else table
delete_num = 0
with TransactionHelper(self):
for data in tqdm(datas, desc=f"Deleting from table: {table}"):
try:
exist_row = self.get_exist_row(table, id_keys, data)
if not exist_row:
continue
delete_sql = f"DELETE FROM {table} WHERE id = %s"
self.execute(delete_sql, exist_row["id"])
delete_num += 1
except Exception as exp:
warning_msg = (f"{exp}\n"
f"Something happened when delete date from "
f"{table}\n"
f"id_keys:{id_keys}, data:{data}")
logger.warning(warning_msg)
raise Exception(warning_msg)
logger.info(f"Delete successfully!!! delete num:{delete_num}")
return delete_num
class TransactionHelper(object):
def __init__(self, db):
self.db = db
def __enter__(self):
self.db._ensure_connected()
self.db._db.begin()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.db._ensure_connected()
self.db._db.rollback()
raise exc_val
self.db._db.commit() | /rich-torndb-0.0.1.tar.gz/rich-torndb-0.0.1/rich_torndb/rich_torndb.py | 0.603231 | 0.181354 | rich_torndb.py | pypi |
from typing import *
import json
import datetime
import logging as logger
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d")
else:
return json.JSONEncoder.default(self, obj)
def is_num(value):
# noinspection PyBroadException
try:
float(value)
return True
except Exception:
return False
def union_type(*values, **kwargs):
def union(_value):
if isinstance(_value, datetime.datetime):
_value = _value.strftime("%Y-%m-%d %H:%M:%S")
if isinstance(_value, str) and is_num(_value):
_value = float(_value)
if is_num(_value):
_value = round(_value, 2)
if kwargs.get("digit_to_int") and is_num(_value):
_value = int(_value)
return _value
union_values = [union(value) for value in values]
return union_values
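# union_type() normalises values before comparison: datetimes become formatted strings,
# numeric strings become floats, numbers are rounded to two decimals, and with
# digit_to_int=True numeric values are truncated to int.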
def info_changes(table, id_kvs, new_data, db):
"""
Log changes between the existing record and the new data.
:param table: str, name of the table about to be updated
:param id_kvs: dict, primary key of the record to change; composite keys
spanning multiple columns are supported,
format sample { key1:value1, key2:value2 }
:param new_data: dict, the data to change to
:param db: query object
:return: None if no record matches the primary key,
None if the old and new data do not differ,
otherwise the differences between old and new data
"""
args = [arg for arg in new_data.keys() if arg not in id_kvs]  # filter out the primary key columns
if not args:
return None
values = []
id_fields = []
for id_key, value in id_kvs.items():
id_fields.append("{}=%s".format(id_key))
values.append(value)
select_sql = "SELECT {} FROM {} WHERE {}".format(", ".join(args), table,
" AND ".join(id_fields))
old_data = db.get(select_sql, *values)
if not old_data:
return None
changes = find_changes(old_data, new_data)
if changes:
details = json.dumps(changes, ensure_ascii=False, cls=DateEncoder)
logger.info(f"[Record changes] table:{table}; primary:{id_kvs}; "
f"details:{details};")
return changes
return None
def find_changes(data1: Dict[str, Any], data2: Dict[str, Any],
data1_key="old", data2_key="new", digit_to_int=False):
changes = dict()
for arg, value in data1.items():
value1, value2 = union_type(value, data2[arg],
digit_to_int=digit_to_int)
if value1 != value2:
changes[arg] = {data1_key: value1, data2_key: value2}
return changes | /rich-torndb-0.0.1.tar.gz/rich-torndb-0.0.1/rich_torndb/utils/changes_log.py | 0.567098 | 0.170992 | changes_log.py | pypi |