# NOTE: dataset-export header (column names / separator row) removed; the
# Python source content begins below.
import re # noqa: F401
import sys # noqa: F401
from rockset.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel,
)
from rockset.exceptions import ApiAttributeError
from rockset.document import Document
def lazy_import():
    """Import the referenced model classes at call time (avoids circular
    imports) and publish them into this module's global namespace."""
    from rockset.model.pagination_info import PaginationInfo
    from rockset.model.query_error import QueryError
    from rockset.model.query_field_type import QueryFieldType
    from rockset.model.query_response_stats import QueryResponseStats

    globals().update(
        PaginationInfo=PaginationInfo,
        QueryError=QueryError,
        QueryFieldType=QueryFieldType,
        QueryResponseStats=QueryResponseStats,
    )
class QueryResponse(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted or validated fields exist on this model.
    allowed_values = {}

    validations = {}

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-representable value is accepted for unknown keys.
        lazy_import()
        return (
            bool,
            date,
            datetime,
            dict,
            float,
            int,
            list,
            str,
            none_type,
        )  # noqa: E501

    # The model itself is never null in the API schema.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            "collections": ([str],),  # noqa: E501
            "column_fields": ([QueryFieldType],),  # noqa: E501
            "last_offset": (str,),  # noqa: E501
            "pagination": (PaginationInfo,),  # noqa: E501
            "query_errors": ([QueryError],),  # noqa: E501
            "query_id": (str,),  # noqa: E501
            "query_lambda_path": (str,),  # noqa: E501
            "results": (
                [{str: (bool, date, datetime, dict, float, int, list, str, none_type)}],
            ),  # noqa: E501
            "results_total_doc_count": (int,),  # noqa: E501
            "stats": (QueryResponseStats,),  # noqa: E501
            "warnings": ([str],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic.
        return None

    # Python attribute name -> JSON key; identical for every field here.
    attribute_map = {
        "collections": "collections",  # noqa: E501
        "column_fields": "column_fields",  # noqa: E501
        "last_offset": "last_offset",  # noqa: E501
        "pagination": "pagination",  # noqa: E501
        "query_errors": "query_errors",  # noqa: E501
        "query_id": "query_id",  # noqa: E501
        "query_lambda_path": "query_lambda_path",  # noqa: E501
        "results": "results",  # noqa: E501
        "results_total_doc_count": "results_total_doc_count",  # noqa: E501
        "stats": "stats",  # noqa: E501
        "warnings": "warnings",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """QueryResponse - a model defined in OpenAPI.

        Keyword Args:
            collections ([str]): Collections referenced in the query. [optional]
            column_fields ([QueryFieldType]): Per-column metadata; not
                populated for `SELECT *` queries. [optional]
            last_offset (str): Log offset the query was written to, for
                write queries. [optional]
            pagination (PaginationInfo): [optional]
            query_errors ([QueryError]): Errors hit while executing. [optional]
            query_id (str): Unique ID for this query. [optional]
            query_lambda_path (str): Full path (with version) of the executed
                query lambda. [optional]
            results ([dict]): Result rows from the query. [optional]
            results_total_doc_count (int): Number of results generated.
                [optional]
            stats (QueryResponseStats): [optional]
            warnings ([str]): Warnings; only populated when
                `generate_warnings` was requested. [optional]

        Framework control options (all popped from kwargs before the
        remaining keys are treated as model fields):
            _check_type (bool): type-check values against openapi_types
                (default False for performance).
            _spec_property_naming (bool): True when keys use serialized
                (spec) names rather than pythonic names (default False).
            _path_to_item (tuple/list): path to this model within the
                response being deserialized.
            _configuration (Configuration): client configuration used during
                deserialization; enables discard_unknown_keys handling.
            _visited_composed_classes (tuple): classes already traversed via
                a discriminator, so they are not revisited.
        """
        _check_type = kwargs.pop("_check_type", False)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
        # Skip OpenApiModel.__new__ and allocate the instance directly.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Model fields must be passed by keyword only.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            if var_name == "results":
                # Wrap each raw result row in Document so Rockset-typed
                # values (dates, intervals, geography, ...) become native
                # Python objects.
                setattr(self, "results", [Document(doc) for doc in var_value])
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes every instance must carry.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
        ]
    )

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """QueryResponse - a model defined in OpenAPI.

        Accepts the same model fields and framework control options as
        `_from_openapi_data` (see that method's docstring); unlike it, this
        constructor also rejects assignment of read-only attributes.

        Keyword Args:
            collections ([str]): Collections referenced in the query. [optional]
            column_fields ([QueryFieldType]): Per-column metadata; not
                populated for `SELECT *` queries. [optional]
            last_offset (str): Log offset the query was written to, for
                write queries. [optional]
            pagination (PaginationInfo): [optional]
            query_errors ([QueryError]): Errors hit while executing. [optional]
            query_id (str): Unique ID for this query. [optional]
            query_lambda_path (str): Full path (with version) of the executed
                query lambda. [optional]
            results ([dict]): Result rows from the query. [optional]
            results_total_doc_count (int): Number of results generated.
                [optional]
            stats (QueryResponseStats): [optional]
            warnings ([str]): Warnings; only populated when
                `generate_warnings` was requested. [optional]
        """
        _check_type = kwargs.pop("_check_type", False)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
        if args:
            # Model fields must be passed by keyword only.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            if var_name == "results":
                # Same Document wrapping as in _from_openapi_data; note the
                # continue also skips the read-only guard below for results.
                setattr(self, "results", [Document(doc) for doc in var_value])
                continue
            setattr(self, var_name, var_value)
            # read_only_vars is empty for this model, so this guard never
            # fires here; note it runs only AFTER the attribute is set.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(
                    f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                    f"class with read only attributes."
                )
import datetime
import geojson
# all Rockset data types
# Key under which a typed wire value carries its Rockset type name.
DATATYPE_META = "__rockset_type"
# Scalar types.
DATATYPE_INT = "int"
DATATYPE_FLOAT = "float"
DATATYPE_BOOL = "bool"
DATATYPE_STRING = "string"
DATATYPE_BYTES = "bytes"
DATATYPE_NULL = "null"
DATATYPE_NULL_TYPE = "null_type"
# Container types.
DATATYPE_ARRAY = "array"
DATATYPE_OBJECT = "object"
# Temporal types (converted to datetime.* objects by Document).
DATATYPE_DATE = "date"
DATATYPE_DATETIME = "datetime"
DATATYPE_TIME = "time"
DATATYPE_TIMESTAMP = "timestamp"
DATATYPE_MONTH_INTERVAL = "month_interval"
DATATYPE_MICROSECOND_INTERVAL = "microsecond_interval"
# Geospatial type (converted to a geojson.GeoJSON instance by Document).
DATATYPE_GEOGRAPHY = "geography"
def _date_fromisoformat(s):
dt = datetime.datetime.strptime(s, "%Y-%m-%d")
return dt.date()
def _time_fromisoformat(s):
try:
dt = datetime.datetime.strptime(s, "%H:%M:%S.%f")
except ValueError:
dt = datetime.datetime.strptime(s, "%H:%M:%S")
return datetime.time(
hour=dt.hour, minute=dt.minute, second=dt.second, microsecond=dt.microsecond
)
def _datetime_fromisoformat(s):
try:
dt = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
dt = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
return dt
def _timedelta_from_microseconds(us):
return datetime.timedelta(microseconds=us)
class Document(dict):
    """Represents a single record or row in query results.

    This is a sub-class of dict. So, treat this object as a dict
    for all practical purposes.

    Only the constructor is overridden to handle the type adaptations
    shown in the table above.
    """

    def __init__(self, *args, **kwargs):
        # Start as a plain dict, then replace any Rockset-typed wrapper
        # values ({"__rockset_type": <name>, "value": <raw>}) with native
        # Python objects. Only the temporal and geography types are
        # converted; other typed wrappers are left as-is.
        super(Document, self).__init__(*args, **kwargs)
        for k in self.keys():
            if not isinstance(self[k], dict):
                continue
            if DATATYPE_META not in self[k]:
                continue
            t = self[k][DATATYPE_META].lower()
            v = self[k]["value"]
            if t == DATATYPE_DATE:
                self[k] = _date_fromisoformat(v)
            elif t == DATATYPE_TIME:
                self[k] = _time_fromisoformat(v)
            elif t == DATATYPE_DATETIME:
                self[k] = _datetime_fromisoformat(v)
            elif t == DATATYPE_MICROSECOND_INTERVAL:
                self[k] = _timedelta_from_microseconds(v)
            elif t == DATATYPE_GEOGRAPHY:
                self[k] = geojson.GeoJSON.to_instance(v)

    def _py_type_to_rs_type(self, v):
        # Map a Python value back to its Rockset type-name constant.
        if isinstance(v, bool):
            # check for bool before int, since bools are ints too
            return DATATYPE_BOOL
        elif isinstance(v, int):
            return DATATYPE_INT
        elif isinstance(v, float):
            return DATATYPE_FLOAT
        elif isinstance(v, str):
            return DATATYPE_STRING
        elif isinstance(v, bytes):
            return DATATYPE_BYTES
        elif isinstance(v, type(None)):
            return DATATYPE_NULL
        elif isinstance(v, list):
            return DATATYPE_ARRAY
        elif isinstance(v, datetime.datetime):
            # check for datetime first, since datetimes are dates too
            return DATATYPE_DATETIME
        elif isinstance(v, datetime.date):
            return DATATYPE_DATE
        elif isinstance(v, datetime.time):
            return DATATYPE_TIME
        elif isinstance(v, datetime.timedelta):
            return DATATYPE_MICROSECOND_INTERVAL
        elif isinstance(v, geojson.GeoJSON):
            return DATATYPE_GEOGRAPHY
        elif isinstance(v, dict):  # keep this in the end
            if DATATYPE_META not in v:
                return DATATYPE_OBJECT
            return v[DATATYPE_META].lower()
        # NOTE(review): any value of another type (e.g. set, tuple) matches
        # no branch and implicitly returns None -- confirm that is intended.

    def fields(self, columns=None):
        # Describe the given columns (default: all keys, sorted) as
        # [{"name": ..., "type": ...}] entries.
        columns = columns or sorted(self)
        # NOTE(review): the default for a missing column is the NoneType
        # *class*, which matches no isinstance branch above, so "type"
        # comes back as None rather than "null" -- verify this is intended.
        return [
            {"name": c, "type": self._py_type_to_rs_type(self.get(c, type(None)))}
            for c in columns
        ]
class RocksetException(Exception):
    """Common base class for every exception raised by the Rockset client."""
class ApiTypeError(RocksetException, TypeError):
    """TypeError variant raised when an API value has the wrong type."""

    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Build the error, appending the offending path to *msg* when known.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the current
                item, or None if unset
            valid_classes (tuple): the primitive classes the current item
                should be an instance of, or None if unset
            key_type (bool): True if the item is a key in a dict, False if
                it is a dict value or list item, None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)
class ApiValueError(RocksetException, ValueError):
    """ValueError variant raised by the Rockset client."""

    def __init__(self, msg, path_to_item=None):
        """Build the error, appending the offending path to *msg* when known.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict, or None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)
class ApiAttributeError(RocksetException, AttributeError):
    """Raised when an attribute reference or assignment fails."""

    def __init__(self, msg, path_to_item=None):
        """Build the error, appending the offending path to *msg* when known.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)
class ApiKeyError(RocksetException, KeyError):
    """KeyError variant raised by the Rockset client."""

    def __init__(self, msg, path_to_item=None):
        """Build the error, appending the offending path to *msg* when known.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super().__init__(msg)
class ApiException(RocksetException):
    """Raised when an HTTP call to the Rockset API fails.

    Captures the status, reason, body and headers either from an HTTP
    response object or from the explicit keyword arguments.
    """

    def __init__(self, status=None, reason=None, http_resp=None):
        if not http_resp:
            # No response object: take the caller-supplied values verbatim.
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None
        else:
            # Prefer the details carried by the HTTP response itself.
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()

    def __str__(self):
        """Render status, reason, headers and body as a multi-line message."""
        pieces = [
            "({0})\n"
            "Reason: {1}\n".format(self.status, self.reason)
        ]
        if self.headers:
            pieces.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            pieces.append("HTTP response body: {0}\n".format(self.body))
        return "".join(pieces)
class NotFoundException(ApiException):
    """ApiException raised when the API reports the resource was not found.

    The constructor is inherited unchanged from ApiException
    (status=None, reason=None, http_resp=None); the previous override was a
    no-op that merely forwarded the same arguments.
    """
class UnauthorizedException(ApiException):
    """ApiException raised when the API reports the request is unauthorized.

    The constructor is inherited unchanged from ApiException
    (status=None, reason=None, http_resp=None); the previous override was a
    no-op that merely forwarded the same arguments.
    """
class ForbiddenException(ApiException):
    """ApiException raised when the API reports the request is forbidden.

    The constructor is inherited unchanged from ApiException
    (status=None, reason=None, http_resp=None); the previous override was a
    no-op that merely forwarded the same arguments.
    """
class ServiceException(ApiException):
    """ApiException raised when the API reports a service-side failure.

    The constructor is inherited unchanged from ApiException
    (status=None, reason=None, http_resp=None); the previous override was a
    no-op that merely forwarded the same arguments.
    """
class BadRequestException(ApiException):
    """ApiException raised when the API reports a bad request.

    The constructor is inherited unchanged from ApiException
    (status=None, reason=None, http_resp=None); the previous override was a
    no-op that merely forwarded the same arguments.
    """
class InitializationException(RocksetException):
    """Raised when the Rockset client is constructed with invalid settings."""

    def __init__(self, reason):
        message = f"The rockset client was initialized incorrectly: {reason}"
        super().__init__(message)
class InputException(RocksetException):
    # NOTE(review): unlike the HTTP-style exceptions above, this derives
    # from RocksetException rather than ApiException, so the three
    # arguments go straight to Exception.__init__ and land in ``args`` as
    # a tuple (no status/reason/body attributes are set). Looks like it was
    # meant to subclass ApiException -- confirm with callers before changing.
    def __init__(self, status=None, reason=None, http_resp=None):
        super(InputException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
    """Returns a string representation of a path.

    Integers render as list indices (``[0]``), everything else as quoted
    dict keys (``['name']``).
    """
    rendered = []
    for part in path_to_item:
        if isinstance(part, int):
            rendered.append("[{0}]".format(part))
        else:
            rendered.append("['{0}']".format(part))
    return "".join(rendered)
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import asyncio
from rockset.api_client import ApiClient, Endpoint as _Endpoint
from rockset.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from rockset.model.error_model import ErrorModel
from rockset.model.organization_response import OrganizationResponse
from rockset.models import *
class Organizations(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # Static description of GET /v1/orgs/self; the shared _Endpoint
        # helper performs validation and the actual HTTP call. This
        # endpoint takes no parameters, hence the empty maps below.
        self.get_endpoint = _Endpoint(
            settings={
                'response_type': (OrganizationResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self',
                'operation_id': 'get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                ],
                'required': [],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                },
                'attribute_map': {
                },
                'location_map': {
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )

    def get(
        self,
        **kwargs
    ) -> typing.Union[OrganizationResponse, asyncio.Future]:
        """Get Organization  # noqa: E501

        Retrieve information about current organization.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Organizations.get(
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            _return_http_data_only (bool): return response data without head
                status code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse
                object will be returned without reading/decoding response
                data. Default is True.
            _request_timeout (int/float/tuple): timeout for this request;
                one number for total timeout, or a (connection, read) pair.
                Default is None.
            _check_input_type (bool): type-check data sent to the server.
                Default is True.
            _check_return_type (bool): type-check (and convert to model
                types) data received from the server. Default is True.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized (spec) names; False for pythonic
                names (default).
            _content_type (str/None): force body content-type. Default is
                None (predicted from allowed content-types and body).
            _host_index (int/None): index of the server to use. Default is
                read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            OrganizationResponse
            If the method is called asynchronously, returns an
            asyncio.Future which resolves to the response.
        """
        # Fill in defaults for every request-control option, then delegate
        # the HTTP work to the endpoint helper.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_spec_property_naming'] = kwargs.get(
            '_spec_property_naming', False
        )
        kwargs['_content_type'] = kwargs.get(
            '_content_type')
        kwargs['_host_index'] = kwargs.get('_host_index')
        return self.get_endpoint.call_with_http_info(**kwargs)

    # NOTE(review): these appear after the methods in the generated file;
    # assuming they are class-level registries (empty for this API) --
    # confirm against the generator template.
    body_params_dict = dict()
    return_types_dict = dict()
import re # noqa: F401
import sys # noqa: F401
from rockset.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel,
)
from rockset.exceptions import ApiAttributeError
from rockset.document import Document
def lazy_import():
    """Import the referenced model classes at call time (avoids circular
    imports) and publish them into this module's global namespace."""
    from rockset.model.pagination_info import PaginationInfo
    from rockset.model.query_error import QueryError
    from rockset.model.query_field_type import QueryFieldType
    from rockset.model.query_response_stats import QueryResponseStats

    globals().update(
        PaginationInfo=PaginationInfo,
        QueryError=QueryError,
        QueryFieldType=QueryFieldType,
        QueryResponseStats=QueryResponseStats,
    )
class QueryResponse(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
allowed_values = {}
validations = {}
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-representable value is accepted for unknown keys.
        lazy_import()
        return (
            bool,
            date,
            datetime,
            dict,
            float,
            int,
            list,
            str,
            none_type,
        )  # noqa: E501

    # The model itself is never null in the API schema.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            "collections": ([str],),  # noqa: E501
            "column_fields": ([QueryFieldType],),  # noqa: E501
            "last_offset": (str,),  # noqa: E501
            "pagination": (PaginationInfo,),  # noqa: E501
            "query_errors": ([QueryError],),  # noqa: E501
            "query_id": (str,),  # noqa: E501
            "query_lambda_path": (str,),  # noqa: E501
            "results": (
                [{str: (bool, date, datetime, dict, float, int, list, str, none_type)}],
            ),  # noqa: E501
            "results_total_doc_count": (int,),  # noqa: E501
            "stats": (QueryResponseStats,),  # noqa: E501
            "warnings": ([str],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model is not polymorphic.
        return None

    # Python attribute name -> JSON key; identical for every field here.
    attribute_map = {
        "collections": "collections",  # noqa: E501
        "column_fields": "column_fields",  # noqa: E501
        "last_offset": "last_offset",  # noqa: E501
        "pagination": "pagination",  # noqa: E501
        "query_errors": "query_errors",  # noqa: E501
        "query_id": "query_id",  # noqa: E501
        "query_lambda_path": "query_lambda_path",  # noqa: E501
        "results": "results",  # noqa: E501
        "results_total_doc_count": "results_total_doc_count",  # noqa: E501
        "stats": "stats",  # noqa: E501
        "warnings": "warnings",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """QueryResponse - a model defined in OpenAPI.

        Keyword Args:
            collections ([str]): Collections referenced in the query. [optional]
            column_fields ([QueryFieldType]): Per-column metadata; not
                populated for `SELECT *` queries. [optional]
            last_offset (str): Log offset the query was written to, for
                write queries. [optional]
            pagination (PaginationInfo): [optional]
            query_errors ([QueryError]): Errors hit while executing. [optional]
            query_id (str): Unique ID for this query. [optional]
            query_lambda_path (str): Full path (with version) of the executed
                query lambda. [optional]
            results ([dict]): Result rows from the query. [optional]
            results_total_doc_count (int): Number of results generated.
                [optional]
            stats (QueryResponseStats): [optional]
            warnings ([str]): Warnings; only populated when
                `generate_warnings` was requested. [optional]

        Framework control options (all popped from kwargs before the
        remaining keys are treated as model fields):
            _check_type (bool): type-check values against openapi_types
                (default False for performance).
            _spec_property_naming (bool): True when keys use serialized
                (spec) names rather than pythonic names (default False).
            _path_to_item (tuple/list): path to this model within the
                response being deserialized.
            _configuration (Configuration): client configuration used during
                deserialization; enables discard_unknown_keys handling.
            _visited_composed_classes (tuple): classes already traversed via
                a discriminator, so they are not revisited.
        """
        _check_type = kwargs.pop("_check_type", False)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
        # Skip OpenApiModel.__new__ and allocate the instance directly.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Model fields must be passed by keyword only.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            if var_name == "results":
                # Wrap each raw result row in Document so Rockset-typed
                # values (dates, intervals, geography, ...) become native
                # Python objects.
                setattr(self, "results", [Document(doc) for doc in var_value])
                continue
            setattr(self, var_name, var_value)
        return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):  # noqa: E501
    """QueryResponse - a model defined in OpenAPI

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
                            will be type checked and a TypeError will be
                            raised if the wrong type is input.
                            Defaults to False for performance
        _path_to_item (tuple/list): This is a list of keys or values to
                            drill down to the model in received_data
                            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
                            are serialized names, as specified in the OpenAPI document.
                            False if the variable names in the input data
                            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
                            deserializing a file_type parameter.
                            If passed, type conversion is attempted
                            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
                            classes that we have traveled through so that
                            if we see that class again we will not use its
                            discriminator again.
                            When traveling through a discriminator, the
                            composed schema that is
                            is traveled through is added to this set.
                            For example if Animal has a discriminator
                            petType and we pass in "Dog", and the class Dog
                            allOf includes Animal, we move through Animal
                            once using the discriminator, and pick Dog.
                            Then in Dog, we will make an instance of the
                            Animal class but this time we won't travel
                            through its discriminator because we passed in
                            _visited_composed_classes = (Animal,)
        collections ([str]): List of collections referenced in the query.. [optional]  # noqa: E501
        column_fields ([QueryFieldType]): Meta information about each column in the result set. Not populated in `SELECT *` queries.. [optional]  # noqa: E501
        last_offset (str): If this was a write query, this is the log offset the query was written to. [optional]  # noqa: E501
        pagination (PaginationInfo): [optional]  # noqa: E501
        query_errors ([QueryError]): Errors encountered while executing the query.. [optional]  # noqa: E501
        query_id (str): Unique ID for this query.. [optional]  # noqa: E501
        query_lambda_path (str): The full path of the executed query lambda. Includes version information.. [optional]  # noqa: E501
        results ([{str: (bool, date, datetime, dict, float, int, list, str, none_type)}]): Results from the query.. [optional]  # noqa: E501
        results_total_doc_count (int): Number of results generated by the query. [optional]  # noqa: E501
        stats (QueryResponseStats): [optional]  # noqa: E501
        warnings ([str]): Warnings generated by the query. Only populated if `generate_warnings` is specified in the query request.. [optional]  # noqa: E501
    """
    # Pop the client-side bookkeeping options first; everything left in
    # kwargs is then treated as a model property.
    _check_type = kwargs.pop("_check_type", False)
    _spec_property_naming = kwargs.pop("_spec_property_naming", False)
    _path_to_item = kwargs.pop("_path_to_item", ())
    _configuration = kwargs.pop("_configuration", None)
    _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

    # Model properties may only be passed as keyword arguments.
    if args:
        raise ApiTypeError(
            "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
            % (
                args,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    # NOTE: _data_store is assigned before any setattr() below — the model
    # presumably stores properties in it via its attribute protocol (see
    # model_utils; confirm before reordering).
    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Track this class to avoid re-entering its discriminator (see above).
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

    for var_name, var_value in kwargs.items():
        # Optionally drop keys unknown to the spec when the configuration
        # requests it and the model accepts no additional properties.
        if (
            var_name not in self.attribute_map
            and self._configuration is not None
            and self._configuration.discard_unknown_keys
            and self.additional_properties_type is None
        ):
            # discard variable.
            continue
        if var_name == "results":
            # Expose each raw result row as a Document (dict subclass)
            # instead of a plain dict; skips the read-only check below.
            setattr(self, "results", [Document(doc) for doc in var_value])
            continue
        setattr(self, var_name, var_value)
        if var_name in self.read_only_vars:
            raise ApiAttributeError(
                f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                f"class with read only attributes."
            )
import datetime
import geojson
# all Rockset data types
# Key that tags a wire value with its Rockset type; the payload itself is
# stored under "value" (see Document.__init__ below), e.g.
# {"__rockset_type": "date", "value": "2021-01-01"}.
DATATYPE_META = "__rockset_type"
# Scalar types.
DATATYPE_INT = "int"
DATATYPE_FLOAT = "float"
DATATYPE_BOOL = "bool"
DATATYPE_STRING = "string"
DATATYPE_BYTES = "bytes"
DATATYPE_NULL = "null"
DATATYPE_NULL_TYPE = "null_type"
# Container types.
DATATYPE_ARRAY = "array"
DATATYPE_OBJECT = "object"
# Date/time types (tagged values of these kinds are converted to native
# Python objects by Document.__init__).
DATATYPE_DATE = "date"
DATATYPE_DATETIME = "datetime"
DATATYPE_TIME = "time"
DATATYPE_TIMESTAMP = "timestamp"
DATATYPE_MONTH_INTERVAL = "month_interval"
DATATYPE_MICROSECOND_INTERVAL = "microsecond_interval"
# Geographic data, materialized via the geojson package.
DATATYPE_GEOGRAPHY = "geography"
def _date_fromisoformat(s):
dt = datetime.datetime.strptime(s, "%Y-%m-%d")
return dt.date()
def _time_fromisoformat(s):
try:
dt = datetime.datetime.strptime(s, "%H:%M:%S.%f")
except ValueError:
dt = datetime.datetime.strptime(s, "%H:%M:%S")
return datetime.time(
hour=dt.hour, minute=dt.minute, second=dt.second, microsecond=dt.microsecond
)
def _datetime_fromisoformat(s):
try:
dt = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
dt = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
return dt
def _timedelta_from_microseconds(us):
return datetime.timedelta(microseconds=us)
class Document(dict):
    """Represents a single record or row in query results.

    This is a sub-class of dict, so treat this object as a dict for all
    practical purposes.  Only the constructor is overridden: it replaces
    Rockset's type-tagged wire values ({DATATYPE_META: ..., "value": ...})
    with native Python objects (date, time, datetime, timedelta, GeoJSON).
    """

    def __init__(self, *args, **kwargs):
        """Build the dict, then convert any type-tagged values in place."""
        super(Document, self).__init__(*args, **kwargs)
        for k in self.keys():
            # Only dict values carrying the Rockset type tag need work.
            if not isinstance(self[k], dict):
                continue
            if DATATYPE_META not in self[k]:
                continue
            t = self[k][DATATYPE_META].lower()
            v = self[k]["value"]
            if t == DATATYPE_DATE:
                self[k] = _date_fromisoformat(v)
            elif t == DATATYPE_TIME:
                self[k] = _time_fromisoformat(v)
            elif t == DATATYPE_DATETIME:
                self[k] = _datetime_fromisoformat(v)
            elif t == DATATYPE_MICROSECOND_INTERVAL:
                self[k] = _timedelta_from_microseconds(v)
            elif t == DATATYPE_GEOGRAPHY:
                self[k] = geojson.GeoJSON.to_instance(v)

    def _py_type_to_rs_type(self, v):
        """Return the Rockset data type name for the Python value *v*.

        Returns None for Python types with no Rockset mapping (e.g. tuple,
        set).
        """
        if isinstance(v, bool):
            # check for bool before int, since bools are ints too
            return DATATYPE_BOOL
        elif isinstance(v, int):
            return DATATYPE_INT
        elif isinstance(v, float):
            return DATATYPE_FLOAT
        elif isinstance(v, str):
            return DATATYPE_STRING
        elif isinstance(v, bytes):
            return DATATYPE_BYTES
        elif isinstance(v, type(None)):
            return DATATYPE_NULL
        elif isinstance(v, list):
            return DATATYPE_ARRAY
        elif isinstance(v, datetime.datetime):
            # check for datetime first, since datetimes are dates too
            return DATATYPE_DATETIME
        elif isinstance(v, datetime.date):
            return DATATYPE_DATE
        elif isinstance(v, datetime.time):
            return DATATYPE_TIME
        elif isinstance(v, datetime.timedelta):
            return DATATYPE_MICROSECOND_INTERVAL
        elif isinstance(v, geojson.GeoJSON):
            return DATATYPE_GEOGRAPHY
        elif isinstance(v, dict):  # keep this in the end
            if DATATYPE_META not in v:
                return DATATYPE_OBJECT
            return v[DATATYPE_META].lower()
        # No matching Rockset type; make the fall-through explicit.
        return None

    def fields(self, columns=None):
        """Describe the requested *columns* (all keys, sorted, by default).

        Returns:
            list: one {"name": ..., "type": ...} dict per column.
        """
        columns = columns or sorted(self)
        # BUG FIX: the default used to be the class ``type(None)`` rather
        # than the value ``None``; a class matches no isinstance() branch,
        # so missing columns were reported with type None instead of
        # "null".  Passing no default (i.e. None) reports them as "null".
        return [
            {"name": c, "type": self._py_type_to_rs_type(self.get(c))}
            for c in columns
        ]
class RocksetException(Exception):
    """Root of the exception hierarchy for all Rockset client errors.

    Catch this class to handle any error raised by this library.
    """
class ApiTypeError(RocksetException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Signal that a value had the wrong type.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the current
                item, or None if unset
            valid_classes (tuple): the primitive classes the current item
                should be an instance of, or None if unset
            key_type (bool): True if the item is a key in a dict, False if
                it is a value in a dict or an item in a list, None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(msg)
class ApiValueError(RocksetException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Signal that a value was invalid.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): path to the offending entry in the
                received_data dict, or None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiAttributeError(RocksetException, AttributeError):
    def __init__(self, msg, path_to_item=None):
        """Raised when an attribute reference or assignment fails.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the offending entry in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiAttributeError, self).__init__(msg)
class ApiKeyError(RocksetException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Signal that an expected key was missing.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the offending entry in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(RocksetException):
    def __init__(self, status=None, reason=None, http_resp=None):
        """Record the outcome of a failed HTTP call.

        When *http_resp* is given, status, reason, body and headers are
        taken from the response object; otherwise they come from the
        explicit arguments (body and headers default to None).
        """
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Custom error message for the exception."""
        message = f"({self.status})\nReason: {self.reason}\n"
        if self.headers:
            message += f"HTTP response headers: {self.headers}\n"
        if self.body:
            message += f"HTTP response body: {self.body}\n"
        return message
class NotFoundException(ApiException):
    """ApiException subclass raised for 'not found' API errors.

    The redundant no-op __init__ override (same signature, straight
    super() call) was removed; ApiException.__init__ is inherited as-is.
    """
class UnauthorizedException(ApiException):
    """ApiException subclass raised for 'unauthorized' API errors.

    The redundant no-op __init__ override (same signature, straight
    super() call) was removed; ApiException.__init__ is inherited as-is.
    """
class ForbiddenException(ApiException):
    """ApiException subclass raised for 'forbidden' API errors.

    The redundant no-op __init__ override (same signature, straight
    super() call) was removed; ApiException.__init__ is inherited as-is.
    """
class ServiceException(ApiException):
    """ApiException subclass raised for server-side service errors.

    The redundant no-op __init__ override (same signature, straight
    super() call) was removed; ApiException.__init__ is inherited as-is.
    """
class BadRequestException(ApiException):
    """ApiException subclass raised for 'bad request' API errors.

    The redundant no-op __init__ override (same signature, straight
    super() call) was removed; ApiException.__init__ is inherited as-is.
    """
class InitializationException(RocksetException):
    def __init__(self, reason):
        """Signal that the Rockset client was constructed incorrectly."""
        message = f"The rockset client was initialized incorrectly: {reason}"
        super().__init__(message)
class InputException(RocksetException):
    # NOTE(review): this mirrors ApiException's (status, reason, http_resp)
    # signature but derives from RocksetException directly, so the three
    # values end up in Exception.args rather than being stored as
    # status/reason/body/headers attributes — confirm this is intentional.
    def __init__(self, status=None, reason=None, http_resp=None):
        super(InputException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
    """Render a path-to-item list as a chain of subscript expressions.

    Integers become ``[i]`` index segments; everything else becomes a
    quoted ``['key']`` segment.
    """
    segments = []
    for part in path_to_item:
        if isinstance(part, int):
            segments.append("[{0}]".format(part))
        else:
            segments.append("['{0}']".format(part))
    return "".join(segments)
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import asyncio
from rockset_v2.api_client import ApiClient, Endpoint as _Endpoint
from rockset_v2.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from rockset_v2.model.create_workspace_request import CreateWorkspaceRequest
from rockset_v2.model.create_workspace_response import CreateWorkspaceResponse
from rockset_v2.model.delete_workspace_response import DeleteWorkspaceResponse
from rockset_v2.model.error_model import ErrorModel
from rockset_v2.model.get_workspace_response import GetWorkspaceResponse
from rockset_v2.model.list_workspaces_response import ListWorkspacesResponse
from rockset_v2.models import *
class Workspaces(object):
    """Client bindings for the workspace management REST endpoints.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        """Bind the endpoint descriptors to *api_client*.

        A default-configured ApiClient is created when none is supplied.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        # POST /v1/orgs/self/ws -- create a workspace.
        self.create_endpoint = _Endpoint(
            settings={
                'response_type': (CreateWorkspaceResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self/ws',
                'operation_id': 'create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'create_workspace_request',
                ],
                'required': [],
                'nullable': [],
                'enum': [],
                'validation': [],
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {
                    'create_workspace_request':
                        (CreateWorkspaceRequest,),
                },
                'attribute_map': {},
                'location_map': {
                    'create_workspace_request': 'body',
                },
                'collection_format_map': {},
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ],
            },
            api_client=api_client,
        )

        # DELETE /v1/orgs/self/ws/{workspace} -- remove a workspace.
        self.delete_endpoint = _Endpoint(
            settings={
                'response_type': (DeleteWorkspaceResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self/ws/{workspace}',
                'operation_id': 'delete',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'workspace',
                ],
                'required': [
                    'workspace',
                ],
                'nullable': [],
                'enum': [],
                'validation': [],
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {
                    'workspace':
                        (str,),
                },
                'attribute_map': {
                    'workspace': 'workspace',
                },
                'location_map': {
                    'workspace': 'path',
                },
                'collection_format_map': {},
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
        )

        # GET /v1/orgs/self/ws/{workspace} -- fetch a single workspace.
        self.get_endpoint = _Endpoint(
            settings={
                'response_type': (GetWorkspaceResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self/ws/{workspace}',
                'operation_id': 'get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'workspace',
                ],
                'required': [
                    'workspace',
                ],
                'nullable': [],
                'enum': [],
                'validation': [],
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {
                    'workspace':
                        (str,),
                },
                'attribute_map': {
                    'workspace': 'workspace',
                },
                'location_map': {
                    'workspace': 'path',
                },
                'collection_format_map': {},
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
        )

        # GET /v1/orgs/self/ws -- list all workspaces.
        self.list_endpoint = _Endpoint(
            settings={
                'response_type': (ListWorkspacesResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self/ws',
                'operation_id': 'list',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [],
                'required': [],
                'nullable': [],
                'enum': [],
                'validation': [],
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {},
                'attribute_map': {},
                'location_map': {},
                'collection_format_map': {},
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
        )

    @staticmethod
    def _apply_call_option_defaults(kwargs):
        """Fill in the per-call options shared by every method of this API.

        async_req (bool): execute the request asynchronously; the method
            then returns an asyncio.Future instead of the response.
            Default False.
        _return_http_data_only (bool): return response data without head
            status code and headers. Default True.
        _preload_content (bool): if False, return the urllib3.HTTPResponse
            object without reading/decoding response data. Default True.
        _request_timeout (int/float/tuple): total request timeout, or a
            (connection, read) pair of timeouts. Default None.
        _check_input_type (bool): type-check the data sent to the server.
            Default True.
        _check_return_type (bool): type-check (and convert into model
            types) the data received from the server. Default True.
        _spec_property_naming (bool): True if the variable names in the
            input data are serialized (spec) names; False for pythonic
            snake-case names. Default False.
        _content_type (str/None): force body content-type. Default None
            (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        """
        kwargs['async_req'] = kwargs.get('async_req', False)
        kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
        kwargs['_preload_content'] = kwargs.get('_preload_content', True)
        kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
        kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
        kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
        kwargs['_spec_property_naming'] = kwargs.get('_spec_property_naming', False)
        kwargs['_content_type'] = kwargs.get('_content_type')
        kwargs['_host_index'] = kwargs.get('_host_index')

    def create(
        self,
        *,
        name: str,
        description: str = None,
        **kwargs
    ) -> typing.Union[CreateWorkspaceResponse, asyncio.Future]:
        """Create Workspace  # noqa: E501

        Create a new workspace.  Synchronous by default; pass
        async_req=True to get an asyncio.Future instead:

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Workspaces.create(
            description="Datasets of system logs for the ops team.",
            name="event_logs",
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            name (str): Descriptive label and unique identifier. [required]
            description (str): Longer explanation for the workspace. [optional]
            (common request options are documented on
            _apply_call_option_defaults)

        Returns:
            CreateWorkspaceResponse, or an asyncio.Future resolving to it
            when async_req=True.
        """
        self._apply_call_option_defaults(kwargs)
        # BUG FIX: this used to read the (never supplied)
        # 'create_workspace_request' entry back out of kwargs, which raised
        # KeyError on every call; build the request body from the method
        # arguments instead.
        request_fields = {'name': name}
        if description is not None:
            request_fields['description'] = description
        kwargs['create_workspace_request'] = CreateWorkspaceRequest(**request_fields)
        return self.create_endpoint.call_with_http_info(**kwargs)

    def delete(
        self,
        *,
        workspace="commons",
        **kwargs
    ) -> typing.Union[DeleteWorkspaceResponse, asyncio.Future]:
        """Delete Workspace  # noqa: E501

        Remove a workspace.  Synchronous by default; pass async_req=True
        to get an asyncio.Future instead:

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Workspaces.delete(
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            workspace (str): name of the workspace. [required] if omitted
                the server will use the default value of "commons"
            (common request options are documented on
            _apply_call_option_defaults)

        Returns:
            DeleteWorkspaceResponse, or an asyncio.Future resolving to it
            when async_req=True.
        """
        self._apply_call_option_defaults(kwargs)
        kwargs['workspace'] = workspace
        return self.delete_endpoint.call_with_http_info(**kwargs)

    def get(
        self,
        *,
        workspace="commons",
        **kwargs
    ) -> typing.Union[GetWorkspaceResponse, asyncio.Future]:
        """Retrieve Workspace  # noqa: E501

        Get information about a single workspace.  Synchronous by default;
        pass async_req=True to get an asyncio.Future instead:

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Workspaces.get(
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            workspace (str): name of the workspace. [required] if omitted
                the server will use the default value of "commons"
            (common request options are documented on
            _apply_call_option_defaults)

        Returns:
            GetWorkspaceResponse, or an asyncio.Future resolving to it
            when async_req=True.
        """
        self._apply_call_option_defaults(kwargs)
        kwargs['workspace'] = workspace
        return self.get_endpoint.call_with_http_info(**kwargs)

    def list(
        self,
        **kwargs
    ) -> typing.Union[ListWorkspacesResponse, asyncio.Future]:
        """List Workspaces  # noqa: E501

        List all workspaces in an organization.  Synchronous by default;
        pass async_req=True to get an asyncio.Future instead:

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Workspaces.list(
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            (common request options are documented on
            _apply_call_option_defaults)

        Returns:
            ListWorkspacesResponse, or an asyncio.Future resolving to it
            when async_req=True.
        """
        self._apply_call_option_defaults(kwargs)
        return self.list_endpoint.call_with_http_info(**kwargs)

    # Per-operation metadata: name of the body parameter and model of the
    # successful response (presumably consumed elsewhere in the client —
    # confirm against callers).
    body_params_dict = dict()
    return_types_dict = dict()
    body_params_dict['create'] = 'create_workspace_request'
    # BUG FIX: this previously mapped to CreateWorkspaceRequest, which
    # contradicts the endpoint's declared response_type above; create's
    # response model is CreateWorkspaceResponse.
    return_types_dict['create'] = CreateWorkspaceResponse
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import asyncio
from rockset_v2.api_client import ApiClient, Endpoint as _Endpoint
from rockset_v2.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from rockset_v2.model.error_model import ErrorModel
from rockset_v2.model.organization_response import OrganizationResponse
from rockset_v2.models import *
class Organizations(object):
    """Client bindings for the organization REST endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        """Bind the endpoint descriptor to *api_client* (a default
        ApiClient is created when none is supplied)."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # GET /v1/orgs/self -- fetch the current organization.
        self.get_endpoint = _Endpoint(
            settings={
                'response_type': (OrganizationResponse,),
                'auth': [
                    'apikey'
                ],
                'endpoint_path': '/v1/orgs/self',
                'operation_id': 'get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [],
                'required': [],
                'nullable': [],
                'enum': [],
                'validation': [],
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {},
                'attribute_map': {},
                'location_map': {},
                'collection_format_map': {},
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
        )

    def get(
        self,
        **kwargs
    ) -> typing.Union[OrganizationResponse, asyncio.Future]:
        """Get Organization  # noqa: E501

        Retrieve information about current organization.  Synchronous by
        default; pass async_req=True to get an asyncio.Future instead:

        ```python
        rs = RocksetClient(api_key=APIKEY)
        future = rs.Organizations.get(
            async_req=True,
        )
        result = await future
        ```

        Keyword Args:
            async_req (bool): execute request asynchronously.
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse
                object will be returned without reading/decoding response
                data. Default is True.
            _request_timeout (int/float/tuple): total request timeout, or a
                (connection, read) pair of timeouts. Default is None.
            _check_input_type (bool): type-check the data sent to the
                server. Default is True.
            _check_return_type (bool): type-check (and convert into model
                types) the data received from the server. Default is True.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. Default is False.
            _content_type (str/None): force body content-type. Default is
                None (predicted from allowed content-types and body).
            _host_index (int/None): index of the server to use. Default is
                read from the configuration.

        Returns:
            OrganizationResponse, or an asyncio.Future resolving to it
            when async_req=True.
        """
        option_defaults = (
            ('async_req', False),
            ('_return_http_data_only', True),
            ('_preload_content', True),
            ('_request_timeout', None),
            ('_check_input_type', True),
            ('_check_return_type', True),
            ('_spec_property_naming', False),
            ('_content_type', None),
            ('_host_index', None),
        )
        for option, default in option_defaults:
            kwargs[option] = kwargs.get(option, default)
        return self.get_endpoint.call_with_http_info(**kwargs)

    # Per-operation body-parameter names and response models; empty here
    # because the single operation takes no body.
    body_params_dict = {}
    return_types_dict = {}
class MOD:
    """Scans detected CMS installations for installed plugin/module versions."""

    def __init__(self, agent):
        """Store the agent (feedback, definitions, analytics).

        bcolors is kept as an attribute for terminal colouring; it is not
        used within this class itself.
        """
        from base import bcolors
        self.bcolors = bcolors
        self.agent = agent

    def scan(self, results):
        """Find additional modules for the CMS systems in *results*.

        Each entry of *results* that matches a 'MOD' definition (linked via
        the SRC definition name) gains a 'modules' dict mapping module
        directory name to the detected version string ("unknown" when the
        version regex does not match).

        Args:
            results (dict): hit -> file-info mapping from the source scan;
                annotated in place.

        Returns:
            dict: the same *results* mapping.
        """
        import os
        import re

        found = 0
        self.agent.fb(1, 'Scanning modules')
        # .items() instead of the Python-2-only .iteritems() so the loop
        # works on both interpreter generations; also avoid shadowing the
        # Python 2 builtin ``file``.
        for hit, entry in results.items():
            idx = 0
            # MOD definitions are linked to a SRC name, and one SRC can have
            # multiple MOD definitions — therefore walk the whole list.
            while idx < len(self.agent.definitions):
                definition = self.agent.definitions[idx]
                if definition['type'] == 'MOD' and definition['name'] == entry['name']:
                    # Derive the module base directory from the install path
                    # using the definition's 'old|new|ext' mapping.
                    old, new, ext = definition['file'].split('|')
                    basedir = entry['path'].replace(old, new)
                    # NOTE: the concatenation below assumes basedir ends
                    # with a path separator — confirm against definitions.
                    if len(basedir) > 0 and os.path.isdir(basedir):
                        self.agent.fb(1, 'Checking for ' + entry['name'] + ' plugins in ' + basedir)
                        entry['modules'] = {}
                        for basename in os.listdir(basedir):
                            if os.path.isdir(os.path.join(basedir, basename)):
                                filename = basedir + basename + '/' + basename + ext
                                if os.path.exists(filename):
                                    # Read at most 500KB into memory and
                                    # close the handle promptly (it used to
                                    # be left open).
                                    with open(filename, 'rb') as handle:
                                        content = handle.read(512000)
                                    # re.DOTALL = multiline search
                                    result = re.search(definition['regex'], content, flags=re.DOTALL)
                                    if result:
                                        # Keep only digit runs and strip
                                        # everything else (Magento notation).
                                        module_version = ".".join(re.findall(r'\d+', result.group(1)))
                                    else:
                                        module_version = "unknown"
                                    entry['modules'][basename] = module_version
                                    found += 1
                idx += 1
        self.agent.fb(1, 'Gevonden modules : ' + str(found))
        # Store analytics
        self.agent.ana['cms_mod_instances'] = found
        return results
# rockstar-py
<h2 align="center">Python transpiler for the esoteric language <a href="https://github.com/dylanbeattie/rockstar">Rockstar</a></h2>
<p align="center">
<a href="https://travis-ci.com/yyyyyyyan/rockstar-py"><img alt="Travis (.org)" src="https://img.shields.io/travis/yyyyyyyan/rockstar-py"></a>
<a href="https://www.codacy.com/manual/yyyyyyyan/rockstar-py"><img alt="Codacy grade" src="https://img.shields.io/codacy/grade/6496fe0a545242c5bd8c4723f1d0f45f"></a>
<a href="https://pypi.org/project/rockstar-py"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/rockstar-py"></a>
<a href="https://pypi.org/project/rockstar-py"><img alt="PyPI - Status" src="https://img.shields.io/pypi/status/rockstar-py"></a>
<a href="https://pepy.tech/project/rockstar-py"><img alt="PyPI - Status" src="https://pepy.tech/badge/rockstar-py"></a>
<a href="https://pypi.org/project/rockstar-py"><img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/rockstar-py"></a>
<a href="https://pypi.org/project/rockstar-py"><img alt="PyPI - Wheel" src="https://img.shields.io/pypi/wheel/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/search?l=python"><img alt="GitHub top language" src="https://img.shields.io/github/languages/top/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/psf/black"><img alt="Code Style - Black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/graphs/contributors"><img alt="GitHub contributors" src="https://img.shields.io/github/contributors/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/stargazers"><img alt="GitHub stars" src="https://img.shields.io/github/stars/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/issues"><img alt="GitHub issues" src="https://img.shields.io/github/issues/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py"><img alt="GitHub code size in bytes" src="https://img.shields.io/github/languages/code-size/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/releases/latest"><img alt="GitHub Release Date" src="https://img.shields.io/github/release-date/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/commits/master"><img alt="GitHub commits since tagged version" src="https://img.shields.io/github/commits-since/yyyyyyyan/rockstar-py/latest"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/commits/master"><img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/yyyyyyyan/rockstar-py"></a>
<a href="https://github.com/yyyyyyyan/rockstar-py/blob/master/LICENSE.txt"><img alt="License - MIT" src="https://img.shields.io/github/license/yyyyyyyan/rockstar-py"></a>
</p>
## Getting Started
These instructions will get you a copy of the project up and running on
your local machine for development and testing purposes.
### Installing
First, make sure you have installed a supported Python version (\>=
3.6).
Now, the easiest way of installing **rockstar-py** is using pip:
pip install rockstar-py
(This may require `sudo` if you're installing globally on a \*nix
system.)
You can also clone this project using git and install the package with
setuptools:
git clone https://github.com/yyyyyyyan/rockstar-py.git
cd rockstar-py
python3 setup.py install
## Usage
If you installed the package using pip or setuptools, you can simply run rockstar-py in the command line:
rockstar-py [-h] (-i INPUT | --stdin) [-o OUTPUT | --stdout | --exec] [-v]
Otherwise, you can run the transpiler from inside the `rockstar-py` folder by running Python with the `rockstarpy` package:
python3 rockstarpy [-h] (-i INPUT | --stdin) [-o OUTPUT | --stdout | --exec] [-v]
Call `rockstar-py` with the flag `-h`/`--help` to see a description of all options:
usage: rockstar-py [-h] (-i INPUT | --stdin) [-o OUTPUT | --stdout | --exec] [-v]
Python transpiler for the esoteric language Rockstar
optional arguments:
-h, --help show this help message and exit
-i INPUT, --input INPUT
Input file (.rock)
--stdin Stream in stdin
-o OUTPUT, --output OUTPUT
Output file (.py)
--stdout Stream to stdout
--exec Execute (without saving) the transpiled code
-v, --version Version
## Examples
Just to make it more clear, some examples with the
[fizz.rock](https://github.com/yyyyyyyan/rockstar-py/blob/master/tests/fizz.rock)
code.
### Basic usage
> rockstar-py -i fizz.rock -o fizz.py
> ls
fizz.py fizz.rock
> cat fizz.py
def Midnight(your_heart, your_soul):
while your_heart >= your_soul: # this is a comment
your_heart = your_heart - your_soul
return your_heart
Desire = 100
my_world = False
Fire = 3 # i love comments
Hate = 5
while not my_world == Desire:
my_world += 1
if Midnight(my_world, Fire) == False and Midnight(my_world, Hate) == False:
print("FizzBuzz!")
continue
if Midnight(my_world, Fire) == False:
print("Fizz!")
continue
if Midnight(my_world, Hate) == False:
print("Buzz!")
continue
print(my_world)
### Using `--stdout`
> rockstar-py -i fizz.rock --stdout
def Midnight(your_heart, your_soul):
while your_heart >= your_soul: # this is a comment
your_heart = your_heart - your_soul
return your_heart
Desire = 100
my_world = False
Fire = 3 # i love comments
Hate = 5
while not my_world == Desire:
my_world += 1
if Midnight(my_world, Fire) == False and Midnight(my_world, Hate) == False:
print("FizzBuzz!")
continue
if Midnight(my_world, Fire) == False:
print("Fizz!")
continue
if Midnight(my_world, Hate) == False:
print("Buzz!")
continue
print(my_world)
### Using `--stdin`
> rockstar-py --stdin -o fizz.py
Midnight takes your heart and your soul
While your heart is as high as your soul (this is a comment)
Put your heart without your soul into your heart
Give back your heart
Desire's a lovestruck ladykiller
My world is empty
Fire's ice (i love comments)
Hate is water
Until my world is Desire,
Build my world up
If Midnight taking my world, Fire is nothing and Midnight taking my world, Hate is nothing
Shout "FizzBuzz!"
Take it to the top
If Midnight taking my world, Fire is nothing
Shout "Fizz!"
Take it to the top
If Midnight taking my world, Hate is nothing
Say "Buzz!"
Take it to the top
Whisper my world
[Ctrl+D]
> ls
fizz.py fizz.rock
### Using `--exec`
> rockstar-py -i fizz.rock --exec
1
2
Fizz!
4
Buzz!
Fizz!
7
8
Fizz!
Buzz!
11
Fizz!
13
14
FizzBuzz!
16
17
Fizz!
19
Buzz!
Fizz!
22
23
Fizz!
Buzz!
26
Fizz!
28
29
FizzBuzz!
31
32
Fizz!
34
Buzz!
Fizz!
37
38
Fizz!
Buzz!
41
Fizz!
43
44
FizzBuzz!
46
47
Fizz!
49
Buzz!
Fizz!
52
53
Fizz!
Buzz!
56
Fizz!
58
59
FizzBuzz!
61
62
Fizz!
64
Buzz!
Fizz!
67
68
Fizz!
Buzz!
71
Fizz!
73
74
FizzBuzz!
76
77
Fizz!
79
Buzz!
Fizz!
82
83
Fizz!
Buzz!
86
Fizz!
88
89
FizzBuzz!
91
92
Fizz!
94
Buzz!
Fizz!
97
98
Fizz!
Buzz!
## Contributing
The project has basically reached its end, but I'm still accepting pull
requests that improve speed and legibility of the code.
## Authors
- **[yyyyyyyan](https://github.com/yyyyyyyan)** - *Initial work*
## Contributors
Huge thanks to everyone who is contributing to this project. Check them
out at [Contributors](https://github.com/yyyyyyyan/rockstar-py/graphs/contributors)!
## License
This project is licensed under the MIT License - see the
[LICENSE](https://github.com/yyyyyyyan/rockstar-py/blob/master/LICENSE)
file for details.
## Acknowledgments
- Hat tip to [dylanbeattie](https://github.com/dylanbeattie/) for creating Rockstar
- The FizzBuzz example works well. If valid code doesn’t work, create an issue so I can get a look.
- I’ll work on the readability and organization of the code, would love suggestions on how/where to do that.
- I'd also love help with the tests.
| /rockstar-py-2.1.0.tar.gz/rockstar-py-2.1.0/README.md | 0.658857 | 0.724846 | README.md | pypi |
import re
class Transpiler(object):
    """Line-by-line transpiler from the Rockstar language to Python.

    The transpiler is stateful across calls to :meth:`transpile_line`: it
    tracks the current indentation level, whether it is inside a function
    body, the set of module-level names seen so far (for ``global``
    declarations) and the most recently assigned variable (the target of
    pronouns such as "it" or "her").
    """

    # A single bare word, e.g. "Desire".
    SIMPLE_VARIABLE_FMT = r"\b[A-Za-z]+\b"
    # An article/possessive followed by a lowercase word, e.g. "my world".
    COMMON_VARIABLE_FMT = r"\b(?:[Aa]n?|[Tt]he|[Mm]y|[Yy]our) [a-z]+\b"
    # One or more capitalised words, e.g. "Doctor Feelgood".
    PROPER_VARIABLE_FMT = r"\b[A-Z][A-Za-z]*(?: [A-Z][A-Za-z]*)*\b"
    # Any of the three variable notations; multi-word forms are tried first.
    REGEX_VARIABLES = r"(?:{}|{}|{})".format(COMMON_VARIABLE_FMT, PROPER_VARIABLE_FMT, SIMPLE_VARIABLE_FMT)
    # A double-quoted string literal (no escape handling).
    QUOTE_STR_FMT = r"\"[^\"]*\""

    def __init__(self):
        """Initialise the per-file transpilation state."""
        self.indentation_style = " " * 4
        self._current_indentation = 0
        # True while transpiling the body of a Rockstar function.
        self.in_function = False
        # Names assigned at module level; used to emit ``global`` statements.
        self.globals = set()
        # Target of pronouns ("it", "her", ...): the last assigned variable.
        self.most_recently_named = ""
        # Verbatim textual substitutions applied to every line (applied twice
        # by transpile_line: before and after poetic-literal expansion).
        self.simple_subs = {
            "(": "#",
            ")": "",
            "Give back": "return",
            "Take it to the top": "continue",
            "Break it down": "break",
            " false ": " False ",
            " wrong ": " False ",
            " no ": " False ",
            " lies ": " False ",
            " null ": " False ",
            " nothing ": " False ",
            " nowhere ": " False ",
            " nobody ": " False ",
            " empty ": " False ",
            " gone ": " False ",
            " mysterious ": " False ",
            " true ": " True ",
            " right ": " True ",
            " yes ": " True ",
            " ok ": " True ",
            " plus ": " + ",
            " with ": " + ",
            " minus ": " - ",
            " without ": " - ",
            " times ": " * ",
            " of ": " * ",
            " over ": " / ",
            " is higher than ": " > ",
            " is greater than ": " > ",
            " is bigger than ": " > ",
            " is stronger than ": " > ",
            " is lower than ": " < ",
            " is less than ": " < ",
            " is smaller than ": " < ",
            " is weaker than ": " < ",
            " is as high as ": " >= ",
            " is as great as ": " >= ",
            " is as big as ": " >= ",
            " is as strong as ": " >= ",
            " is as low as ": " <= ",
            " is as little as ": " <= ",
            " is as small as ": " <= ",
            " is as weak as ": " <= ",
            " is not ": " != ",
            " aint ": " != ",
            "Until ": "while not ",
            "While ": "while ",
        }

    @property
    def current_indentation(self):
        """Current indentation depth in levels (never negative)."""
        return self._current_indentation

    @current_indentation.setter
    def current_indentation(self, value):
        # Clamp at zero so dedenting past module level cannot go negative.
        self._current_indentation = value if value > 0 else 0

    def get_comments(self, line):
        """Split ``line`` into ``(code, comment)``.

        Rockstar comments are parenthesised; the first ``(...)`` group is
        removed from the code and returned as a trailing Python ``#``
        comment.  Raises ``SyntaxError`` on an unmatched parenthesis.
        """
        comment_match = re.search(r"\((.*)\)", line)
        if comment_match:
            line = line.replace(comment_match.group(), "")
            comment = " # " + comment_match.group(1)
        elif "(" in line or ")" in line:
            raise SyntaxError("Missing parentheses in comment")
        else:
            comment = ""
        return line, comment

    def create_function(self, line):
        """Turn ``<name> takes <arg> and <arg>...`` into a Python ``def``.

        Also opens a new indentation level and marks the transpiler as being
        inside a function body.  Lines that do not match are returned
        unchanged.
        """
        match = re.match(
            r"\b({0}) takes ({0}(?: and {0})*)\b".format(self.REGEX_VARIABLES), line
        )
        if match:
            self.current_indentation += 1
            line = "def {}({}):".format(
                match.group(1), match.group(2).replace(" and", ",")
            )
            self.in_function = True
        return line

    def create_while(self, line):
        """Finish a ``while`` header produced by the simple substitutions.

        Converts the remaining `` is `` comparison to ``==``, appends the
        colon and opens a new indentation level.
        """
        if line.startswith("while "):
            line = line.replace(" is ", " == ")
            line += ":"
            self.current_indentation += 1
        return line

    def create_if(self, line):
        """Turn an ``If ...`` line into a Python ``if ...:`` header."""
        match = re.match(r"If .*", line)
        if match:
            self.current_indentation += 1
            line = line.replace(" is ", " == ")
            line = line.replace("If", "if")
            line += ":"
        return line

    def replace_let_be_with_is(self, line):
        """Rewrite ``Let X be Y`` as ``X is Y`` so that the later rules
        treat it as an ordinary assignment."""
        match = re.match(r"Let ({0}) be (.+)".format(self.REGEX_VARIABLES), line)
        if match:
            return match.group(1) + " is " + match.group(2)
        return line

    def find_poetic_number_literal(self, line):
        """Expand a poetic number literal into a numeric assignment.

        Each word contributes ``len(word) % 10`` as one digit (hyphens count
        as letters, other punctuation is stripped); words that are already
        numeric are kept verbatim, and a trailing period becomes a decimal
        point.  Boolean keywords are excluded so type literals survive.
        """
        poetic_type_literals_keywords = ["True", "False"]
        match = re.match(
            r"\b({0})(?: is|\'s| was| were) ([\d\w\.,\:\!\;\'\-\s]+)".format(
                self.REGEX_VARIABLES
            ),
            line,
        )
        if match and match.group(2).split()[0] not in poetic_type_literals_keywords:
            line = "{} = ".format(match.group(1))
            for word_number in match.group(2).split():
                if re.match(r"\d+", word_number):
                    line += str(word_number)
                else:
                    period = "." if word_number.endswith(".") else ""
                    alpha_word = re.sub(r"[^A-Za-z\-]", "", word_number)
                    line += str(len(alpha_word) % 10) + period
        return line

    def find_variables(self, line, fmt, clean_func=str):
        """Replace every match of ``fmt`` in ``line`` with its canonical
        Python name: ``clean_func`` applied, spaces turned into underscores."""
        variables = set(re.findall(fmt, line))
        if variables:
            for variable in variables:
                line = re.sub(r"\b{}\b".format(variable), clean_func(variable).replace(" ", "_"), line)
        return line

    def find_proper_variables(self, line):
        """Canonicalise proper variables (``Doctor Feelgood`` -> ``Doctor_Feelgood``)."""
        return self.find_variables(line, self.PROPER_VARIABLE_FMT, lambda variable: variable.title())

    def find_common_variables(self, line):
        """Canonicalise common variables (``my world`` -> ``my_world``)."""
        return self.find_variables(line, self.COMMON_VARIABLE_FMT, lambda variable: variable.lower())

    def find_named(self, line):
        """Return the (already snake_cased) target of an assignment line,
        or ``None`` if the line is not an assignment."""
        match = re.match(r"([A-Za-z]+(?:_[A-Za-z]+)*) [+-]?= .+", line)
        if match:
            return match.group(1)

    def get_strings(self, line):
        """Extract string literals so later substitutions cannot touch them.

        Returns ``(line, strings)`` where each literal in ``line`` has been
        replaced by a ``{str_N}`` placeholder and ``strings`` maps the
        placeholder names back to the quoted literals.  ``X says ...`` turns
        the rest of the line into a single string assignment.
        """
        strings = dict()
        says_match = re.match(r"({}) says (.*)".format(self.REGEX_VARIABLES), line)
        if says_match:
            line = says_match.group(1) + ' = {str_0}'
            strings["str_0"] = '"{}"'.format(says_match.group(2).replace('"', r'\"'))
            return line, strings
        else:
            for str_number, string in enumerate(re.findall(self.QUOTE_STR_FMT, line)):
                fmt_var = f"str_{str_number}"
                line = re.sub(self.QUOTE_STR_FMT, f"{{str_{str_number}}}", line, 1)
                strings[fmt_var] = string
            return line, strings

    def transpile_line(self, line):
        """Transpile one Rockstar source line into one Python line.

        An empty line closes the current block (dedents); any other line is
        run through the full substitution pipeline and returned with its
        indentation and trailing comment attached.
        """
        if line == "\n":
            self.current_indentation -= 1
            return ""
        else:
            line_ident = self.indentation_style * self.current_indentation
            # Back at module level we are, by definition, outside any function.
            self.in_function = False if self.current_indentation == 0 else self.in_function
            # Phase 1: protect string literals and split off the comment.
            py_line, line_strings = self.get_strings(line)
            py_line, comments = self.get_comments(py_line)
            # Phase 2: first simple-substitution pass.  Each key is matched
            # verbatim; the strip/pad dance keeps word-boundary spaces intact.
            for key in self.simple_subs:
                py_line = py_line.strip()
                py_line += " "
                py_line = py_line.replace(key, self.simple_subs[key])
            py_line = py_line.strip("\n ,.;")
            # Phase 3: assignments and poetic number literals.
            py_line = self.replace_let_be_with_is(py_line)
            py_line = self.find_poetic_number_literal(py_line)
            # Remove all apostrophes (e.g. "Desire's") so names stay word-only.
            py_line = py_line.replace("'", "")
            # Phase 4: second substitution pass, catching keywords that only
            # became visible after the poetic-literal expansion.
            for key in self.simple_subs:
                py_line = py_line.strip()
                py_line += " "
                py_line = py_line.replace(key, self.simple_subs[key])
            py_line = py_line.strip("\n ,.;")
            # Phase 5: resolve pronouns to the most recently assigned name.
            most_recently_named_keywords = [
                " it ",
                " he ",
                " she ",
                " him ",
                " her ",
                " them ",
                " they ",
                " ze ",
                " hir ",
                " zie ",
                " zir ",
                " xe ",
                " xem ",
                " ve ",
                " ver ",
            ]
            for keyword in most_recently_named_keywords:
                py_line = py_line.replace(
                    keyword, " {} ".format(self.most_recently_named)
                )
            # Phase 6: block constructs (each may open an indentation level).
            py_line = self.create_function(py_line)
            py_line = self.create_while(py_line)
            py_line = self.create_if(py_line)
            # "Else" belongs to the enclosing block, one level shallower.
            line_ident = self.indentation_style * (self.current_indentation - 1) if py_line == "Else" else line_ident
            py_line = "else:" if py_line == "Else" else py_line
            # Phase 7: remaining statement forms.
            py_line = re.sub(
                r"Put (.*) into ({})".format(self.REGEX_VARIABLES),
                r"\g<2> = \g<1>",
                py_line,
            )
            py_line = re.sub(
                r"Build ({}) up".format(self.REGEX_VARIABLES), r"\g<1> += 1", py_line
            )
            # Each extra ", down" decrements by one more.
            py_line = re.sub(
                r"Knock ({}) down(\, down)*".format(self.REGEX_VARIABLES),
                r"\g<1> -= " + str(1 + py_line.count(", down")),
                py_line,
            )
            py_line = re.sub(
                r"Listen to ({})".format(self.REGEX_VARIABLES),
                r"\g<1> = input()",
                py_line,
            )
            py_line = re.sub(
                r"(?:Say|Shout|Whisper|Scream) (.*)", r"print(\g<1>)", py_line
            )
            # Any leftover " is " is a plain assignment (first occurrence only).
            py_line = py_line.replace(" is ", " = ", 1)
            # "X taking a, b" becomes the call "X(a, b)".
            py_line = re.sub(
                r"({0}) taking ((?:{0}|\"[^\"]*\"|[0-9]+)(?:, ?(?:{0}|\"[^\"]*\"|[0-9]+))*)".format(
                    self.REGEX_VARIABLES
                ),
                r"\g<1>(\g<2>)",
                py_line,
            )
            # Phase 8: canonicalise variable names.
            py_line = self.find_proper_variables(py_line)
            py_line = self.find_common_variables(py_line)
            # Phase 9: track assignments for pronouns and ``global`` emission.
            line_named = self.find_named(py_line)
            if line_named:
                self.most_recently_named = line_named
                if not self.in_function:
                    self.globals.add(line_named)
                elif line_named in self.globals:
                    py_line = f"global {line_named}\n" + line_ident + py_line
            # Phase 10: restore the protected string literals.
            py_line = py_line.format(**line_strings)
            return line_ident + py_line + comments + "\n"
from __future__ import annotations
from typing import Any, Generator, Iterable
import re
from dataclasses import dataclass
from pathlib import Path
class Formatter:
    """Formatting class for string substitution and comments parsing."""

    @dataclass
    class _Replacement:
        """A span in the source line and the text that replaces it."""

        loc: tuple[int, int]
        text: str

    def __init__(self, context: dict[str, Any]):
        """Initialize Formatter.

        Arguments:
            context: Values available for substitution.  Expected keys are
                "branch" and "url" (plain strings) and "projects"
                (a mapping of project name to URL).
        """
        # Matches "${directive}", "${directive:parameter}" and (for backwards
        # compatibility, see _format_directive) the prefix-less "{directive}".
        # FIX: the directive character class used to be "[a-zA-z]", which by
        # ASCII order also matched '[', '\', ']', '^', '_' and '`'.
        self.directive_pattern: re.Pattern[str] = re.compile(
            r"(?P<prefix>\$)?\{(?P<directive>[a-zA-Z][a-zA-Z0-9_]+)"
            r"(?:\:(?P<parameter>[a-zA-Z0-9_\-\.]+))?\}"
        )
        self.context = context

    def _format_simple(
        self, directive: str, parameter: str | None, loc: tuple[int, int]
    ) -> _Replacement | None:
        """Resolve a parameter-less directive directly from the context."""
        # Cannot have a parameter
        if parameter is not None:
            return None
        return self._Replacement(loc, self.context[directive])

    def _format_project(
        self, _: str, parameter: str | None, loc: tuple[int, int]
    ) -> _Replacement | None:
        """Resolve "${project:<name>}" from the "projects" mapping."""
        # Parameter is required
        if parameter is None:
            return None
        if parameter not in self.context["projects"]:
            return None
        return self._Replacement(loc, self.context["projects"][parameter])

    def _format_directive(self, match: re.Match[str]) -> _Replacement | None:
        """Map a regex match to its replacement, or None if not valid."""
        # As a special case allow `{branch}` and `{url}` to alias `${branch}`
        # and `${url}` respectively for backwards compatibility.
        # Otherwise the '$' prefix is required.
        if match["prefix"] is None:
            if match["directive"] not in ["branch", "url"]:
                return None

        if match["directive"] in ["branch", "url"]:
            return self._format_simple(
                match["directive"], match["parameter"], match.span()
            )

        if match["directive"] == "project":
            return self._format_project(
                match["directive"], match["parameter"], match.span()
            )

        return None

    def _replacements(self, line: str) -> Generator[_Replacement, None, None]:
        """Yield a replacement for every valid directive found in ``line``."""
        for match in self.directive_pattern.finditer(line):
            replacement = self._format_directive(match)
            if replacement is not None:
                yield replacement

    def format_line(self, line: str) -> str:
        """Substitute variable references into line.

        References of the form ${<variable>} and ${directive:param}
        are substituted
        >>> f = Formatter(
        ...     {
        ...         "branch": "develop",
        ...         "url": "https://example.com",
        ...         "projects": {"project": "https://project.com"},
        ...     }
        ... )
        >>> f.format_line('my branch is ${branch}, {branch} also works')
        'my branch is develop, develop also works'

        >>> f.format_line('Url: ${url} or {url}')
        'Url: https://example.com or https://example.com'

        >>> f.format_line('- url: ${project:project}')
        '- url: https://project.com'

        Unknown references are not replaced.
        >>> f.format_line('{invalid}')
        '{invalid}'
        """
        result: str = ""
        end: int = 0
        for replacement in self._replacements(line):
            # finditer yields non-overlapping matches left to right.
            assert replacement.loc[0] >= end
            result += line[end : replacement.loc[0]] + replacement.text
            end = replacement.loc[1]

        result += line[end:]
        return result

    def skip_comments(self, lines: Iterable[str]) -> Generator[str, None, None]:
        """Returns a sequence that skips lines as long as they start with '#'.

        Lines after the first "non-comment" line are returned as-is.
        >>> f = Formatter({})
        >>> for l in f.skip_comments(
        ...     ["#comment 0", "# comment 1", "text", "# will not be skipped"]
        ... ):
        ...     print(l)
        text
        # will not be skipped
        """
        iterator = iter(lines)
        for line in iterator:
            if not line.startswith("#"):
                yield line
                break
        yield from iterator
def format_toc(
    input_path: Path, output_path: Path, context: dict[str, Any]
) -> None:
    """Format the input table of contents with additional information.

    Reads ``input_path``, drops the leading comment lines, substitutes
    directives via :class:`Formatter` and writes the result to
    ``output_path``.
    """
    formatter = Formatter(context)
    with open(input_path, encoding="utf-8") as toc_in:
        content_lines = formatter.skip_comments(toc_in)
        with open(output_path, "w", encoding="utf-8") as toc_out:
            toc_out.writelines(
                formatter.format_line(line) for line in content_lines
            )
from typing import Dict, List, Optional, Union
import os
from rocm_docs.core import setup
MaybePath = Union[str, os.PathLike, None]


# Intentionally disabling the too-many-instance-attributes check in pylint
# as this class is intended to contain all necessary Sphinx config variables
# pylint: disable=too-many-instance-attributes
class ROCmDocs:
    """Container collecting all of the Sphinx configuration variables."""

    # Name of the extension that drives doxygen integration.
    _DOXYGEN_EXTENSION = "rocm_docs.doxygen"

    SPHINX_VARS = [
        "extensions",
        "html_title",
        "html_theme",
        "html_theme_options",
        "doxygen_root",
        "doxygen_project",
        "doxyfile",
        "doxysphinx_enabled",
    ]

    def __init__(
        self,
        project_name: str,
        _: Optional[str] = None,
        __: MaybePath = None,
    ) -> None:
        """Initialize ROCmDocs for the given project name."""
        self._project_name: str = project_name
        self.extensions: List[str] = []
        self.html_title: str
        self.html_theme: str
        self.html_theme_options: Dict[str, Union[str, bool, List[str]]] = {}
        self.doxygen_root: MaybePath = None
        self.doxygen_project: Dict[str, Union[Optional[str], MaybePath]] = {
            "name": None,
            "path": None,
        }
        self.doxyfile: MaybePath = None
        self.doxysphinx_enabled: bool = False

    def _register_doxygen(self) -> None:
        """Add the doxygen extension to the list exactly once."""
        if self._DOXYGEN_EXTENSION not in self.extensions:
            self.extensions.append(self._DOXYGEN_EXTENSION)

    @property
    def project(self) -> str:
        """Sphinx project variable."""
        return self._project_name

    def run_doxygen(
        self,
        doxygen_root: MaybePath = None,
        doxygen_path: MaybePath = None,
        doxygen_file: Optional[str] = None,
    ) -> None:
        """Run doxygen as part of Sphinx by adding rocm_docs.doxygen."""
        self._register_doxygen()
        self.doxygen_root = doxygen_root
        self.doxygen_project = {
            "name": self._project_name,
            "path": doxygen_path,
        }
        self.doxyfile = doxygen_file

    def enable_api_reference(self) -> None:
        """Enable embedding the doxygen generated api."""
        self._register_doxygen()
        self.doxysphinx_enabled = True

    def setup(self) -> None:
        """Set up default RTD variables."""
        self.extensions.append("rocm_docs")
        self.html_title = self._project_name
        self.html_theme = "rocm_docs_theme"
__all__ = ["setup", "ROCmDocs"] | /rocm_docs_core-0.22.0-py3-none-any.whl/rocm_docs/__init__.py | 0.89036 | 0.152127 | __init__.py | pypi |
# RObot Description processor
**The ultimate Python tool for RObot Descriptions processing.**
ROD is yet another library to operate on robot descriptions based on the [SDFormat][sdformat] specification.
## Why SDFormat?
Among the many existing robot description formats, SDFormat provides a well-defined and maintained [versioned specification][sdformat_spec] that controls the available fields and their content.
[Open Robotics][open_robotics] already provides the C++ library [`gazebosim/sdformat`](https://github.com/gazebosim/sdformat) with initial support of [Python bindings][sdformat_python].
However, C++ dependencies in pure-Python projects are typically quite complicated to handle and maintain.
Here ROD comes to the rescue.
URDF, thanks to native ROS support, is historically the most popular robot description used by the community.
The main problem with URDF is that it is not a specification, and developers of URDF descriptions might produce models and parsers that do not comply with any standard.
Luckily, URDF models can be easily converted to SDF[^urdf_to_sdf].
If the URDF model is not compliant, the process errors with clear messages.
Furthermore, modern versions of the converter produce a SDF description with standardized [pose semantics][pose_semantics],
that greatly simplifies the life of downstream developers that do not have to guess the reference frame or pose elements.
Last but not least, the pose semantics also makes SDF aware of the concept of _frame_ that URDF is missing.
## Features
- Out-of-the-box support of SDFormat specifications [≥ 1.7][sdformat_spec_17]
- Serialization and deserialization support of SDF files
- In-memory layout based on `dataclasses`
- Syntax highlighting and auto-completion
- Support of programmatic creation of SDF files from Python APIs
- Transitive support of URDF through conversion to SDF[^urdf_to_sdf]
- Type validation of elements and attributes
- Automatic check of missing required elements
- Based on [`Fatal1ty/mashumaro`][mashumaro] for great serialization and deserialization performance
- Support of exporting the in-memory model description to URDF
[mashumaro]: https://github.com/Fatal1ty/mashumaro
[open_robotics]: https://www.openrobotics.org/
[pose_semantics]: http://sdformat.org/tutorials?tut=pose_frame_semantics_proposal&cat=pose_semantics_docs&
[sdformat]: http://sdformat.org/
[sdformat_python]: http://sdformat.org/tutorials?tut=python_bindings&cat=developers&
[sdformat_repo]: https://github.com/gazebosim/sdformat
[sdformat_spec]: http://sdformat.org/spec
[sdformat_spec_17]: http://sdformat.org/spec?elem=sdf&ver=1.7
[urdf]: http://wiki.ros.org/urdf
[^urdf_to_sdf]: Conversion can be done either using `ign sdf` included in Ignition Gazebo Fortress, or `gz sdf` included in Gazebo Sim starting from Garden.
## Installation
You can install the project with [`pypa/pip`][pip], preferably in a [virtual environment][venv]:
```bash
pip install git+https://github.com/ami-iit/rod
```
[pip]: https://github.com/pypa/pip/
[venv]: https://docs.python.org/3.8/tutorial/venv.html
## Examples
<details>
<summary>Serialize and deserialize SDF files</summary>
```python
import pathlib
from rod import Sdf
# Supported SDF resources
sdf_resource_1 = "/path/to/file.sdf"
sdf_resource_2 = pathlib.Path(sdf_resource_1)
sdf_resource_3 = sdf_resource_2.read_text()
# Deserialize SDF resources
sdf_1 = Sdf.load(sdf=sdf_resource_1)
sdf_2 = Sdf.load(sdf=sdf_resource_2)
sdf_3 = Sdf.load(sdf=sdf_resource_3)
# Serialize in-memory Sdf object
print(sdf_3.serialize(pretty=True))
```
</details>
<details>
<summary>Create SDF models programmatically</summary>
```python
from rod import Axis, Inertia, Inertial, Joint, Limit, Link, Model, Sdf, Xyz
sdf = Sdf(
version="1.7",
model=Model(
name="my_model",
link=[
Link(name="base_link", inertial=Inertial(mass=1.0, inertia=Inertia())),
Link(name="my_link", inertial=Inertial(mass=0.5, inertia=Inertia())),
],
joint=Joint(
name="base_to_my_link",
type="revolute",
parent="base_link",
child="my_link",
axis=Axis(xyz=Xyz(xyz=[0, 0, 1]), limit=Limit(lower=-3.13, upper=3.14)),
),
),
)
print(sdf.serialize(pretty=True))
```
```xml
<?xml version="1.0" encoding="utf-8"?>
<sdf version="1.7">
<model name="my_model">
<link name="base_link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>1.0</ixx>
<iyy>1.0</iyy>
<izz>1.0</izz>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyz>0.0</iyz>
</inertia>
</inertial>
</link>
<link name="my_link">
<inertial>
<mass>0.5</mass>
<inertia>
<ixx>1.0</ixx>
<iyy>1.0</iyy>
<izz>1.0</izz>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyz>0.0</iyz>
</inertia>
</inertial>
</link>
<joint name="base_to_my_link" type="revolute">
<parent>base_link</parent>
<child>my_link</child>
<axis>
<xyz>0 0 1</xyz>
<limit>
<lower>-3.13</lower>
<upper>3.14</upper>
</limit>
</axis>
</joint>
</model>
</sdf>
```
</details>
<details>
<summary>Exporting SDF to URDF</summary>
```python
# Generate first the 'sdf' object with the collapsed code
# of the section 'Create SDF models programmatically'.
from rod.urdf.exporter import UrdfExporter
urdf_string = UrdfExporter.sdf_to_urdf_string(
sdf=sdf,
pretty=True,
gazebo_preserve_fixed_joints=True,
)
print(urdf_string)
```
```xml
<?xml version="1.0" encoding="utf-8"?>
<robot name="my_model">
<link name="base_link">
<inertial>
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0"/>
<mass value="1.0"/>
<inertia ixx="1.0" ixy="0.0" ixz="0.0" iyy="1.0" iyz="0.0" izz="1.0"/>
</inertial>
</link>
<link name="my_link">
<inertial>
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0"/>
<mass value="0.5"/>
<inertia ixx="1.0" ixy="0.0" ixz="0.0" iyy="1.0" iyz="0.0" izz="1.0"/>
</inertial>
</link>
<joint name="base_to_my_link" type="revolute">
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0"/>
<parent link="base_link"/>
<child link="my_link"/>
<axis xyz="0 0 1"/>
<limit effort="3.4028235e+38" velocity="3.4028235e+38" lower="-3.13" upper="3.14"/>
</joint>
</robot>
```
</details>
## Similar projects
- https://github.com/gazebosim/sdformat
- https://github.com/mmatl/urdfpy
- https://github.com/clemense/yourdfpy
- https://github.com/ros/urdf_parser_py
- https://github.com/FirefoxMetzger/python-sdformat/
## Contributing
Pull requests are welcome.
For major changes, please open an issue first to discuss what you would like to change.
## Maintainers
| [<img src="https://github.com/diegoferigo.png" width="40">][df] | [@diegoferigo][df] |
|:---------------------------------------------------------------:|:------------------:|
[df]: https://github.com/diegoferigo
## License
[BSD3](https://choosealicense.com/licenses/bsd-3-clause/)
| /rod-0.1rc1.tar.gz/rod-0.1rc1/README.md | 0.46952 | 0.872239 | README.md | pypi |
from calendar import day_abbr
from typing import Optional
from .datastorereq import Requests
import base64, hashlib, json
from .exceptions import *
from .Utils.bases import BaseDataStore
class DatabaseClient:
    def __init__(self, universeId: int, token: str, ROBLOSECURITY: str, responsetype: Optional[str] = 'class'):
        """
        universeId: The ID of the universe to connect to.
        token: The Open Cloud API token to use for requests.
        ROBLOSECURITY: The .ROBLOSECURITY token to use for authentication.
        responsetype: The type of response to return. NOTE: 'class' is slower than 'json'. 'class' | 'json'

        Functions:
            get_datastores: Returns a list of all datastores in the universe.
            get_datastore: Returns a class or json object of the datastore with the specified name.
        """
        # Validate up front so a bad value fails fast, before any setup work.
        if responsetype != 'class' and responsetype != 'json':
            raise TypeError("Invalid response type.")
        self.token = token
        self.requests: Requests = Requests()
        self.id = universeId
        self.response = responsetype
        self.set_token(token=ROBLOSECURITY)

    def set_token(self, token: str):
        """
        Authenticates the client with the passed .ROBLOSECURITY token.
        This method does not send any requests and will not throw if the token is invalid.

        Arguments:
            token: A .ROBLOSECURITY token to authenticate the client with.
        """
        self.requests.session.cookies[".ROBLOSECURITY"] = token

    async def get_datastores(self):
        """
        Gets the datastores associated with the game.

        Returns: JSON object with a "datastores" list.
        """
        # FIX: the endpoint requires the "universes/" path segment, matching
        # the URL already used by get_datastore() and the Open Cloud spec.
        response = await self.requests.get(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores",
            headers={'x-api-key': self.token}
        )
        return response.json()

    async def get_datastore(self, datastore: str):
        """
        Gets the datastore with the specified name.

        Arguments:
            datastore: The name of the datastore to get.

        Returns: JSON object or BaseDataStore, depending on ``responsetype``.
        An empty dict / empty BaseDataStore is returned when no datastore
        with that name exists.
        """
        response = await self.requests.get(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores",
            headers={'x-api-key': self.token},
        )
        datastores = response.json()["datastores"]
        if self.response == 'class':
            for entry in datastores:
                if entry['name'] == datastore:
                    # NOTE(review): both token kwargs receive the API key;
                    # confirm whether BaseDataStore expects .ROBLOSECURITY
                    # for one of them.
                    return BaseDataStore(json=entry, datastore=entry['name'], token=self.token, apitoken=self.token, id=self.id)
            return BaseDataStore(json={}, datastore=datastore, token=self.token, apitoken=self.token, id=self.id)
        elif self.response == 'json':
            # FIX: iterate the "datastores" list; the old code iterated the
            # top-level response dict (i.e. its string keys), so
            # ``i['name']`` would raise TypeError.
            for entry in datastores:
                if entry['name'] == datastore:
                    return entry
            return {}
        else:
            # Unreachable: __init__ already validates responsetype.
            raise TypeError("Invalid response type.")
import json
from typing import Optional, List, Dict, Type
from httpx import Response
class RobloxException(Exception):
    """
    Base exception.

    Every exception raised by this package inherits from this class, so
    callers can catch it to handle any library-specific error.
    """
    pass
class ResponseError:
    """
    Represents a single error object returned by a Roblox game server.

    Attributes:
        code: The error code (the only field guaranteed to be present).
        message: The error message.
        user_facing_message: A simpler error message intended for frontend use.
        field: The field causing this error.
        retryable: Whether retrying this request could suppress this issue.
    """

    # Maps attribute names to the (optional) keys of the raw error payload.
    _OPTIONAL_FIELDS = {
        "message": "message",
        "user_facing_message": "userFacingMessage",
        "field": "field",
        "retryable": "retryable",
    }

    def __init__(self, data: dict):
        self.code: int = data["code"]
        # Optional payload fields default to None when absent.
        for attribute, payload_key in self._OPTIONAL_FIELDS.items():
            setattr(self, attribute, data.get(payload_key))
class HTTPException(RobloxException):
    """
    Exception that's raised when an HTTP request fails.
    Attributes:
        response: The HTTP response object.
        status: The HTTP response status code.
        errors: A list of Roblox response errors.
    """
    def __init__(self, response: Response, errors: Optional[list] = None):
        """
        Arguments:
            response: The raw response object.
            errors: A list of errors.
        """
        self.response: Response = response
        self.status: int = response.status_code
        # BUG FIX: ``errors`` used to be discarded and replaced with a dummy
        # placeholder list ([' ']), so callers could never inspect the
        # errors they passed in. Keep what the caller provided.
        self.errors: list = errors if errors is not None else []
        try:
            body = response.json()  # parse once instead of three times
            error = body["error"]
            error_detail = body["errorDetails"][0]
            super().__init__(
                f"{response.status_code} {response.reason_phrase}: {response.url}.\n\nError: {error}\nError detail: {error_detail}\nResponse JSON:\n{body}")
        except (json.decoder.JSONDecodeError, KeyError, IndexError):
            # Body is not JSON, or lacks the expected error structure
            # (previously a missing "error" key crashed the constructor).
            super().__init__(f"{response.status_code} {response.reason_phrase}: {response.url}\nResponse JSON:\nUnloadable JSON.")
class BadRequest(HTTPException):
    """HTTP exception raised for status code 400 (the request was malformed or invalid)."""
    pass
class Unauthorized(HTTPException):
    """HTTP exception raised for status code 401. This usually means you aren't properly authenticated (missing or invalid credentials)."""
class Forbidden(HTTPException):
    """HTTP exception raised for status code 403. This usually means the X-CSRF-Token was not properly provided (or that access is otherwise denied)."""
    pass
class NotFound(HTTPException):
    """
    HTTP exception raised for status code 404.
    This usually means we have an internal URL issue - please open a GitHub issue about this!
    """
    pass
class TooManyRequests(HTTPException):
    """
    HTTP exception raised for status code 429.
    This means that Roblox has [ratelimited](https://en.wikipedia.org/wiki/Rate_limiting) you; back off and retry later.
    """
    pass
class InternalServerError(HTTPException):
    """
    HTTP exception raised for status code 500.
    This usually means that there was an issue on Roblox's end, but due to faulty coding on Roblox's part this can
    sometimes mean that an endpoint used internally was disabled or that invalid parameters were passed.
    """
    pass
class BadGateway(HTTPException):
    """HTTP exception raised for status code 502.
    This means that Roblox's servers returned an invalid response.
    """
    pass
# Maps well-known HTTP status codes to their dedicated exception subclasses.
# Consumed by get_exception_from_status_code().
_codes_exceptions: Dict[int, Type[HTTPException]] = {
    400: BadRequest,
    401: Unauthorized,
    403: Forbidden,
    404: NotFound,
    429: TooManyRequests,
    500: InternalServerError,
    502: BadGateway
}
def get_exception_from_status_code(code: int) -> Type[HTTPException]:
    """
    Return the HTTPException subclass mapped to ``code``, falling back to
    the generic HTTPException for unmapped status codes.
    """
    # Equivalent to ``.get(code) or HTTPException``: mapped values are
    # classes and therefore always truthy.
    return _codes_exceptions.get(code, HTTPException)
from typing import Optional
from ..datastorereq import Requests
import base64, hashlib, json
class BaseDataStore:
    """Async wrapper around the Roblox Open Cloud "standard datastore"
    endpoints for a single named datastore of one universe."""
    def __repr__(self) -> str:
        return f"<BaseDataStore {self.datastore}>"
    def __init__(self, json, datastore, token, apitoken, id):
        """
        Arguments:
            json: Raw JSON entry describing this datastore (may be empty).
            datastore: The datastore name.
            token: A .ROBLOSECURITY cookie token.
            apitoken: An Open Cloud API key (sent as the 'x-api-key' header).
            id: The universe (experience) id.
        """
        self._json = json
        self.datastore = datastore
        self.requests: Requests = Requests()
        self.token = apitoken
        self.id = id
        self.set_token(token)
    def set_token(self, token: str):
        """
        Sets the .ROBLOSECURITY cookie used by this datastore's session.
        Arguments:
            token: The token to set.
        """
        self.requests.session.cookies[".ROBLOSECURITY"] = token
    async def get_keys(self, limit: Optional[int] = 100):
        """
        Gets the keys of this datastore.
        Arguments:
            limit: The maximum number of keys to return (at most 100).
        Returns: JSON Object.
        Raises:
            TypeError: If limit exceeds 100.
        """
        # NOTE(review): ValueError would be more conventional here; TypeError
        # is kept for backward compatibility with existing callers.
        if limit > 100:
            raise TypeError("Limit must be less than or equal to 100.")
        response = await self.requests.get(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores/datastore/entries",
            headers={'x-api-key': self.token},
            params={'datastoreName': self.datastore, 'prefix': '', 'limit': limit}
        )
        return response.json()
    async def set_data(self, key: str, data):
        """
        Sets the data in this datastore.
        Arguments:
            key: The key to set the data under.
            data: The data to set (must be JSON-serializable).
        Returns: JSON Object.
        """
        sdata = json.dumps(data)
        # base64-encoded MD5 digest of the JSON body, sent as the
        # 'content-md5' integrity header (presumably required by the
        # endpoint -- verify against the Open Cloud docs).
        sdata = str(base64.b64encode(hashlib.md5(bytes(sdata, encoding='utf8')).digest()), encoding='utf8')
        response = await self.requests.post(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores/datastore/entries/entry",
            headers={'x-api-key': self.token, 'content-md5': sdata},
            json=data,
            params={'datastoreName': self.datastore, 'entryKey': key}
        )
        return response.json()
    async def increment_data(self, key: str, incrementby: int):
        """
        Increments the data in this datastore.
        Arguments:
            key: The key to increment the data under.
            incrementby: The amount to increment the data by.
        Returns: JSON Object.
        """
        response = await self.requests.post(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores/datastore/entries/entry/increment",
            headers={'x-api-key': self.token},
            json={"incrementBy": incrementby},
            params={'datastoreName': self.datastore, 'entryKey': key}
        )
        return response.json()
    async def delete_data(self, key: str):
        """
        Deletes the data in the specified datastore entry.
        Arguments:
            key: The key to delete the data under.
        Returns: JSON Object.
        """
        response = await self.requests.delete(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores/datastore/entries/entry",
            headers={'x-api-key': self.token},
            params={'datastoreName': self.datastore, 'entryKey': key}
        )
        return response.json()
    async def get_data(self, key: str):
        """
        Gets the data in the specified datastore entry.
        Arguments:
            key: The key to get the data under.
        Returns: JSON Object.
        """
        response = await self.requests.get(
            url=f"https://apis.roblox.com/datastores/v1/universes/{self.id}/standard-datastores/datastore/entries/entry",
            headers={'x-api-key': self.token},
            params={'datastoreName': self.datastore, 'entryKey': key}
        )
        return response.json()
import os
import csv
import re
from collections import OrderedDict
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
def check_if_border(feature, operon_borders):
    """Return start/end coordinate of the SeqFeature if its accession is the first/last in the given tuple.
    Parameters
    ----------
    feature : SeqFeature
        The CDS feature to inspect.
    operon_borders : tuple
        Accessions of the first and the last gene of the operon.
    Returns
    -------
    tuple(str, int)
        The first value is either 'end' or 'start'.
        The second value is the corresponding coordinate.
        If SeqFeature's ID is not in the given tuple, returns None.
    """
    # FIX: raw string -- '\.' in a normal string is an invalid escape
    # sequence warning under Python 3.
    prot_id_regexp = re.compile(r'[A-Z]{2}_[0-9]+\.[0-9]')
    start_id = operon_borders[0]
    end_id = operon_borders[-1]
    if 'protein_id' in feature.qualifiers:
        if feature.qualifiers['protein_id'][0] == start_id:
            return ('start', feature.location.start + 1)  # genbank is 1-based, python is 0-based
        if feature.qualifiers['protein_id'][0] == end_id:
            return ('end', int(feature.location.end))
    elif 'pseudo' in feature.qualifiers:
        # Pseudogenes have no protein_id; try to recover one from the
        # 'inference' qualifier instead.
        if 'inference' in feature.qualifiers:
            inference_prot_id_search = prot_id_regexp.search(feature.qualifiers['inference'][0])
            if inference_prot_id_search is not None:
                inference_prot_id = inference_prot_id_search.group(0)
                if inference_prot_id == start_id:
                    return ('start', feature.location.start + 1)
                if inference_prot_id == end_id:
                    return ('end', int(feature.location.end))
    else:
        # Fall back to the locus tag when neither protein_id nor pseudo
        # qualifiers are present.
        if feature.qualifiers['locus_tag'][0] == start_id:
            return ('start', feature.location.start + 1)
        if feature.qualifiers['locus_tag'][0] == end_id:
            return ('end', int(feature.location.end))
    return None
def convert_gbk(gb_dir, gb_out_dir, rodeo_output, bg_domains, max_intergenic_distance = 100, product_class = 'thiopeptide'):
    """Convert a common genbank file to the genbank antiSMASH output.
    Adds a feature "cluster" with information about the class of the product.
    The coordinates of this feature are boundaries of the group of adjacent genes on the same strand that includes RODEO query.
    Marks genes with given domains as biosynthetic.
    Parameters
    ----------
    gb_dir : str
        Directory with input genbank files.
    gb_out_dir : str
        Directory to store the output.
    rodeo_output: RodeoOutput
        RODEO output to use as a reference.
    bg_domains : list
        List of Pfam or TIGRFAMs IDs for domains that are important for your product biosynthesis.
    max_intergenic_distance : int, optional
        Maximum distance (nt) between genes within the biosynthetic gene cluster (default: 100).
    product_class : string, optional
        A putative class of the final product (default: thiopeptide).
    Returns
    -------
    bool
        True if successful, False otherwise.
    """
    rodeo_output.table_proccessing(bg_domains, max_intergenic_distance)
    operon_border_accs = (rodeo_output.operon_accs[0], rodeo_output.operon_accs[-1])
    biosynthetic_genes = rodeo_output.biosynthetic_genes
    contig_edge = False
    prot_id = rodeo_output.query
    try:
        genbank = SeqIO.parse('%s%s.gbk' % (gb_dir, prot_id), 'genbank')
        for record in genbank:  # Every file is expected to contain only one record
            # Default to the whole record; narrowed below if border genes are found.
            cluster_coords = OrderedDict([('start', 1), ('end', len(record))])
            for feature in record.features:
                if feature.type == 'CDS':
                    border_check = check_if_border(feature, operon_border_accs)
                    if border_check is not None:
                        cluster_coords[border_check[0]] = border_check[1]
                    if 'protein_id' in feature.qualifiers:
                        if feature.qualifiers['protein_id'][0] in biosynthetic_genes:
                            feature.qualifiers['sec_met'] = ['Kind: biosynthetic']
            start, end = cluster_coords.values()
            cluster_location = FeatureLocation(start, end)
            cluster_qualifiers = OrderedDict([('contig_edge', str(contig_edge)), ('product', product_class)])
            cluster = SeqFeature(location = cluster_location, type = 'cluster', qualifiers = cluster_qualifiers)
            record.features = [cluster] + record.features
            SeqIO.write(record, '%s%s.gbk' % (gb_out_dir, prot_id), 'genbank')
        return True
    except Exception as e:
        # BUG FIX: "print e" is Python 2 syntax and is a SyntaxError under
        # Python 3; use the print() function.
        print(e)
        return False
from math import log
from typing import Dict, List, Optional
from warnings import warn
import numpy as np
from sklearn.metrics import matthews_corrcoef
from .utils import centered_iou, get_center, harmonic_mean, hungarian_matching
class RoDeO:
    r"""Robust Detection Outcome (RoDeO).
    Computes three detection properties after a hungarian matching:
    1. Classification (Matthews Correlation Coefficient)
    2. Localization (Distance between centers of matched boxes)
    3. Shape Matching (Centered Box IoU)
    The summary metric is the harmonic mean of the above.
    """
    def __init__(
        self,
        class_names: List[str],
        w_matched: Optional[float] = 1.0,
        w_overpred: Optional[float] = 1.0,
        w_missed: Optional[float] = 1.0,
        class_weight_matching: Optional[float] = None,
        return_per_class: Optional[bool] = False,
    ) -> None:
        r"""Metric class for Robust Detection Outcome (RoDeO).
        Args:
            class_names: List of possible class names.
            w_matched: Weight for matched boxes when weighting the scores.
                Eq. (5) in the paper (default: 1.0).
            w_overpred: Weight for overpredicted boxes when weighting the scores.
                Eq. (5) in the paper (default: 1.0).
            w_missed: Weight for missed boxes when weighting the scores.
                Eq. (5) in the paper (default: 1.0).
            class_weight_matching: Weight for class matching when computing
                the hungarian matching. Will be computed from the
                classification performance if not specified (default).
            return_per_class: Whether to return the scores for each class
                (default: False).
        """
        assert len(class_names) > 1
        self.class_names: List[str] = class_names
        self.num_classes: int = len(self.class_names)
        self.w_matched: float = w_matched
        self.w_overpred: float = w_overpred
        self.w_missed: float = w_missed
        self.class_weight_matching: Optional[float] = class_weight_matching
        self.return_per_class: bool = return_per_class
        self.pred_boxes: List[np.ndarray]
        self.target_boxes: List[np.ndarray]
        self.reset()
    def reset(self) -> None:
        r"""Reset the metric. Clear all stored predictions and targets."""
        self.pred_boxes = []
        self.target_boxes = []
    def add(
        self,
        preds: List[np.ndarray],
        targets: List[np.ndarray],
    ) -> None:
        r"""Add predictions and targets to the metric.
        Args:
            preds: List of predicted boxes. Each box is a (M_p, 5) array
                with (x, y, w, h, cls_id) for each box.
            targets: List of target boxes. Each box is a (M_t, 5) array
                with (x, y, w, h, cls_id) for each box.
        """
        assert len(preds) == len(targets)
        self.pred_boxes.extend(preds)
        self.target_boxes.extend(targets)
    def compute(self) -> Dict[str, float]:
        r"""Compute the metric."""
        if self.class_weight_matching is None:
            class_weight = self._get_class_cost_for_matching()
        else:
            class_weight = self.class_weight_matching
        # Get matchings
        matched_preds = []
        matched_targets = []
        unmatched_preds = []
        unmatched_targets = []
        for preds, targets in zip(self.pred_boxes,
                                  self.target_boxes):
            # Compute hungarian matching
            (matched_preds_,
             matched_targets_,
             unmatched_preds_,
             unmatched_targets_) = hungarian_matching(preds, targets, class_weight=class_weight)
            matched_preds.append(matched_preds_)
            matched_targets.append(matched_targets_)
            unmatched_preds.append(unmatched_preds_)
            unmatched_targets.append(unmatched_targets_)
        # Each box now is (x, y, w, h, cls_id).
        # BUG FIX: guard the empty case -- np.concatenate raises ValueError
        # on an empty list, so compute() used to crash when no samples had
        # been added instead of reaching the warning branch below.
        empty = np.zeros((0, 5))
        matched_preds = np.concatenate(matched_preds) if matched_preds else empty  # (n_matched, 5)
        matched_targets = np.concatenate(matched_targets) if matched_targets else empty  # (n_matched, 5)
        unmatched_preds = np.concatenate(unmatched_preds) if unmatched_preds else empty  # (n_overpred, 5)
        unmatched_targets = np.concatenate(unmatched_targets) if unmatched_targets else empty  # (n_underpred, 5)
        if len(matched_preds) == 0:
            warn("Unable to calculate RoDeO without predictions or targets. Returning worst possible value.")
            keys = ['total', 'localization', 'shape_matching', 'classification', 'overprediction', 'underprediction']
            # Parentheses added for clarity; the conditional already bound
            # looser than '+', so behavior is unchanged.
            classes = (['/' + c for c in self.class_names] + ['']) if self.return_per_class else ['']
            return {f'RoDeO{cls}/{k}': 0.0 for cls in classes for k in keys}
        # Compute scores
        res = {}
        res['RoDeO/localization'] = self._localization_score(
            matched_preds,
            matched_targets,
            unmatched_preds,
            unmatched_targets,
        )
        res['RoDeO/shape_matching'] = self._shape_matching_score(
            matched_preds,
            matched_targets,
            unmatched_preds,
            unmatched_targets
        )
        res['RoDeO/classification'] = self._classification_score(
            matched_preds,
            matched_targets,
            unmatched_preds,
            unmatched_targets
        )
        # Combine with (harmonic) mean (Eq. 6 in the paper)
        res['RoDeO/total'] = harmonic_mean(np.array([
            res['RoDeO/localization'],
            res['RoDeO/shape_matching'],
            res['RoDeO/classification']
        ])).item()
        # For each class
        if self.return_per_class:
            for i, cls_name in enumerate(self.class_names):
                key = f'RoDeO/{cls_name}'
                # Filter class
                matched_inds_c = matched_targets[:, 4] == i
                matched_preds_c = matched_preds[matched_inds_c]
                matched_targets_c = matched_targets[matched_inds_c]
                unmatched_preds_c = unmatched_preds[unmatched_preds[:, 4] == i]
                unmatched_targets_c = unmatched_targets[unmatched_targets[:, 4] == i]
                # Compute scores
                res[f'{key}/localization'] = self._localization_score(
                    matched_preds_c,
                    matched_targets_c,
                    unmatched_preds_c,
                    unmatched_targets_c,
                )
                res[f'{key}/shape_matching'] = self._shape_matching_score(
                    matched_preds_c,
                    matched_targets_c,
                    unmatched_preds_c,
                    unmatched_targets_c
                )
                res[f'{key}/classification'] = self._classification_score(
                    matched_preds_c,
                    matched_targets_c,
                    unmatched_preds_c,
                    unmatched_targets_c
                )
                # Combine with (harmonic) mean (Eq. 6 in the paper)
                res[f'{key}/total'] = harmonic_mean(np.array([
                    res[f'{key}/localization'],
                    res[f'{key}/shape_matching'],
                    res[f'{key}/classification']
                ])).item()
        # Return results
        return res
    def _localization_score(
        self,
        matched_preds: np.ndarray,
        matched_targets: np.ndarray,
        unmatched_preds: np.ndarray,
        unmatched_targets: np.ndarray
    ) -> float:
        r"""Compute the localization score (Eq. 2 in the paper)."""
        # Normalize predictions and targets by size of target boxes.
        # (w, h, w, h) per row so each of (x, y, w, h) is divided by the
        # matching dimension: shape (n_matched, 4).
        target_sizes = np.concatenate([matched_targets[:, 2:4], matched_targets[:, 2:4]], 1)
        pred_center = get_center(matched_preds[:, :4] / target_sizes)  # (n_matched, 2)
        target_center = get_center(matched_targets[:, :4] / target_sizes)  # (n_matched, 2)
        # Get the euclidean distance between the centers
        matched_dists = np.power(pred_center - target_center, 2).sum(1)  # (n_matched)
        # Compute the score
        matched_score = np.exp(-matched_dists * log(2)).mean()
        unmatched_score = np.array(0.0)
        loc_score = self._weight_scores(
            matched_score,
            unmatched_score,
            matched_preds,
            unmatched_preds,
            unmatched_targets,
        )
        return loc_score.item()
    def _shape_matching_score(
        self,
        matched_preds: np.ndarray,
        matched_targets: np.ndarray,
        unmatched_preds: np.ndarray,
        unmatched_targets: np.ndarray
    ) -> float:
        r"""Centered IoUs between boxes. Unmatched boxes give IoU=0
        (Eq. 3 in the paper)."""
        matched_score = centered_iou(matched_preds, matched_targets).mean()
        unmatched_score = np.array(0.0)
        shape_score = self._weight_scores(
            matched_score,
            unmatched_score,
            matched_preds,
            unmatched_preds,
            unmatched_targets,
        )
        return shape_score.item()
    def _classification_score(
        self,
        matched_preds: np.ndarray,
        matched_targets: np.ndarray,
        unmatched_preds: np.ndarray,
        unmatched_targets: np.ndarray
    ) -> float:
        r"""Clamped matthews correlation coefficient (Eq. 4 in the paper)."""
        pred_classes = matched_preds[:, 4]
        pred_multi_hot = np.zeros((len(pred_classes), self.num_classes))
        np.put_along_axis(pred_multi_hot, pred_classes[:, None].astype(np.int32), 1, 1)
        target_classes = matched_targets[:, 4]
        target_multi_hot = np.zeros((len(target_classes), self.num_classes))
        np.put_along_axis(target_multi_hot, target_classes[:, None].astype(np.int32), 1, 1)
        # NOTE(review): relies on matthews_corrcoef returning a numpy scalar
        # with .clip(); _get_class_cost_for_matching uses max(0, ...) for the
        # same clamp -- confirm against the installed sklearn version.
        matched_score = matthews_corrcoef(pred_multi_hot.reshape(-1),
                                          target_multi_hot.reshape(-1)).clip(min=0)
        unmatched_score = np.array(0.0)
        cls_score = self._weight_scores(
            matched_score,
            unmatched_score,
            matched_preds,
            unmatched_preds,
            unmatched_targets,
        )
        return cls_score.item()
    def _weight_scores(
        self,
        matched_score: float,
        unmatched_score: float,
        matched_preds: np.ndarray,
        unmatched_preds: np.ndarray,
        unmatched_targets: np.ndarray,
    ) -> np.ndarray:
        r"""Weight the scores according to the number of matched, overpredicted
        and missed boxes (Eq. 5 in the paper)."""
        matched = len(matched_preds) * self.w_matched
        overpred = len(unmatched_preds) * self.w_overpred
        missed = len(unmatched_targets) * self.w_missed
        total = matched + overpred + missed
        total_score = (matched_score * matched + (overpred + missed) * unmatched_score) / total
        return total_score
    def _get_class_cost_for_matching(self) -> np.ndarray:
        r"""Get the sample-level classification performance of the data"""
        if len(self.pred_boxes) == 0:
            return np.array(0.0)
        pred_multi_hot = np.zeros((len(self.pred_boxes), self.num_classes))
        for i, pred in enumerate(self.pred_boxes):
            np.put_along_axis(pred_multi_hot[i], pred[:, 4].astype(np.int32), 1, 0)
        target_multi_hot = np.zeros((len(self.pred_boxes), self.num_classes))
        for i, target in enumerate(self.target_boxes):
            np.put_along_axis(target_multi_hot[i], target[:, 4].astype(np.int32), 1, 0)
        mcc = matthews_corrcoef(pred_multi_hot.reshape(-1),
                                target_multi_hot.reshape(-1))
        class_cost = max(0, mcc)
        return class_cost
from typing import Optional, Tuple
import numpy as np
from scipy.optimize import linear_sum_assignment
def hungarian_matching(
    preds: np.ndarray,
    targets: np.ndarray,
    shape_weight: Optional[float] = 1.0,
    class_weight: Optional[float] = 1.0
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    r"""Match predicted boxes to target boxes with the Hungarian algorithm.

    Returns the matched (pred, target) pairs followed by the predictions
    and targets that remained unmatched.

    Args:
        preds: Array of predicted boxes (M_p x >=4)
        targets: Array of target boxes (M_t x >=4)
        shape_weight: How to weight shape similarity in the cost matrix
        class_weight: How to weight class similarity in the cost matrix
    """
    # Cost matrix: negated GIoU for shape, minus 1 wherever classes agree.
    same_class = (preds[:, 4][:, None] == targets[:, 4][None, :]).astype(np.int32)
    costs = (-generalized_box_iou(preds[:, :4], targets[:, :4]) * shape_weight
             - same_class * class_weight)
    pred_inds, target_inds = linear_sum_assignment(costs)
    # Boolean masks select everything the assignment did not pair up.
    leftover_preds = np.ones(preds.shape[0], dtype=bool)
    leftover_preds[pred_inds] = False
    leftover_targets = np.ones(targets.shape[0], dtype=bool)
    leftover_targets[target_inds] = False
    return (preds[pred_inds],
            targets[target_inds],
            preds[leftover_preds],
            targets[leftover_targets])
def centered_iou(preds: np.ndarray, targets: np.ndarray) -> np.ndarray:
    r"""Computes the IoU of pairs of centered boxes (aligned at upper left).
    Args:
        preds: Tensor of predicted boxes (M x 4), each (x,y,w,h)
        targets: Tensor of target boxes (M x 4), each (x,y,w,h)
    """
    # With both boxes aligned at the same upper-left corner, the
    # intersection is simply min(w) * min(h). This avoids building the
    # full M x M IoU matrix just to take its diagonal (previously O(M^2)
    # in time and memory); the per-pair result is identical.
    pred_area = preds[:, 2] * preds[:, 3]
    target_area = targets[:, 2] * targets[:, 3]
    inter = (np.maximum(0, np.minimum(preds[:, 2], targets[:, 2]))
             * np.maximum(0, np.minimum(preds[:, 3], targets[:, 3])))
    union = pred_area + target_area - inter
    return inter / union
def get_center(boxes: np.ndarray) -> np.ndarray:
    r"""Boxes in (x,y,w,h) format. Returns the center of each box."""
    half_extent = boxes[:, 2:4] / 2
    return boxes[:, :2] + half_extent
def harmonic_mean(x: np.ndarray, eps: Optional[float] = 1e-6) -> np.ndarray:
    r"""Harmonic mean of ``x``; ``eps`` guards against division by zero."""
    reciprocal_sum = (1 / (x + eps)).sum()
    return len(x) / reciprocal_sum
def box_iou(boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray:
    r"""Compute the intersection over union between all samples in two sets
    of bounding boxes in the (x,y,w,h) format."""
    # Upper-left / lower-right corners of both sets
    lo1, wh1 = boxes1[:, :2], boxes1[:, 2:4]
    hi1 = lo1 + wh1
    lo2, wh2 = boxes2[:, :2], boxes2[:, 2:4]
    hi2 = lo2 + wh2
    # Pairwise intersection rectangles via broadcasting (N1 x N2 x 2)
    top_left = np.maximum(lo1[:, None, :], lo2[None, :, :])
    bottom_right = np.minimum(hi1[:, None, :], hi2[None, :, :])
    extents = np.maximum(0, bottom_right - top_left)
    inter = extents[..., 0] * extents[..., 1]
    # Union = sum of the individual areas minus the overlap
    area1 = wh1[:, 0] * wh1[:, 1]
    area2 = wh2[:, 0] * wh2[:, 1]
    union = area1[:, None] + area2[None, :] - inter
    return inter / union
def generalized_box_iou(boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray:
    r"""Compute the generalized intersection over union between all samples
    in two sets of bounding boxes in the (x,y,w,h) format."""
    # Corners of both sets
    lo1, wh1 = boxes1[:, :2], boxes1[:, 2:4]
    hi1 = lo1 + wh1
    lo2, wh2 = boxes2[:, :2], boxes2[:, 2:4]
    hi2 = lo2 + wh2
    # Pairwise intersection (broadcast to N1 x N2 x 2)
    top_left = np.maximum(lo1[:, None, :], lo2[None, :, :])
    bottom_right = np.minimum(hi1[:, None, :], hi2[None, :, :])
    extents = np.maximum(0, bottom_right - top_left)
    inter = extents[..., 0] * extents[..., 1]
    area1 = wh1[:, 0] * wh1[:, 1]
    area2 = wh2[:, 0] * wh2[:, 1]
    union = area1[:, None] + area2[None, :] - inter
    # Smallest axis-aligned box enclosing each pair
    enc_top_left = np.minimum(lo1[:, None, :], lo2[None, :, :])
    enc_bottom_right = np.maximum(hi1[:, None, :], hi2[None, :, :])
    enc_extents = np.maximum(0, enc_bottom_right - enc_top_left)
    enc = enc_extents[..., 0] * enc_extents[..., 1]
    # GIoU = IoU minus the fraction of the enclosing box not covered by the union
    return inter / union - (enc - union) / enc
from typing import Dict, List, Tuple, TypeVar

from .n import Node, Param, Result
T = TypeVar('T')
class Roeteer:
    """HTTP router: one radix tree per HTTP method plus a shared
    "middleware" tree whose handlers are resolved before the method's
    own handlers (see :meth:`resolve`)."""
    def __init__(self) -> None:
        # Lazily populated map of method name -> routing tree.
        self._radix: Dict[str, Node[T]] = {}
    def _get_radix(self, method: str) -> Node[T]:
        # Create the tree for ``method`` on first access.
        if method not in self._radix:
            self._radix[method] = Node[T]()
        return self._radix[method]
    def use(self, path: str, handler: T) -> None:
        """Register a middleware handler for ``path`` ('*' wildcards are rejected)."""
        if '*' in path:
            raise Exception("Catch all wildcard not allowed in middleware")
        self._get_radix("middleware").insert(path, handler)
    def get(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for GET requests to ``path``."""
        self._get_radix("get").insert(path, *arg)
    def post(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for POST requests to ``path``."""
        self._get_radix("post").insert(path, *arg)
    def head(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for HEAD requests to ``path``."""
        self._get_radix("head").insert(path, *arg)
    def put(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for PUT requests to ``path``."""
        self._get_radix("put").insert(path, *arg)
    def delete(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for DELETE requests to ``path``."""
        self._get_radix("delete").insert(path, *arg)
    def connect(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for CONNECT requests to ``path``."""
        self._get_radix("connect").insert(path, *arg)
    def options(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for OPTIONS requests to ``path``."""
        self._get_radix("options").insert(path, *arg)
    def trace(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for TRACE requests to ``path``."""
        self._get_radix("trace").insert(path, *arg)
    def patch(self, path: str, *arg: List[T]) -> None:
        """Register handler(s) for PATCH requests to ``path``."""
        self._get_radix("patch").insert(path, *arg)
    def resolve(self, method: str, path: str) -> List[Tuple[T, Param]]:
        """Return (handler, params) pairs for ``path``: every matching
        middleware handler first, then the method's own handlers.

        NOTE(review): ``result.params`` is a dict of Param keyed by name,
        so the declared ``Param`` element type is imprecise.
        """
        handlers = []
        results = self._get_radix("middleware").lookup(path, stacked=True)
        if results:
            for result in results:
                for handler in result.handler:
                    handlers.append((handler, result.params))
        result = self._get_radix(method).lookup(path)
        if result:
            for handler in result.handler:
                handlers.append((handler, result.params))
        return handlers
from __future__ import annotations
import re
from copy import deepcopy
from typing import Generic, TypeVar, List, Dict, Tuple, Callable
def longest_common_prefix(a: str, b: str) -> int:
    """Return the length of the longest common prefix of ``a`` and ``b``."""
    limit = min(len(a), len(b))
    for idx in range(limit):
        if a[idx] != b[idx]:
            return idx
    return limit
T = TypeVar('T')
class Param:
    """A resolved path parameter: its raw string value plus an optional
    type tag parsed from the route pattern."""

    def __init__(self, value: str, type: str | None) -> None:
        self.value: str = value
        self.type: str | None = type

    def __eq__(self, other: object) -> bool:
        # Only another Param with identical value and type compares equal.
        if not isinstance(other, Param):
            return False
        return (self.value, self.type) == (other.value, other.type)
class Result(Generic[T]):
    """Outcome of a route lookup.

    Attributes:
        handler: The handlers registered on the matched node.
        params: Path parameters extracted during the lookup, keyed by name.
    """

    def __init__(self) -> None:
        self.handler: List[T] = list()
        self.params: Dict[str, Param] = dict()
class Node(Generic[T]):
    """A node of the routing radix tree.

    Children are keyed by the first character of the child's path segment;
    the special keys ':' and '*' hold the parameter and catch-all wildcard
    children respectively.
    """
    def __init__(self, path: str = None, handler: List[T] = None, children: Dict[str, "Node"] = None) -> None:
        self.path: str = path if path is not None else ""
        self.children: Dict[str, "Node"] = children if children is not None else {}
        self.handler: List[T] = handler if handler is not None else []
        # Populated only for wildcard nodes (':param' / '*catch-all'):
        self.paramName: str | None = None
        self.type: str | None = None
        self.regex: re.Pattern | None = None
    def lookup(self, p: str, stacked: bool = False) -> Result[T] | List[Result[T]] | None:
        """Resolve path ``p`` against this subtree.

        With ``stacked=False`` returns the single exact-match Result (or
        None). With ``stacked=True`` returns the Results of every node
        walked through that carries handlers (used for middleware).

        Implemented as an explicit DFS stack; each entry is
        (node, already_expanded, remaining_path, partial_result, walked_on_path).
        """
        stack: List[Tuple["Node"[T], bool, str, Result[T], bool]] = [(self, False, p, Result[T](), False)]
        i = 0
        while i >= 0:
            node, checked, path, result, walkedBy = stack[i]
            restPath: str = ""
            if checked:
                # Already expanded this entry: backtrack.
                i -= 1
                continue
            else:
                stack[i] = (node, True, path, result, walkedBy)
            if len(node.path) > 0 and node.path[0] == "*":
                # Catch-all wildcard: consumes the whole remaining path.
                if node.paramName is None:
                    raise Exception(f"Catch all wildcard has no name in '{node.path}'")
                result.params[node.paramName] = Param(path, node.type)
                restPath = ""
                if stacked:
                    stack[i] = (node, True, path, result, True)
            elif len(node.path) > 0 and node.path[0] == ":":
                # Parameter wildcard: consumes up to the next '/', or as far
                # as the node's regex matches when one was given.
                index = -1
                if node.regex is not None:
                    match = node.regex.match(path)
                    if match is None:
                        i -= 1
                        continue
                    index = match.end()
                else:
                    try:
                        index = path.index("/")
                    except:
                        index = len(path)
                restPath = path[index:]
                if node.paramName is None:
                    raise Exception(f"Catch all wildcard has no name in '{node.path}'")
                result.params[node.paramName] = Param(path[:index], node.type)
                if stacked:
                    stack[i] = (node, True, path, result, True)
            else:
                # Literal segment: must be a full prefix of the remaining path.
                lcp = longest_common_prefix(node.path, path)
                if lcp != len(node.path):
                    i -= 1
                    continue
                else:
                    restPath = path[lcp:]
                    if stacked:
                        stack[i] = (node, True, path, result, True)
            if restPath == "":
                # The path is fully consumed at this node.
                if not stacked:
                    result.handler = node.handler
                    return result
                else:
                    # Collect the results of every walked node with handlers.
                    stack = list(filter(lambda tuple: tuple[4], stack))
                    results: List[Result[T]] = []
                    for item in stack:
                        node, checked, path, result, walkedBy = item
                        if len(node.handler) <= 0:
                            continue
                        result.handler = node.handler
                        results.append(result)
                    return results
            # Expand children. Wildcard children are pushed first so the
            # literal child (pushed last, closest to i) is tried first.
            if "*" in node.children:
                i += 1
                stack.insert(i, (node.children["*"], False, restPath, deepcopy(result), False))
            if ":" in node.children:
                i += 1
                stack.insert(i, (node.children[":"], False, restPath, deepcopy(result), False))
            if restPath[0] in node.children:
                i += 1
                stack.insert(i, (node.children[restPath[0]], False, restPath, deepcopy(result), False))
        if stacked:
            # No exact match, but middleware walked along the way still applies.
            stack = list(filter(lambda tuple: tuple[4], stack))
            results: List[Result[T]] = []
            for item in stack:
                node, checked, path, result, walkedBy = item
                if len(node.handler) <= 0:
                    continue
                result.handler = node.handler
                results.append(result)
            return results
        return None
    def insert(self, path: str, *arg: List[T]) -> "Node":
        """Insert ``path`` into the tree, creating literal and wildcard
        nodes as needed, append the given handlers to the final node and
        return that node.
        """
        start = end = 0
        while end < len(path):
            if path[end] in [':', "*"]:
                # Flush the literal prefix collected so far, then parse the wildcard.
                self = self.merge(path[start:end])
                wildcard = path[end]
                start = end
                # Catch all wildcard
                if wildcard == "*":
                    end = len(path)
                    p = path[start:end]
                    result = re.match(r"^\*(?P<paramName>[a-z0-9._-]*)(\|(?P<type>[a-zA-Z_]*))?$", p, flags=re.IGNORECASE)
                    if result is None:
                        raise Exception(f"Malformatted catch all wildcard '{p}': required format: *catch-all|type where catch-all name can only contain a-z, A-Z, 0-9, ., _, -")
                    if wildcard in self.children:
                        raise Exception(f"Cannot add '{p}': another catch all wildcard '{self.children[wildcard].path}' already exists")
                    child = Node(p, [], {})
                    child.paramName = result.group("paramName")
                    child.type = result.group("type")
                    self.children[wildcard] = child
                    self = child
                    start = end
                    break
                if wildcard == ":":
                    result = re.search(r"^(:(?P<paramName>[a-zA-Z0-9._-]+)(\((?P<regex>.*?)\))?(\|(?P<type>[a-zA-Z_]*))?)", path[start:], flags=re.IGNORECASE)
                    if result is None:
                        # Should never get thrown...
                        raise Exception(f"Malformatted parameter wildcard in '{path}': required format: :parameter-name(optional regex)|optional type/optional additional path where parameter name can container A-Z, a-z, 0-9, ., _, - and type can container A-Z, a-z, 0-9")
                    end += result.end()
                    p = path[start:end]
                    # Reuse an existing parameter child only if its spelling matches.
                    child = self.children[wildcard] if wildcard in self.children else None
                    if child is not None:
                        if child.path != p:
                            raise Exception(f"Parameter name '{p}' in '{path}' should be equal to previous provided name '{child.path}'")
                    else:
                        child = Node(p, [], {})
                        child.paramName = result.group("paramName")
                        child.type = result.group("type")
                        child.regex = re.compile(result.group("regex")) if result.group("regex") is not None else None
                        self.children[wildcard] = child
                    self = child
            else:
                end += 1
        if start < len(path):
            self = self.merge(path[start:])
        if len(arg) > 0:
            # Append handlers, skipping duplicates.
            for handler in arg:
                if handler not in self.handler:
                    self.handler.append(handler)
        return self
    def merge(self, path: str) -> "Node":
        """Merge a literal ``path`` segment into this node, splitting the
        node at the longest common prefix when necessary, and return the
        node representing the full segment.
        """
        lcp = longest_common_prefix(path, self.path)
        if lcp == 0 and len(self.children) == 0:
            self.path = path
            return self
        if lcp < len(self.path):
            # Split: push this node's suffix down into a new child.
            child = Node(self.path[lcp:], self.handler, self.children)
            self.path = path[:lcp]
            self.children = { child.path[0]: child }
            self.handler = []
        if lcp < len(path):
            if path[lcp] in self.children:
                self = self.children[path[lcp]].insert(path[lcp:])
            else:
                child = Node(path[lcp:], [], {})
                self.children[path[lcp]] = child
                self = child
        return self
Rofa
=====
Introduction
------------
Interest in **quant investing** (or trading) is growing fast every day. Many people try to create profitable strategies and test them using past data (this is called a 'backtest' by quants). This is a good phenomenon, in that it can prevent people from wasting their money on unverified strategies (or mere news) and enables investing with more statistical approaches. However, some people have started to cling to higher CAGR (compound annual growth rate) and total returns alone, ignoring the other statistics that ensure a strategy's robustness. CAGR is just one factor for evaluating a strategy, and if you do not care about the other evaluations, your investments can be in danger in the future. See the picture below.
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/431x282/655dde558a4aa7fbd48f82059130b5fb/image.png
'Strategy B' underperformed 'Strategy A' before 2014, but ended up ahead in terms of total returns and CAGR. Even so, as you can see, we cannot say 'Strategy B' is more robust than 'Strategy A'.
**Rofa** is an abbreviation of 'Robust Factor'. This module helps you check a factor's robustness. All you do is prepare data in the required format and ask rofa to simulate, evaluate, and draw a summary graph of it. Now you have a powerful quant tool: **'Rofa'**.
Installation
------------
.. code::
pip install rofa
.. note::
You can use any editor to use python, but I recommend using jupyter notebook to start. jupyter notebook allows you to interactively run python code block by block. You can install jupyter notebook as follows.
.. code::
pip install jupyter
To start it
.. code::
jupyter notebook
Getting started
---------------
- **Import Rofa**
First of all, import ``rofa`` and ``QuantileSimulator`` from rofa
.. code:: python
import rofa
from rofa import QuantileSimulator
- **Registering daily returns data**
Unfortunately, rofa does not ship with any data. So, in order to run a simulation, you need to **register daily returns data** (a pandas DataFrame). Once you register returns data, rofa will find it later without re-registering (you can change this option using ``save=False``).
.. code:: python
import pandas as pd
# Read Close Data
    returns = pd.read_pickle('../data/returns.pkl')  # Your returns data
rofa.register_returns(returns)
.. note::
returns dataframe data must have following format, where columns are asset symbols and index is date.
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/950x265/e39cca22e2da9014b8bda5cfe78c6f40/image.png
- **Prepare data**
Data (pandas Dataframe) must have formats where columns are asset symbols (or code) and index is date such as returns. You can download data used in this example in here_.
.. _here: https://drive.google.com/drive/folders/1HnZYE0smawi_YoxcnTsdESEJuZDme2F5?usp=sharing
.. code:: python
# Read Close Data
close = pd.read_pickle('../data/close.pkl') # Your data
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/947x255/c8dd7064418c602ba01350f25be0a808/image.png
- **QuantileSimulator**
.. code:: python
quan_sim = QuantileSimulator(close, rebalance_freq=10, bins=5)
In QuantileSimulator, the first argument accepts factor data (close prices here). Additionally, you can set ``rebalance_freq``, ``bins``, ``tax_rate``, ``weight_model``, etc.
- **Run simulation**
Just run the simulation. Simulation logics are all done by ``rofa``
.. code:: python
quan_sim.run()
- **Plot portfolio returns**
Simulation classes have a plotter plugin inside, which makes it possible to visualize the simulation result.
.. code:: python
quan_sim.plotter.plot_portfolio_returns()
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/975x588/85eda2c8d19b247c944a86d95c0bc65d/image.png
From portfolio returns graph, we can compare overall performances and drawdowns of each portfolio.
- **Plot performance metrics (CAGR, MDD, Sharpe, Calmar)**
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/1061x655/5ed868976346a3554c2677b6077ab1c5/image.png
From this graph, we can check performance metrics and check if there is strong relationship between factor and performance.
- **Plot rolling performance.**
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/1076x656/a387b0aa8db6a379c9c578f986b42514/image.png
- **Wait, we can plot all at once**
You might have thought: how can I memorize all of these plot methods? Here's a method for you. ``plot_all`` plots all of the above. Super simple!
.. code:: python
quan_sim.plotter.plot_all()
.. image:: https://trello-attachments.s3.amazonaws.com/5cff44a05a8aa00f048b6f41/476x897/58c2343f257c855bf8d6ec6b1bfd4a7c/image.png
TODO
----
- Add more performance indicators
- Optimize code for efficiency. There are some places where the code is inefficient
- Create ``LongShortPlotter`` and make all methods used in ``QuantilePlotter``
- Add statistical analysis plugin such as ``Linear Regression``, ``t-test``, and ``ANOVA``
- Create ``NakedSimulator`` and add plotter plugins
- Create ``Evaluator Plugin`` Later
- Use ``numba`` or ``cython`` to improve performance
- Better documentation! | /rofa-0.1.2.tar.gz/rofa-0.1.2/README.rst | 0.714528 | 0.75341 | README.rst | pypi |
from datetime import datetime
from functools import partial
from typing import Any
from rofi import Rofi
def required(transcoder: "Callable[..., dict]", *args):
    """Run *transcoder* with *args* and abort the program if it yields nothing.

    Used to wrap transcoders for mandatory Notion properties (e.g. ``title``).
    The original annotation ``(...)`` was a meaningless parenthesized Ellipsis;
    a (string) ``Callable`` annotation documents the actual contract without
    needing a ``typing`` import at runtime.
    """
    result = transcoder(*args)
    if not result:
        # Abort the whole run: the property is mandatory and the user
        # cancelled or entered nothing.
        exit('This field is required.')
    return result
def text(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for free text and build a rich-text-style Notion property payload.

    Returns an empty dict when the prompt is cancelled (``None`` result);
    an empty string is still a valid entry.
    """
    res = r.text_entry(name)
    if res is None:  # `is` for the None singleton (PEP 8), was `== None`
        return {}
    return {
        name: {
            config.get('type'): [
                {
                    "type": "text",
                    "text": {"content": res}
                },
            ]
        }
    }
def number(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for a decimal value and build a Notion number property payload.

    Returns an empty dict when the prompt is cancelled; note ``0`` is a
    valid entry, which is why we test identity with ``None`` and not truthiness.
    """
    res = r.decimal_entry(name)
    if res is None:  # `is` for the None singleton (PEP 8), was `== None`
        return {}
    return {
        name: {'number': res}
    }
def select(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for one option of a (status/select) property and build its payload.

    Returns an empty dict when the selection dialog is dismissed.
    """
    kind = config.get('type')
    options = [option['name'] for option in config[kind]['options']]
    chosen, _key = r.select(name, options)
    if chosen == -1:  # dialog dismissed without a choice
        return {}
    return {name: {kind: {'name': options[chosen]}}}
def multi_select(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Repeatedly prompt for options of a multi-select property until cancelled.

    Each picked option is removed from the remaining choices so it cannot be
    selected twice. Returns an empty dict when nothing was selected.
    """
    choices = list(map(lambda x: x['name'], config['multi_select']['options']))
    all_res = []
    index = 0
    while index != -1:
        index, _key = r.select(name, choices)
        if index != -1:
            all_res.append({'name': choices[index]})
            # pop() by position instead of remove() by value: with duplicate
            # option names, remove() would delete the wrong (first) entry.
            choices.pop(index)
    if not all_res:
        return {}
    return {
        name: {
            'multi_select': all_res
        }
    }
def date(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for a date and build a Notion date property payload.

    The value is normalised through ``datetime.strptime`` so the payload
    always carries an ISO-8601 ``start`` date.
    """
    res = r.date_entry(name)
    if not res:
        return {}
    if not isinstance(res, str):  # isinstance over type() comparison (PEP 8)
        # Rofi may hand back a date object; strptime needs a string.
        res = str(res)
    # renamed from `date`: the old local shadowed this function's own name
    parsed = datetime.strptime(res, '%Y-%m-%d')
    return {
        name: {
            'date': {
                'start': parsed.isoformat()
            }
        }
    }
def people(r: Rofi, name: str, config: dict, store: Any) -> dict:
    # People properties are not supported: contribute no payload at all.
    return {}
def checkbox(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt Yes/No and build a Notion checkbox property payload.

    Bug fix: ``Rofi.select`` returns an ``(index, key)`` tuple (as used by the
    sibling ``select``/``multi_select`` functions), so the old comparisons of
    the raw result against ``None`` and ``'Yes'`` could never be true — the
    checkbox was always ``False`` and cancelling was not detected.
    """
    index, _key = r.select(name, ['Yes', 'No'])
    if index == -1:  # dialog dismissed
        return {}
    return {
        name: {
            'checkbox': index == 0  # 'Yes' is the first choice
        }
    }
def url(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for a URL and build a Notion url property payload."""
    res = r.text_entry(name)
    if res is None:  # `is` for the None singleton (PEP 8), was `== None`
        return {}
    return {
        name: {
            'url': res
        }
    }
def email(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for an e-mail address and build a Notion email property payload."""
    res = r.text_entry(name)
    if res is None:  # `is` for the None singleton (PEP 8), was `== None`
        return {}
    return {
        name: {
            'email': res
        }
    }
def phone_number(r: Rofi, name: str, config: dict, store: Any) -> dict:
    """Prompt for a phone number and build a Notion phone_number payload."""
    res = r.text_entry(name)
    if res is None:  # `is` for the None singleton (PEP 8), was `== None`
        return {}
    return {
        name: {
            'phone_number': res
        }
    }
def relation(r: Rofi, name: str, config: dict, choices: dict) -> dict:
    """Repeatedly prompt for related pages until the user cancels.

    ``choices`` maps page titles to page ids. NOTE(review): the dict is
    consumed destructively (picked entries are deleted) — confirm callers do
    not reuse it afterwards.
    """
    # NOTE(review): the looked-up value is unused; presumably kept so a
    # missing 'relation' config fails fast with a KeyError — confirm.
    config = config['relation']
    relations = []
    if not choices:
        return {}
    index = 0
    while index != -1:
        index, _key = r.select(name, choices)
        if index != -1:
            # r.select returns a positional index; map it back to its key.
            key = list(choices.keys())[index]
            relations.append({'id': choices[key]})
            del choices[key]
    if not relations:
        return {}
    return {
        name: {
            'relation': relations
        }
    }
# Dispatch table: Notion property type -> prompt function that collects a
# value of that type. 'title' is wrapped in `required` because a page
# cannot be created without a title.
transcoders = {
    'title': partial(required, text),
    'relation': relation,
    'rich_text': text,
    'number': number,
    'select': select,
    'multi_select': multi_select,
    'date': date,
    'people': people,
    'checkbox': checkbox,
    'url': url,
    'email': email,
    'phone_number': phone_number,
} | /rofi-notion-2.0.1.tar.gz/rofi-notion-2.0.1/rofi_notion/transcoders.py | 0.544317 | 0.24102 | transcoders.py | pypi |
# rbw-rofi
## A rofi frontend for Bitwarden
Based on the alternative [Bitwarden](https://bitwarden.com/) CLI [rbw](https://github.com/doy/rbw/) and inspired by [rofi-pass](https://github.com/carnager/rofi-pass), `rbw-rofi` is a simplistic password typer/copier using [rofi](https://github.com/davatorium/rofi) and [wofi](https://hg.sr.ht/~scoopta/wofi).
## Features
- Autotype password or username (`Enter`/`Alt+3` and `Alt+2`, respectively)
- Autotype username and password (with a `tab` character in between) with `Alt+1` (and copy TOTP to clipboard)
- Copy username, password or TOTP to the clipboard (`Alt+u`, `Alt+p` and `Alt+t`, respectively)
- Show an autotype menu with all fields
## Usage
First, you need to configure `rbw`. See its documentation for that.
Then, you can start `rofi-rbw`. It is *not* available as a rofi mode.
# Configuration
You can configure `rofi-rbw` either with cli arguments or with a config file called `$XDG_CONFIG_HOME/rofi-rbw.rc`. In the file, use the long option names without double dashes.
## Options
| long option | short option | possible values | description |
|----------------------|--------------|------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--action` | `-a` | `type` (default), `copy`, `print` | Choose what `rofi-rbw` should do. |
| `--target` | `-t` | `username`, `password`, `totp` (or any custom field) | Choose which components of the selected entry are interesting. Can be passed multiple times to type/copy/print several components. Default is `username` and `password`. |
| `--prompt` | `-r` | any string | Define the text of the prompt. |
| `--selector-args` | | | Define arguments that will be passed through to `rofi` or `wofi`.<br/>Please note that you need to specify it as `--selector-args="<args>"` or `--selector-args " <args>"` because of a [bug in argparse](https://github.com/python/cpython/issues/53580) |
| `--selector` | | `rofi`, `wofi` | Show the selection dialog with this application. Chosen automatically by default. |
| `--clipboarder` | | `xsel`, `xclip`, `wl-copy` | Access the clipboard with this application. Chosen automatically by default. |
| `--typer` | | `xdotool`, `wtype`, `ydotool`, `dotool` | Type the characters using this application. Chosen automatically by default. |
| `--clear-after` | | integer number >= 0 (default is `0`) | Limit the duration in seconds passwords stay in your clipboard (unless overwritten). When set to 0, passwords will be kept indefinitely. |
| `--no-help` | | | Don't show the help message about the available shortcuts. |
| `--no-folder` | | | Don't show the entry's folder in the list. |
| `--keybindings` | | | Define custom keybindings in the format `<shortcut>:<action>:<target>`, for example `Alt+n:copy:username`. Multiple keybindings can be concatenated with `,`; multiple targets for one shortcut can be concatenated with `:`. Note that `wofi` doesn't support keybindings. |
| `--menu-keybindings` | | | Define custom keybindings for the target menu in the format `<shortcut>:<action>`, similar to `--keybindings`. Note that `wofi` doesn't support keybindings. |
# Installation
## From distribution repositories
[](https://repology.org/project/rofi-rbw/versions)
## From PyPI
`rofi-rbw` is on [PyPI](https://pypi.org/project/rofi-rbw/). You can install it with `pip install --user rofi-rbw` (or `sudo pip install rofi-rbw`).
## Manually
Download the wheel file from releases and install it with `sudo pip install $filename` (or you can use `pip install --user $filename` to only install it for the local user).
Note that it needs `configargparse` to work.
## Dependencies
You also need:
- Python 3.7 or higher
- `rofi` or `wofi`
- Something to programmatically type characters into other applications. Depending on your display server, it's `xdotool`, `wtype`, `ydotool` or `dotool`.
- Something to copy text to the clipboard. Again, depending on the display server, you want `xclip`, `xsel` or `wl-copy`.
| /rofi_rbw-1.2.0.tar.gz/rofi_rbw-1.2.0/README.md | 0.758958 | 0.679843 | README.md | pypi |
import re
from subprocess import run
from typing import Dict, List, Tuple, Union
from .abstractionhelper import is_installed, is_wayland
from .credentials import Credentials
from .entry import Entry
from .models import Action, Keybinding, Target, Targets
class Selector:
    """Strategy base class for showing dmenu-style selection dialogs.

    Concrete implementations (Rofi, Wofi) register themselves simply by
    subclassing; ``best_option`` resolves the one to use at runtime via
    ``__subclasses__()``. The bare ``Selector`` fallback raises
    ``NoSelectorFoundException`` on use.
    """

    @staticmethod
    def best_option(name: str = None) -> "Selector":
        # Prefer the selector explicitly requested by name, then the first
        # subclass whose backing binary is available.
        try:
            return next(selector for selector in Selector.__subclasses__() if selector.name() == name)()
        except StopIteration:
            try:
                return next(selector for selector in Selector.__subclasses__() if selector.supported())()
            except StopIteration:
                return Selector()

    @staticmethod
    def supported() -> bool:
        # Overridden by subclasses: is the backing tool usable here?
        pass

    @staticmethod
    def name() -> str:
        # Overridden by subclasses: CLI name used to request this selector.
        pass

    def show_selection(
        self,
        entries: List[Entry],
        prompt: str,
        show_help_message: bool,
        show_folders: bool,
        keybindings: Dict[str, Tuple[Action, List[Target]]],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None], Union[Entry, None]]:
        raise NoSelectorFoundException()

    def select_target(
        self,
        credentials: Credentials,
        show_help_message: bool,
        keybindings: Dict[str, Action],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None]]:
        raise NoSelectorFoundException()

    def _format_targets_from_credential(self, credentials: Credentials) -> List[str]:
        """Render one menu line per copyable/typable field of the credential.

        Secrets (password, custom fields) are masked: first character plus
        asterisks for the remainder.
        """
        targets = []
        if credentials.username:
            targets.append(f"Username: {credentials.username}")
        if credentials.password:
            targets.append(f'Password: {credentials.password[0]}{"*" * (len(credentials.password) - 1)}')
        if credentials.has_totp:
            targets.append(f"TOTP: {credentials.totp}")
        if len(credentials.uris) == 1:
            targets.append(f"URI: {credentials.uris[0]}")
        else:
            # Number the URIs (1-based) when there are several (or none).
            for (key, value) in enumerate(credentials.uris):
                targets.append(f"URI {key + 1}: {value}")
        for (key, value) in credentials.further.items():
            targets.append(f'{key}: {value[0]}{"*" * (len(value) - 1)}')
        return targets

    @staticmethod
    def _extract_targets(output: str) -> List[Target]:
        # The target name is everything before the first ':' on each line.
        return [Target(line.split(":")[0]) for line in output.strip().split("\n")]

    @staticmethod
    def _calculate_max_width(entries: List[Entry], show_folders: bool) -> int:
        # Widest "folder/name" (or just name) column, used to align usernames.
        # NOTE(review): assumes entry.folder is always a string here — a None
        # folder would raise TypeError; confirm upstream guarantees this.
        if show_folders:
            return max(len(it.name) + len(it.folder) + 1 for it in entries)
        else:
            return max(len(it.name) for it in entries)

    @staticmethod
    def _format_folder(entry: Entry, show_folders: bool) -> str:
        if not show_folders or not entry.folder:
            return ""
        return f"{entry.folder}/"

    @staticmethod
    def justify(entry: Entry, max_width: int, show_folders: bool) -> str:
        # Padding so the username column lines up across entries.
        # NOTE(review): for an *empty* folder with show_folders=True this still
        # subtracts len("")+1 although _format_folder printed nothing — the
        # column would be off by one; confirm empty folders cannot occur.
        return " " * (max_width - len(entry.name) - ((len(entry.folder) + 1) if show_folders else 0))
class Rofi(Selector):
    """Selector backed by the `rofi` launcher.

    Entry names are wrapped in Pango ``<b>`` markup (hence ``-markup-rows``).
    Custom keybindings are mapped onto rofi's ``-kb-custom-N`` flags, which
    rofi reports back as process return codes ``10 + N - 1``; return code 1
    means the dialog was cancelled.
    """

    @staticmethod
    def supported() -> bool:
        return is_installed("rofi")

    @staticmethod
    def name() -> str:
        return "rofi"

    def show_selection(
        self,
        entries: List[Entry],
        prompt: str,
        show_help_message: bool,
        show_folders: bool,
        keybindings: List[Keybinding],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None], Union[Entry, None]]:
        """Show the entry list; return (targets, action, entry) of the choice."""
        parameters = [
            "rofi",
            "-markup-rows",
            "-dmenu",
            "-i",
            "-sort",
            "-p",
            prompt,
            *self.__build_parameters_for_keybindings(keybindings),
            *additional_args,
        ]
        if show_help_message and keybindings:
            parameters.extend(self.__format_keybindings_message(keybindings))
        rofi = run(
            parameters,
            input="\n".join(self.__format_entries(entries, show_folders)),
            capture_output=True,
            encoding="utf-8",
        )
        if rofi.returncode == 1:
            # Dialog dismissed without a selection.
            return None, Action.CANCEL, None
        elif rofi.returncode >= 10:
            # -kb-custom-N fired: 10-based return code indexes `keybindings`.
            keybinding = keybindings[(rofi.returncode - 10)]
            return_action = keybinding.action
            return_targets = keybinding.targets
        else:
            # Plain Enter: leave action/targets to the caller's defaults.
            return_action = None
            return_targets = None
        return return_targets, return_action, self.__parse_formatted_string(rofi.stdout)

    def __format_entries(self, entries: List[Entry], show_folders: bool) -> List[str]:
        # "folder/<b>name</b><padding> username", aligned on the widest name.
        max_width = self._calculate_max_width(entries, show_folders)
        return [
            f"{self._format_folder(it, show_folders)}<b>{it.name}</b>{self.justify(it, max_width, show_folders)} {it.username}"
            for it in entries
        ]

    def __parse_formatted_string(self, formatted_string: str) -> Entry:
        # Inverse of __format_entries: recover folder/name/username from the
        # markup line rofi echoes back.
        match = re.compile("(?:(?P<folder>.+)/)?<b>(?P<name>.*?) *</b>(?P<username>.*)").search(formatted_string)
        return Entry(match.group("name"), match.group("folder"), match.group("username").strip())

    def select_target(
        self,
        credentials: Credentials,
        show_help_message: bool,
        keybindings: List[Keybinding],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None]]:
        """Show the per-credential field menu; return (targets, action)."""
        parameters = [
            "rofi",
            "-markup-rows",
            "-dmenu",
            "-p",
            "Choose target",
            "-i",
            *self.__build_parameters_for_keybindings(keybindings),
            *additional_args,
        ]
        if show_help_message and keybindings:
            parameters.extend(self.__format_keybindings_message(keybindings))
        rofi = run(
            parameters,
            input="\n".join(self._format_targets_from_credential(credentials)),
            capture_output=True,
            encoding="utf-8",
        )
        if rofi.returncode == 1:
            return None, Action.CANCEL
        elif rofi.returncode >= 10:
            # Same -kb-custom-N return-code convention as show_selection.
            action = keybindings[(rofi.returncode - 10)].action
        else:
            action = None
        return (self._extract_targets(rofi.stdout)), action

    def __build_parameters_for_keybindings(self, keybindings: List[Keybinding]) -> List[str]:
        # Register each shortcut as -kb-custom-1, -kb-custom-2, ... in order.
        params = []
        for index, keybinding in enumerate(keybindings):
            params.extend([f"-kb-custom-{1 + index}", keybinding.shortcut])
        return params

    def __format_keybindings_message(self, keybindings: List[Keybinding]):
        # Help line below the prompt: "<shortcut>: <what it does> | ...".
        return [
            "-mesg",
            " | ".join(
                [
                    f"<b>{keybinding.shortcut}</b>: {self.__format_action_and_targets(keybinding)}"
                    for keybinding in keybindings
                ]
            ),
        ]

    def __format_action_and_targets(self, keybinding: Keybinding) -> str:
        # Human-readable description of a keybinding for the help message.
        if keybinding.targets and Targets.MENU in keybinding.targets:
            return "Menu"
        elif keybinding.action == Action.SYNC:
            return "Sync logins"
        elif keybinding.targets:
            return f"{keybinding.action.value.title()} {', '.join([target.raw for target in keybinding.targets])}"
        else:
            return keybinding.action.value.title()
class Wofi(Selector):
    """Selector backed by `wofi` (Wayland only).

    The ``keybindings`` parameters are accepted for interface parity with
    ``Rofi`` but are not used — this implementation passes no keybinding
    flags, so it never returns a keybinding action or targets.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("wofi")

    @staticmethod
    def name() -> str:
        return "wofi"

    def show_selection(
        self,
        entries: List[Entry],
        prompt: str,
        show_help_message: bool,
        show_folders: bool,
        keybindings: Dict[str, Tuple[Action, List[Target]]],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None], Union[Entry, None]]:
        """Show the entry list; return (None, action-or-None, entry-or-None)."""
        parameters = ["wofi", "--dmenu", "-p", prompt, *additional_args]
        wofi = run(
            parameters,
            input="\n".join(self.__format_entries(entries, show_folders)),
            capture_output=True,
            encoding="utf-8",
        )
        if wofi.returncode == 0:
            return None, None, self.__parse_formatted_string(wofi.stdout)
        else:
            # Any non-zero return code is treated as a cancelled dialog.
            return None, Action.CANCEL, None

    def __format_entries(self, entries: List[Entry], show_folders: bool) -> List[str]:
        # Plain-text variant of Rofi.__format_entries (wofi gets no markup).
        max_width = self._calculate_max_width(entries, show_folders)
        return [
            f"{self._format_folder(it, show_folders)}{it.name}{self.justify(it, max_width, show_folders)} {it.username}"
            for it in entries
        ]

    def __parse_formatted_string(self, formatted_string: str) -> Entry:
        # Inverse of __format_entries: split on the two-space padding gap.
        match = re.compile("(?:(?P<folder>.+)/)?(?P<name>.*?) *  (?P<username>.*)").search(formatted_string)
        return Entry(match.group("name").strip(), match.group("folder"), match.group("username").strip())

    def select_target(
        self,
        credentials: Credentials,
        show_help_message: bool,
        keybindings: Dict[str, Action],
        additional_args: List[str],
    ) -> Tuple[Union[List[Target], None], Union[Action, None]]:
        """Show the per-credential field menu; return (targets, action)."""
        parameters = ["wofi", "--dmenu", "-p", "Choose target", *additional_args]
        wofi = run(
            parameters,
            input="\n".join(self._format_targets_from_credential(credentials)),
            capture_output=True,
            encoding="utf-8",
        )
        if wofi.returncode == 1:
            return None, Action.CANCEL
        return self._extract_targets(wofi.stdout), None
class NoSelectorFoundException(Exception):
    """Raised when neither rofi nor wofi is available to show a selection."""

    def __str__(self) -> str:
        return "Could not find a valid way to show the selection. Please check the required dependencies." | /rofi_rbw-1.2.0.tar.gz/rofi_rbw-1.2.0/src/rofi_rbw/selector.py | 0.627723 | 0.239672 | selector.py | pypi |
import time
from subprocess import run
from .abstractionhelper import is_installed, is_wayland
class Clipboarder:
    """Strategy base class for copying text to the system clipboard.

    Concrete implementations register themselves by subclassing;
    ``best_option`` picks one at runtime via ``__subclasses__()``. The bare
    ``Clipboarder`` fallback raises ``NoClipboarderFoundException`` on use.
    """

    @staticmethod
    def best_option(name: str = None) -> "Clipboarder":
        # Prefer the implementation requested by name, then the first one
        # whose backing tool is installed on this display server.
        try:
            return next(clipboarder for clipboarder in Clipboarder.__subclasses__() if clipboarder.name() == name)()
        except StopIteration:
            try:
                return next(clipboarder for clipboarder in Clipboarder.__subclasses__() if clipboarder.supported())()
            except StopIteration:
                return Clipboarder()

    @staticmethod
    def supported() -> bool:
        # Overridden by subclasses: is the backing tool usable here?
        pass

    @staticmethod
    def name() -> str:
        # Overridden by subclasses: CLI name used to request this clipboarder.
        pass

    def copy_to_clipboard(self, characters: str) -> None:
        raise NoClipboarderFoundException()

    def clear_clipboard_after(self, clear: int) -> None:
        raise NoClipboarderFoundException()
class XSelClipboarder(Clipboarder):
    """Clipboard access via `xsel` (X11 only)."""

    # Last string handed to copy_to_clipboard; reset to None after clearing.
    # NOTE(review): unset until the first copy — confirm clear_clipboard_after
    # is only ever called after copy_to_clipboard.
    __last_copied_characters: str

    @staticmethod
    def supported() -> bool:
        return not is_wayland() and is_installed("xsel")

    @staticmethod
    def name() -> str:
        return "xsel"

    def copy_to_clipboard(self, characters: str) -> None:
        run(["xsel", "--input", "--clipboard"], input=characters, encoding="utf-8")
        self.__last_copied_characters = characters

    def fetch_clipboard_content(self) -> str:
        """Return the current contents of the CLIPBOARD selection."""
        return run(
            [
                "xsel",
                "--output",
                "--clipboard",
            ],
            capture_output=True,
            encoding="utf-8",
        ).stdout

    def clear_clipboard_after(self, clear: int) -> None:
        """Clear the clipboard after `clear` seconds; 0 keeps it indefinitely."""
        if clear > 0:
            time.sleep(clear)
            # Only clear clipboard if nothing has been copied since the password
            if self.fetch_clipboard_content() == self.__last_copied_characters:
                run(["xsel", "--clear", "--clipboard"])
            self.__last_copied_characters = None
class XClipClipboarder(Clipboarder):
    """Clipboard access via `xclip` (X11 only)."""

    # Last string handed to copy_to_clipboard; reset to None after clearing.
    # NOTE(review): unset until the first copy — confirm clear_clipboard_after
    # is only ever called after copy_to_clipboard.
    __last_copied_characters: str

    @staticmethod
    def supported() -> bool:
        return not is_wayland() and is_installed("xclip")

    @staticmethod
    def name() -> str:
        return "xclip"

    def copy_to_clipboard(self, characters: str) -> None:
        run(["xclip", "-in", "-selection", "clipboard"], input=characters, encoding="utf-8")
        self.__last_copied_characters = characters

    def fetch_clipboard_content(self) -> str:
        """Return the current contents of the CLIPBOARD selection."""
        return run(["xclip", "-o", "-selection", "clipboard"], capture_output=True, encoding="utf-8").stdout

    def clear_clipboard_after(self, clear: int) -> None:
        """Clear the clipboard after `clear` seconds; 0 keeps it indefinitely."""
        if clear > 0:
            time.sleep(clear)
            # Only clear clipboard if nothing has been copied since the password
            if self.fetch_clipboard_content() == self.__last_copied_characters:
                # Clearing means copying an empty string with xclip.
                self.copy_to_clipboard("")
            self.__last_copied_characters = None
class WlClipboarder(Clipboarder):
    """Clipboard access via `wl-copy` (Wayland only).

    Unlike the X11 implementations, this clears unconditionally after the
    timeout — wl-copy offers no cheap way to check what is currently copied.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("wl-copy")

    @staticmethod
    def name() -> str:
        return "wl-copy"

    def copy_to_clipboard(self, characters: str) -> None:
        run(["wl-copy"], input=characters, encoding="utf-8")

    def clear_clipboard_after(self, clear: int) -> None:
        """Clear the clipboard after `clear` seconds; 0 keeps it indefinitely."""
        if clear > 0:
            time.sleep(clear)
            run(["wl-copy", "--clear"])
class NoClipboarderFoundException(Exception):
    """Raised when no supported clipboard tool is installed."""

    def __str__(self) -> str:
        return "Could not find a valid way to copy to clipboard. Please check the required dependencies." | /rofi_rbw-1.2.0.tar.gz/rofi_rbw-1.2.0/src/rofi_rbw/clipboarder.py | 0.586404 | 0.156749 | clipboarder.py | pypi |
import time
from subprocess import run
from .abstractionhelper import is_installed, is_wayland
class Typer:
    """Strategy base class for typing characters into the focused window.

    Concrete implementations register themselves by subclassing;
    ``best_option`` picks one at runtime via ``__subclasses__()``. The bare
    ``Typer`` fallback raises ``NoTyperFoundException`` on use.
    """

    @staticmethod
    def best_option(name: str = None) -> "Typer":
        # Prefer the implementation requested by name, then the first one
        # whose backing tool is installed on this display server.
        try:
            return next(typer for typer in Typer.__subclasses__() if typer.name() == name)()
        except StopIteration:
            try:
                return next(typer for typer in Typer.__subclasses__() if typer.supported())()
            except StopIteration:
                return Typer()

    @staticmethod
    def supported() -> bool:
        # Overridden by subclasses: is the backing tool usable here?
        pass

    @staticmethod
    def name() -> str:
        # Overridden by subclasses: CLI name used to request this typer.
        pass

    def get_active_window(self) -> str:
        raise NoTyperFoundException()

    def type_characters(self, characters: str, active_window: str) -> None:
        raise NoTyperFoundException()
class XDoToolTyper(Typer):
    """Typing via `xdotool` (X11 only).

    The window that was active before the selector opened is captured first
    and re-activated before typing, so the characters land in the right app.
    """

    @staticmethod
    def supported() -> bool:
        return not is_wayland() and is_installed("xdotool")

    @staticmethod
    def name() -> str:
        return "xdotool"

    def get_active_window(self) -> str:
        # [:-1] strips the trailing newline from xdotool's output.
        return run(args=["xdotool", "getactivewindow"], capture_output=True, encoding="utf-8").stdout[:-1]

    def type_characters(self, characters: str, active_window: str) -> None:
        run(
            [
                "xdotool",
                "windowactivate",
                "--sync",
                active_window,
                "type",
                "--clearmodifiers",
                "--delay",
                "0",
                characters,
            ]
        )
        # workaround for https://github.com/jordansissel/xdotool/issues/43
        run(["xdotool", "keyup", "Shift_L", "Shift_R", "Alt_L", "Alt_R"])
class WTypeTyper(Typer):
    """Typing via `wtype` (Wayland only).

    Wayland offers no way to query or re-focus the active window, so
    ``active_window`` is a placeholder and typing goes to whatever is focused.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("wtype")

    @staticmethod
    def name() -> str:
        return "wtype"

    def get_active_window(self) -> str:
        return "not possible with wtype"

    def type_characters(self, characters: str, active_window: str) -> None:
        run(["wtype", characters])
class YDotoolTyper(Typer):
    """Typing via `ydotool` (Wayland only).

    As with wtype, the active window cannot be queried on Wayland; typing
    goes to whatever currently has focus.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("ydotool")

    @staticmethod
    def name() -> str:
        return "ydotool"

    def get_active_window(self) -> str:
        return "not possible with ydotool"

    def type_characters(self, characters: str, active_window: str) -> None:
        # Brief pause so the selector window can close before typing starts.
        time.sleep(0.05)
        run(["ydotool", "type", "--key-delay", "0", characters])
class DotoolTyper(Typer):
    """Typing via `dotool` (works on both X11 and Wayland).

    dotool reads commands from stdin, hence the ``type ...`` input line.
    """

    @staticmethod
    def supported() -> bool:
        return is_installed("dotool")

    @staticmethod
    def name() -> str:
        return "dotool"

    def get_active_window(self) -> str:
        return "not possible with dotool"

    def type_characters(self, characters: str, active_window: str) -> None:
        run(["dotool"], text=True, input=f"type {characters}")
class NoTyperFoundException(Exception):
    """Raised when no supported typing tool is installed."""

    def __str__(self) -> str:
        return "Could not find a valid way to type characters. Please check the required dependencies." | /rofi_rbw-1.2.0.tar.gz/rofi_rbw-1.2.0/src/rofi_rbw/typer.py | 0.630571 | 0.167712 | typer.py | pypi |
from pathlib import Path
from typing import Any, Dict
try:
_have_config_reader = True
from tmuxp.config_reader import ConfigReader
except ImportError: # no_cover_tmuxp_gte_1_16
# In tmuxp < 1.16, 3d party library `Kaptan` was used to read config files.
# `Kaptan` is just a wrapper around `PyYAML` and `json`. Since Yaml is a superset of
# JSON, we'll just use `yaml.safe_load`.
import yaml
_have_config_reader = False
try:
from tmuxp.workspace.loader import expand as expand_config
except ImportError: # no_cover_tmuxp_gte_1_18
# In tmuxp < 1.18, `expand` was in `tmuxp.config`
from tmuxp.config import expand as expand_config
try:
from tmuxp.workspace.finders import get_workspace_dir
except ImportError: # no_cover_tmuxp_gte_1_18
# In tmuxp < 1.18, `get_workspace_dir` was called `get_config_dir`.
# In tmuxp >= 1.11, < 1.18 get_config_dir was in `tmuxp.cli.utils`.
# In tmuxp < 1.11, get_config_dir was in tmuxp.cli.
try:
from tmuxp.cli.utils import get_config_dir as get_workspace_dir
except ImportError: # no_cover_tmuxp_gte_1_11
from tmuxp.cli import get_config_dir as get_workspace_dir
try:
from tmuxp.workspace.finders import in_dir as configs_in_dir
except ImportError: # no_cover_tmuxp_gte_1_18
# In tmuxp < 1.18, `in_dir` was in `tmuxp.config`
from tmuxp.config import in_dir as configs_in_dir
__all__ = [
"Config",
"configs_in_dir",
"expand_config",
"get_workspace_dir",
"read_config_file",
]
# A parsed tmuxp session configuration (raw mapping as loaded from YAML/JSON).
Config = Dict[str, Any]


def read_config_file(config_path: Path) -> Config:
    """Read a tmuxp session config file.
    If `tmuxp.config_reader.ConfigReader` is available, use that. Otherwise, fall back
    to `yaml.safe_load`.
    """
    if _have_config_reader:  # no_cover_tmuxp_lt_1_16
        cfg_reader = ConfigReader.from_file(config_path)
        return cfg_reader.content
    else:  # no_cover_tmuxp_gte_1_16
        # YAML is a superset of JSON, so safe_load handles both formats.
        return yaml.safe_load(config_path.read_text()) | /rofi_tmuxp-0.5.0-py3-none-any.whl/rofi_tmuxp/tmuxp_client.py | 0.631026 | 0.303896 | tmuxp_client.py | pypi |
import shlex
from subprocess import Popen
from typing import List, Optional, Iterable
from urllib.error import URLError
from urllib.request import urlopen
from dynmen import Menu
from tpblite import CATEGORIES
from tpblite import TPB as TPBAPI
from tpblite.models.torrents import Torrent, Torrents
from .config import CONFIG
from .proxy import get_proxies
from .utils import torrent_format
class TPB:
    """Interactive rofi/wofi front-end around the tpblite TPB API."""

    def __init__(self, url: Optional[str] = None):
        """Pick a reachable TPB base URL (explicit > working proxy > config)."""
        if url is None:
            if CONFIG["menu"].getboolean("use_tpb_proxy"):
                try:
                    # NOTE: the comprehension variable shadows the `url`
                    # parameter; harmless since it is reassigned right after.
                    proxies = [url for url in get_proxies() if self._check_url(url)]
                    url = proxies[0]
                # Broad on purpose: any proxy failure (network error, empty
                # list) falls back to the configured URL.
                except Exception:
                    url = CONFIG["menu"]["tpb_url"]
            else:
                url = CONFIG["menu"]["tpb_url"]
        self.url = url
        if not self._check_url(self.url):
            raise ValueError(f"Cannot reach '{self.url}'.")
        self.tpb = TPBAPI(self.url)

    @staticmethod
    def get_menu(
        prompt: Optional[str] = None,
        lines: Optional[int] = None,
        multiple: bool = False,
        message: Optional[str] = None,
    ) -> Menu:
        """Create the dynamic menu object.

        The extra flags are only understood by rofi, so they are added only
        when the configured command invokes rofi.
        NOTE(review): `lines` is an int appended to a list of str args —
        confirm dynmen stringifies menu arguments.
        """
        args = shlex.split(CONFIG["menu"]["command"])
        if "rofi" in args:
            if prompt is not None:
                args += ["-p", prompt]
            if lines is not None:
                args += ["-l", lines]
            if multiple:
                args += ["-multi-select"]
            if message is not None:
                args += ["-mesg", message]
        return Menu(args)

    @staticmethod
    def _check_url(url):
        # A URL counts as reachable only on a clean HTTP 200.
        try:
            return urlopen(url).getcode() == 200
        except URLError:
            return False

    def search_or_top(
        self,
    ) -> Torrents:  # pylint: disable=inconsistent-return-statements
        """Choose between top or search.
        Returns:
            Torrents matching either the search or the top category.
        """
        choices = {"Search": self.search, "Top": self.top}
        menu = self.get_menu(prompt="Select", lines=2)
        out = menu(choices)
        # out.value is the chosen bound method; calling it runs the flow.
        return out.value()

    def _ask_query(self) -> str:
        # lines=0 renders a bare input field instead of a list.
        menu = self.get_menu(prompt="Search", lines=0)
        return menu().selected

    def search(self, query: Optional[str] = None) -> List[Torrent]:
        """Search for torrents.
        Args:
            query (optional): search query; prompted interactively if omitted.
        Returns:
            The Torrents matching the search query.
        """
        if query is None:
            query = self._ask_query()
        torrents = self.tpb.search(query)
        return self.select(torrents)

    def _ask_category(self) -> str:
        # Category names come from config; optionally doubled with a
        # "<name> 48h" variant for the last-48-hours top lists.
        categories = [cat.strip() for cat in CONFIG["menu"]["categories"].split(",")]
        if CONFIG["menu"].getboolean("categories_48h"):
            categories += [cat + " 48h" for cat in categories]
        categories = sorted(categories)
        menu = self.get_menu(prompt="Select", lines=len(categories))
        out = menu(categories)
        return out.selected

    def top(self, category: Optional[str] = None) -> List[Torrent]:
        """Get the top torrents for a category.
        Args:
            category (optional): top category; prompted interactively if omitted.
        Returns:
            The torrents for the selected categories.
        """
        if category is None:
            category = self._ask_category()
        last_48 = "48h" in category
        category = category.split()[0]
        # Map the name onto tpblite's CATEGORIES constants; a category group
        # (non-int) falls back to its ALL member.
        category = getattr(CATEGORIES, category.upper())
        if not isinstance(category, int):
            category = category.ALL
        torrents = self.tpb.top(category=category, last_48=last_48)
        return self.select(torrents)

    def select(self, torrents: Iterable[Torrent]) -> List[Torrent]:
        """Select a torrent from a `Torrents` object.
        Args:
            torrents: `Torrents` from which to select torrents.
        Returns:
            Selected torrents.
        """
        # NOTE(review): keyed by the formatted line — two torrents rendering
        # to the same string collapse into one menu entry; confirm acceptable.
        torrents_formatted = {}
        for torrent in torrents:
            torrents_formatted[
                torrent_format(CONFIG["menu"]["torrent_format"], torrent)
            ] = torrent
        menu = self.get_menu(prompt="Select", multiple=True)
        out = menu(torrents_formatted)
        selected_out = []
        # multi-select returns one line per chosen entry.
        for selected in out.selected.split("\n"):
            selected_out.append(torrents_formatted[selected])
        return selected_out

    def action(self, torrent: Torrent) -> None:
        """Execute an action on `Torrent`.
        Args:
            torrent: `Torrent` instance on which to run the action.
        """
        actions = CONFIG["actions"]
        menu = self.get_menu(prompt="Select", lines=len(actions), message=str(torrent))
        out = menu(actions)
        command = torrent_format(out.value, torrent)
        # NOTE(review): shell=True runs a user-config template interpolated
        # with torrent metadata — titles containing shell metacharacters are
        # interpreted by the shell; confirm this is acceptable.
        Popen(command, shell=True) | /rofi-tpb-0.2.7.tar.gz/rofi-tpb-0.2.7/rofi_tpb/tpb.py | 0.761095 | 0.156234 | tpb.py | pypi |
from glob import glob
from typing import Dict, List
from .paths import *
def read_characters_from_files(files: List[str], frecent: List[str], use_additional: bool) -> Dict[str, str]:
    """Load characters from all requested files.

    Returns a mapping of character -> comma-joined descriptions. Frecent
    characters are seeded first (with no descriptions) so they stay at the
    top of the resulting (insertion-ordered) mapping.
    """
    all_characters: Dict[str, List[str]] = {character: [] for character in frecent}
    for file in __resolve_all_filenames(files, use_additional):
        for line in __load_from_file(file):
            # Each line is "<character> <description>"; lines without a
            # description are skipped entirely (unless the character is
            # already present via frecent).
            parsed_line = line.split(" ", 1)
            # Replaces the original conditional-expression-used-as-statement
            # (`...append(...) if 1 < len(parsed_line) else ""`) with a
            # plain, readable `if`.
            if len(parsed_line) > 1:
                all_characters.setdefault(parsed_line[0], []).append(parsed_line[1])
    return {character: ", ".join(descriptions) for character, descriptions in all_characters.items()}
def __resolve_all_filenames(file_names: List[str], use_additional: bool) -> List[Path]:
    """Resolve every requested file name to the concrete paths backing it."""
    resolved: List[Path] = []
    for name in file_names:
        resolved.extend(__resolve_filename(name, use_additional))
    return resolved
def __resolve_filename(file_name: str, use_additional: bool) -> List[Path]:
    """Resolve one requested name to concrete files, in order of preference:

    1. a user-supplied path/glob (with ``~`` expanded),
    2. a bundled data file whose name starts with *file_name*
       (plus its additional files),
    3. the pseudo-name ``all``, which expands to every bundled .csv file.

    Raises FileNotFoundError when nothing matches.
    """
    resolved_file_names = []
    for file in glob(os.path.expanduser(file_name)):
        resolved_file_names.append(Path(file))
    if resolved_file_names:
        return resolved_file_names
    if not Path(file_name).expanduser().is_absolute():
        # Bare names refer to files shipped with the package; a name without
        # a wildcard is matched as a prefix.
        for file in (Path(__file__).parent / "data").glob(file_name if "*" in file_name else f"{file_name}*"):
            resolved_file_names.append(file)
            resolved_file_names += __load_additional_files(file, use_additional)
        if resolved_file_names:
            return resolved_file_names
    if file_name == "all":
        # Recurse per bundled file so each one picks up its additional files.
        nested_file_names = [
            __resolve_filename(file.stem, use_additional) for file in (Path(__file__).parent / "data").glob("*.csv")
        ]
        # Flatten the per-file lists into one.
        resolved_file_names += [file_name for file_names in nested_file_names for file_name in file_names]
        return resolved_file_names
    raise FileNotFoundError(f"Couldn't find file {file_name!r}")
def __load_additional_files(original_file: Path, use_additional: bool) -> List[Path]:
    """Collect extra description files that complement *original_file*."""
    extras: List[Path] = []
    # A user-provided additions file is always honoured if present.
    user_file = custom_additional_files_location / f"{original_file.stem}.additional.csv"
    if user_file.is_file():
        extras.append(user_file)
    # The bundled additions file is opt-in via use_additional.
    bundled_file = Path(__file__).parent / "data" / "additional" / f"{original_file.stem}.csv"
    if use_additional and bundled_file.is_file():
        extras.append(bundled_file)
    return extras
def __load_from_file(file: Path) -> List[str]:
    """Read *file* and return its lines, with surrounding blank lines removed."""
    content = file.read_text()
    return content.strip("\n").split("\n")
import argparse
import re
import sys
from dataclasses import dataclass
from enum import IntEnum, auto
from pickle import dump, load
from typing import Dict, List, Optional
from . import emoji_data
from .action import execute_action
from .argument_parsing import parse_arguments_flexible
from .clipboarder.clipboarder import Clipboarder
from .file_loader import read_characters_from_files
from .frecent import load_frecent_characters, save_frecent_characters
from .models import Action
from .paths import *
from .recent import load_recent_characters, save_recent_characters
from .typer.typer import Typer
class Step(IntEnum):
    # Ordered stages of the rofi script-mode state machine. IntEnum so the
    # current stage can simply be advanced with `state.step += 1` (or `+= 2`
    # to skip a stage).
    SHOW_ALL = auto()          # print the full character list to rofi
    SHORTCUTS = auto()         # handle hot-key selections (recents / actions)
    SELECT_SKIN_TONE = auto()  # ask for a skin tone where applicable
    SELECT_ACTION = auto()     # let the user choose an action from the menu
    EXECUTE = auto()           # perform the chosen action(s)
    DONE = auto()              # finished; the cached state can be removed
@dataclass
class State:
    """State of the rofi script-mode flow, pickled to a cache file so it
    survives across the repeated invocations rofi makes of this script."""

    step: Step                          # current stage of the state machine
    actions: List[Action]               # actions to run on the chosen characters
    processed_characters: str           # characters fully processed so far
    unprocessed_characters: List[str]   # raw selections still awaiting processing
    return_code: int                    # rofi's ROFI_RETV for this invocation
    __current_input: Optional[str] = None  # last user selection; consumed once via `current_input`
    output: Optional[str] = None        # text to print back to rofi, if any

    def save_to_cache(self) -> None:
        # Persist the full state so the next rofi callback can resume it.
        with cache_file_location.open("wb+") as file:
            dump(self, file)

    @staticmethod
    def load_from_cache(current_input: str, return_code: int) -> "State":
        # First invocation (no cache yet): start a fresh state machine.
        if not cache_file_location.is_file():
            return State(
                step=Step.SHOW_ALL,
                actions=[],
                processed_characters="",
                unprocessed_characters=[],
                return_code=return_code,
                output=None,
            )
        with cache_file_location.open("rb+") as file:
            state = load(file)
            # The cached values of these fields are stale; overwrite them with
            # what rofi passed to *this* invocation.
            state.__current_input = current_input
            state.return_code = return_code
            state.output = None
            return state

    @staticmethod
    def remove_cache():
        # A missing cache file is fine — there is simply nothing to remove.
        try:
            cache_file_location.unlink()
        except FileNotFoundError:
            pass

    @property
    def has_input(self) -> bool:
        return self.__current_input is not None

    @property
    def current_input(self) -> str:
        # One-shot read: the input is cleared on access so it cannot be
        # consumed twice by different steps of the state machine.
        temp = self.__current_input
        self.__current_input = None
        return temp

    def reset_current_input(self) -> None:
        self.__current_input = None
class ModeRofimoji:
    """Driver for rofi's "script mode": rofi re-invokes this script after every
    user interaction, so progress is kept in a pickled State between calls."""

    # Populated by __parse_args() on every invocation.
    args: argparse.Namespace
    typer: Typer
    clipboarder: Clipboarder

    def mode(self) -> None:
        # ROFI_RETV == "0" marks the initial call: drop any stale state.
        if os.environ.get("ROFI_RETV") == "0":
            State.remove_cache()
        # rofi passes the selected row (if any) as the last CLI argument.
        chosen = sys.argv[-1]
        state = State.load_from_cache(chosen, int(os.environ.get("ROFI_RETV")))
        self.__parse_args()
        state.actions = self.args.actions
        # Deliberate fall-through: each handler advances state.step when it is
        # done, so several stages may execute within a single invocation.
        if state.step == Step.SHOW_ALL:
            self.show_characters(state)
        if state.step == Step.SHORTCUTS:
            self.handle_shortcuts(state)
        if state.step == Step.SELECT_SKIN_TONE:
            self.select_skin_tone(state)
        if state.step == Step.SELECT_ACTION:
            self.choose_action(state)
        if state.step == Step.EXECUTE:
            self.execute_actions(state)
        if state.step == Step.DONE:
            state.remove_cache()
        else:
            state.save_to_cache()
        # Whatever ends up in state.output is printed for rofi to display.
        if state.output:
            print(state.output)

    def __parse_args(self) -> None:
        # Flexible parsing: rofi may append extra arguments we don't know.
        self.args = parse_arguments_flexible()
        self.typer = Typer.best_option(self.args.typer)
        self.clipboarder = Clipboarder.best_option(self.args.clipboarder)

    def show_characters(self, state: State) -> None:
        # Emit rofi script-mode option rows ("\x00<option>\x1f<value>")
        # followed by the selectable character list.
        recent_characters = self.__format_recent_characters(load_recent_characters(self.args.max_recent))
        state.output = "\x00markup-rows\x1ftrue\n"
        state.output += "\x00use-hot-keys\x1ftrue\n"
        if len(recent_characters) > 0:
            # NOTE(review): no trailing "\n" after the message row, so the
            # first character row is appended to the same output line —
            # confirm rofi parses this as intended.
            state.output += f"\x00message\x1f{recent_characters}"
        state.output += "\n".join(
            self.__format_characters(
                read_characters_from_files(
                    self.args.files, load_frecent_characters() if self.args.frecency else [], self.args.use_additional
                )
            )
        )
        state.output += "\n"
        state.step += 1

    def __format_recent_characters(self, recent_characters: List[str]) -> str:
        # "1: x | 2: y | ... | 0: z"; \u200e (left-to-right mark) keeps the
        # layout stable for right-to-left characters.
        pairings = [f"\u200e{(index + 1) % 10}: {character}" for index, character in enumerate(recent_characters)]
        return " | ".join(pairings)

    def __format_characters(self, characters: Dict[str, str]) -> List[str]:
        # Either show the description inline, or hide it in rofi's searchable
        # "meta" field so matching still works.
        if self.args.show_description:
            return [f"{key} {value}" for key, value in characters.items() if value != ""]
        else:
            return [f"{key}\0meta\x1f{value}" for key, value in characters.items() if value != ""]

    def handle_shortcuts(self, state: State) -> None:
        # Return codes 10-19 are the hot-keys for the ten most recent characters.
        if 10 <= state.return_code <= 19:
            state.processed_characters = load_recent_characters(self.args.max_recent)[state.return_code - 10]
            state.reset_current_input()
            state.step += 2  # recents need no skin-tone selection
            return
        elif state.return_code:
            # Return codes 20-24 pick an action directly (see below); any other
            # non-zero code keeps the configured actions.
            new_actions = self.__choose_action_from_return_code(state.return_code)
            if new_actions:
                state.actions = new_actions
            state.unprocessed_characters = state.current_input.splitlines()
            state.step += 1
            return
        else:
            return

    def __choose_action_from_return_code(self, return_code: int) -> List[Action]:
        # Maps the custom-keybinding return codes to their actions; an empty
        # list means "no override".
        if return_code == 20:
            return [Action.COPY]
        elif return_code == 21:
            return [Action.TYPE]
        elif return_code == 22:
            return [Action.CLIPBOARD]
        elif return_code == 23:
            return [Action.UNICODE]
        elif return_code == 24:
            return [Action.COPY_UNICODE]
        else:
            return []

    def __extract_char_from_input(self, line) -> str:
        # The character is the first non-space token, after an optional
        # left-to-right mark used when formatting the recents list.
        return re.match(r"^(?:\u200e(?! ))?(?P<char>.[^ ]*)( .*|$)", line).group("char")

    def select_skin_tone(self, state: State) -> None:
        if state.has_input:
            # The user just picked a skin-tone row for the pending character.
            state.processed_characters += self.__extract_char_from_input(state.current_input)
            # NOTE(review): pop() removes the *last* pending character, while
            # the loop below consumes from the front ([1:]) — confirm intended.
            state.unprocessed_characters.pop()
        for raw_character in state.unprocessed_characters:
            character = self.__extract_char_from_input(raw_character)
            save_frecent_characters(character)
            if character not in emoji_data.skin_tone_selectable_emojis:
                state.processed_characters += character
                state.unprocessed_characters = state.unprocessed_characters[1:]
            else:
                # Ask for a skin tone; processing resumes on the next invocation.
                state.output = "\n".join(
                    character + modifier + " " + emoji_data.fitzpatrick_modifiers[modifier]
                    for modifier in emoji_data.fitzpatrick_modifiers
                )
                return
        state.step += 1

    def choose_action(self, state: State) -> None:
        if state.has_input:
            # The user's menu choice from the previous invocation.
            state.actions = [Action(state.current_input)]
            state.step += 1
            return
        if Action.MENU in state.actions:
            # Show the action menu; the choice arrives with the next invocation.
            state.output = "\n".join([str(it) for it in Action if it != Action.MENU])
            return
        state.step += 1

    def execute_actions(self, state: State) -> Optional[str]:
        save_recent_characters(state.processed_characters, self.args.max_recent)
        execute_action(state.processed_characters, state.actions)
        state.step += 1
        return
import sys
from typing import Dict, List, Tuple, Union
from . import emoji_data
from .action import execute_action
from .argument_parsing import parse_arguments_strict
from .clipboarder.clipboarder import Clipboarder
from .file_loader import read_characters_from_files
from .frecent import load_frecent_characters, save_frecent_characters
from .models import CANCEL, DEFAULT, Action, Shortcut
from .recent import load_recent_characters, save_recent_characters
from .selector.selector import Selector
from .typer.typer import Typer
class StandaloneRofimoji:
    """One-shot flow: open the selector once, resolve skin tones, then execute
    the configured (or keybinding-chosen) actions on the selection."""

    def __init__(self) -> None:
        self.args = parse_arguments_strict()
        self.selector = Selector.best_option(self.args.selector)
        self.typer = Typer.best_option(self.args.typer)
        self.clipboarder = Clipboarder.best_option(self.args.clipboarder)
        # Remember the focused window now, so characters can later be typed
        # into it even after the selector stole focus.
        self.active_window = self.typer.get_active_window()

    def standalone(self) -> None:
        action, value = self.__open_main_selector_window()
        if action == CANCEL():
            sys.exit()
        elif action != DEFAULT():
            # A keybinding chose a specific action, overriding the configured ones.
            self.args.actions = [action]
        if isinstance(value, Shortcut):
            # Hot-key selection of one of the recently used characters.
            characters = load_recent_characters(self.args.max_recent)[value.index]
        else:
            characters = self.__process_chosen_characters(value)
        if Action.MENU in self.args.actions:
            # Let the user choose the action(s) interactively.
            self.args.actions = self.selector.show_action_menu(self.args.selector_args)
        save_recent_characters(characters, self.args.max_recent)
        execute_action(characters, self.args.actions, self.active_window, self.args.typer, self.args.clipboarder)

    def __open_main_selector_window(self) -> Tuple[Union[Action, DEFAULT, CANCEL], Union[List[str], Shortcut]]:
        # Shows the full character list (plus recents) and returns both the
        # triggered action and the raw selection.
        return self.selector.show_character_selection(
            self.__format_characters(
                read_characters_from_files(
                    self.args.files, load_frecent_characters() if self.args.frecency else [], self.args.use_additional
                )
            ),
            load_recent_characters(self.args.max_recent),
            self.args.prompt,
            self.args.keybindings,
            self.args.selector_args,
        )

    def __format_characters(self, characters: Dict[str, str]) -> List[str]:
        # Either show the description inline, or hide it in rofi's searchable
        # "meta" field so matching still works.
        if self.args.show_description:
            return [f"{key} {value}" for key, value in characters.items() if value != ""]
        else:
            return [f"{key}\0meta\x1f{value}" for key, value in characters.items() if value != ""]

    def __process_chosen_characters(self, characters: List[str]) -> str:
        # Record frecency and resolve skin tones for every chosen character.
        characters_with_skin_tones = []
        for character in characters:
            save_frecent_characters(character)
            characters_with_skin_tones.append(self.__add_skin_tone_to_character(character))
        return "".join(characters_with_skin_tones)

    def __add_skin_tone_to_character(self, character: str) -> str:
        # A selection may consist of several codepoints; only the skin-tone
        # selectable ones get a modifier appended.
        characters_with_skin_tone = []
        for element in character:
            if element in emoji_data.skin_tone_selectable_emojis:
                characters_with_skin_tone.append(self.__select_skin_tone(element))
            else:
                characters_with_skin_tone.append(element)
        return "".join(characters_with_skin_tone)

    def __select_skin_tone(self, selected_emoji: str) -> str:
        skin_tone = self.args.skin_tone
        if skin_tone == "neutral":
            return selected_emoji
        elif skin_tone != "ask":
            # A fixed tone was configured; append the matching modifier.
            return selected_emoji + emoji_data.fitzpatrick_modifiers_reversed[skin_tone]
        else:
            # Ask interactively, showing each tone variant with its name.
            modified_emojis = [
                selected_emoji + modifier + " " + emoji_data.fitzpatrick_modifiers[modifier]
                for modifier in emoji_data.fitzpatrick_modifiers
            ]
            return_code, skin_tone = self.selector.show_skin_tone_selection(
                modified_emojis, selected_emoji + " ", self.args.selector_args
            )
            if return_code == 1:
                # Selection aborted: drop this emoji entirely.
                return ""
            return skin_tone.split(" ")[0]
import argparse
import shlex
import configargparse
from . import __version__
from .models import Action
from .paths import *
def parse_arguments_strict() -> argparse.Namespace:
    """Parse the command line, rejecting unrecognized arguments (standalone mode)."""
    return __parse_arguments(only_known=True)
def parse_arguments_flexible() -> argparse.Namespace:
    """Parse the command line, ignoring unrecognized arguments (rofi script mode
    appends extra arguments of its own)."""
    return __parse_arguments(only_known=False)
def __parse_arguments(only_known: bool) -> argparse.Namespace:
    """Build the argument parser and parse the command line.

    Args:
        only_known: if True, parse strictly and fail on unrecognized arguments;
            if False, silently ignore extras (rofi script mode appends its own).

    Returns:
        The parsed namespace, post-processed so that `selector_args` is a token
        list and `keybindings` maps each Action to its configured shortcut.
    """
    parser = configargparse.ArgumentParser(
        description="Select, insert or copy Unicode characters using rofi.",
        default_config_files=config_file_locations,
    )
    parser.add_argument("--version", action="version", version="rofimoji " + __version__)
    parser.add_argument(
        "--action",
        "-a",
        dest="actions",
        action="store",
        type=Action,
        choices=list(Action),
        default=[Action.TYPE],
        nargs="*",
        metavar="ACTION",
        help="How to insert the chosen characters. More than one action may be specified in "
        "a space separated list (for example: `--action type copy`). Options: "
        + ", ".join(f'"{a.value}"' for a in Action),
    )
    parser.add_argument(
        "--skin-tone",
        "-s",
        dest="skin_tone",
        action="store",
        choices=["neutral", "light", "medium-light", "moderate", "dark brown", "black", "ask"],
        default="ask",
        help='Decide on a skin-tone for all supported emojis. If not set (or set to "ask"), '
        "you will be asked for each one ",
    )
    parser.add_argument(
        "--files",
        "-f",
        dest="files",
        action="store",
        default=["emojis*"],
        nargs="+",
        metavar="FILE",
        help="Read characters from this file instead, one entry per line",
    )
    # Fixed typo in the user-visible help text: "rofimoj's" -> "rofimoji's".
    parser.add_argument("--prompt", "-r", dest="prompt", action="store", default="😀 ", help="Set rofimoji's prompt")
    parser.add_argument(
        "--selector-args",
        dest="selector_args",
        action="store",
        default=False,
        help="A string of arguments to give to the selector (rofi, wofi or fuzzel)",
    )
    parser.add_argument(
        "--max-recent",
        dest="max_recent",
        action="store",
        type=int,
        default=10,
        help="Show at most this number of recently used characters (cannot be larger than 10)",
    )
    parser.add_argument(
        "--no-frecency", dest="frecency", action="store_false", help="Don't show frequently used characters first"
    )
    parser.set_defaults(frecency=True)
    parser.add_argument(
        "--only-official",
        dest="use_additional",
        action="store_false",
        help="Use only the official Unicode descriptions",
    )
    parser.set_defaults(use_additional=True)
    parser.add_argument(
        "--hidden-descriptions",
        dest="show_description",
        action="store_false",
        help="Show only the character without its description",
    )
    parser.set_defaults(show_description=True)
    parser.add_argument(
        "--selector",
        dest="selector",
        action="store",
        type=str,
        choices=["rofi", "wofi", "fuzzel"],
        default=None,
        help="Choose the application to select the characters with",
    )
    parser.add_argument(
        "--clipboarder",
        dest="clipboarder",
        action="store",
        type=str,
        choices=["xsel", "xclip", "wl-copy"],
        default=None,
        help="Choose the application to access the clipboard with",
    )
    parser.add_argument(
        "--typer",
        dest="typer",
        action="store",
        type=str,
        choices=["xdotool", "wtype"],
        default=None,
        help="Choose the application to type with",
    )
    parser.add_argument(
        "--keybinding-copy",
        dest="keybinding_copy",
        action="store",
        type=str,
        default="Alt+c",
        help="Choose the keyboard shortcut to copy the character to the clipboard",
    )
    parser.add_argument(
        "--keybinding-type",
        dest="keybinding_type",
        action="store",
        type=str,
        default="Alt+t",
        help="Choose the keyboard shortcut to directly type the character",
    )
    parser.add_argument(
        "--keybinding-clipboard",
        dest="keybinding_clipboard",
        action="store",
        type=str,
        default="Alt+p",
        help="Choose the keyboard shortcut to insert the character via the clipboard",
    )
    parser.add_argument(
        "--keybinding-unicode",
        dest="keybinding_unicode",
        action="store",
        type=str,
        default="Alt+u",
        help="Choose the keyboard shortcut to directly type the character's unicode codepoint",
    )
    parser.add_argument(
        "--keybinding-copy-unicode",
        dest="keybinding_copy_unicode",
        action="store",
        type=str,
        default="Alt+i",
        help="Choose the keyboard shortcut to copy the character's unicode codepoint to the clipboard",
    )
    if only_known:
        parsed_args = parser.parse_args()
    else:
        parsed_args, _ = parser.parse_known_args()
    # Split the raw selector-args string into tokens, shell-style.
    if parsed_args.selector_args:
        parsed_args.selector_args = shlex.split(parsed_args.selector_args)
    else:
        parsed_args.selector_args = []
    # Collect the individual keybinding options into one Action -> shortcut map.
    parsed_args.keybindings = {
        Action.TYPE: parsed_args.keybinding_type,
        Action.COPY: parsed_args.keybinding_copy,
        Action.CLIPBOARD: parsed_args.keybinding_clipboard,
        Action.UNICODE: parsed_args.keybinding_unicode,
        Action.COPY_UNICODE: parsed_args.keybinding_copy_unicode,
    }
    return parsed_args
from subprocess import run
from ..abstractionhelper import is_installed, is_wayland
class Typer:
    """Facade over the available typing tools; picks an implementation at runtime.

    The base class itself is the "nothing works" fallback: every operation
    raises NoTyperFoundException.
    """

    @staticmethod
    def best_option(name: str = None) -> "Typer":
        # An explicitly requested typer wins; otherwise take any supported one;
        # as a last resort fall back to the (non-functional) base class.
        candidates = Typer.__subclasses__()
        for candidate in candidates:
            if candidate.name() == name:
                return candidate()
        for candidate in candidates:
            if candidate.supported():
                return candidate()
        return Typer()

    @staticmethod
    def supported() -> bool:
        # Overridden by subclasses to report availability on this system.
        pass

    @staticmethod
    def name() -> str:
        # Overridden by subclasses with the tool's identifier.
        pass

    def get_active_window(self) -> str:
        raise NoTyperFoundException()

    def type_characters(self, characters: str, active_window: str) -> None:
        raise NoTyperFoundException()

    def insert_from_clipboard(self, active_window: str) -> None:
        raise NoTyperFoundException()
class XDoToolTyper(Typer):
    """Typer backed by xdotool (X11 only)."""

    @staticmethod
    def supported() -> bool:
        return not is_wayland() and is_installed("xdotool")

    @staticmethod
    def name() -> str:
        return "xdotool"

    def get_active_window(self) -> str:
        # [:-1] strips the trailing newline from xdotool's output.
        return run(args=["xdotool", "getactivewindow"], capture_output=True, encoding="utf-8").stdout[:-1]

    def type_characters(self, characters: str, active_window: str) -> None:
        # Re-activate the originally focused window before typing into it.
        run(["xdotool", "windowactivate", "--sync", active_window, "type", "--clearmodifiers", characters])

    def insert_from_clipboard(self, active_window: str) -> None:
        # Paste via Shift+Insert in the target window; the short sleep gives
        # the application time to process the keypress.
        run(
            [
                "xdotool",
                "windowfocus",
                "--sync",
                active_window,
                "key",
                "--clearmodifiers",
                "Shift+Insert",
                "sleep",
                "0.05",
            ]
        )
class WTypeTyper(Typer):
    """Typer backed by wtype (Wayland only)."""

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("wtype")

    @staticmethod
    def name() -> str:
        return "wtype"

    def get_active_window(self) -> str:
        # wtype always types into the focused window, so tracking is unneeded.
        return "not possible with wtype"

    def type_characters(self, characters: str, active_window: str) -> None:
        run(["wtype", characters])

    def insert_from_clipboard(self, active_window: str) -> None:
        # Simulate a Shift+Insert press/release to paste the selection.
        run(["wtype", "-M", "shift", "-P", "Insert", "-p", "Insert", "-m", "shift"])
class NoTyperFoundException(Exception):
    """Raised when no usable typing tool (e.g. xdotool, wtype) is available."""

    def __init__(self) -> None:
        # Pass the message through Exception.__init__ instead of overriding
        # __str__: str() output is unchanged, but args/repr/logging now carry
        # the message too.
        super().__init__(
            "Could not find a valid way to type characters. Please check the required dependencies."
        )
import re
from subprocess import run
from typing import Dict, List, Tuple, Union
from ..abstractionhelper import is_installed, is_wayland
from ..models import CANCEL, DEFAULT, Action, Shortcut
class Selector:
    """Facade over the supported launcher programs (rofi, wofi, fuzzel).

    The base class itself is the "nothing works" fallback: every operation
    raises NoSelectorFoundException.
    """

    @staticmethod
    def best_option(name: str = None) -> "Selector":
        # An explicitly requested selector wins; otherwise take any supported
        # one; as a last resort fall back to the (non-functional) base class.
        candidates = Selector.__subclasses__()
        for candidate in candidates:
            if candidate.name() == name:
                return candidate()
        for candidate in candidates:
            if candidate.supported():
                return candidate()
        return Selector()

    @staticmethod
    def supported() -> bool:
        # Overridden by subclasses to report availability on this system.
        pass

    @staticmethod
    def name() -> str:
        # Overridden by subclasses with the tool's identifier.
        pass

    def show_character_selection(
        self,
        characters: List[str],
        recent_characters: List[str],
        prompt: str,
        keybindings: Dict[Action, str],
        additional_args: List[str],
    ) -> Tuple[Union[Action, DEFAULT, CANCEL], Union[List[str], Shortcut]]:
        raise NoSelectorFoundException()

    def show_skin_tone_selection(
        self, tones_emojis: List[str], prompt: str, additional_args: List[str]
    ) -> Tuple[int, str]:
        raise NoSelectorFoundException

    def show_action_menu(self, additional_args: List[str]) -> List[Action]:
        raise NoSelectorFoundException

    def extract_char_from_input(self, line) -> str:
        # The character is the first non-space token, after an optional
        # left-to-right mark used when formatting the recents list.
        return re.match(r"^(?:\u200e(?! ))?(?P<char>.[^ ]*)( .*|$)", line).group("char")
class Rofi(Selector):
    """Selector backend using rofi."""

    @staticmethod
    def supported() -> bool:
        return is_installed("rofi")

    @staticmethod
    def name() -> str:
        return "rofi"

    def show_character_selection(
        self,
        characters: List[str],
        recent_characters: List[str],
        prompt: str,
        keybindings: Dict[Action, str],
        additional_args: List[str],
    ) -> Tuple[Union[Action, DEFAULT, CANCEL], Union[List[str], Shortcut]]:
        # The custom keybindings 11-15 registered here surface as the return
        # codes 20-24 that are mapped back to actions below.
        parameters = [
            "rofi",
            "-dmenu",
            "-markup-rows",
            "-i",
            "-multi-select",
            "-no-custom",
            "-ballot-unselected-str",
            "",
            "-p",
            prompt,
            "-kb-custom-11",
            keybindings[Action.COPY],
            "-kb-custom-12",
            keybindings[Action.TYPE],
            "-kb-custom-13",
            keybindings[Action.CLIPBOARD],
            "-kb-custom-14",
            keybindings[Action.UNICODE],
            "-kb-custom-15",
            keybindings[Action.COPY_UNICODE],
            *additional_args,
        ]
        if recent_characters:
            # The recently used characters are shown in rofi's message line.
            parameters.extend(["-mesg", self.__format_recent_characters(recent_characters)])
        rofi = run(parameters, input="\n".join(characters), capture_output=True, encoding="utf-8")
        # Return codes 10-19 are the hot-keys for the ten most recent characters.
        if 10 <= rofi.returncode <= 19:
            return DEFAULT(), Shortcut(rofi.returncode - 10)
        action: Union[Action, DEFAULT, CANCEL]
        if rofi.returncode == 1:
            # Selection was aborted (e.g. Escape).
            action = CANCEL()
        elif rofi.returncode == 20:
            action = Action.COPY
        elif rofi.returncode == 21:
            action = Action.TYPE
        elif rofi.returncode == 22:
            action = Action.CLIPBOARD
        elif rofi.returncode == 23:
            action = Action.UNICODE
        elif rofi.returncode == 24:
            action = Action.COPY_UNICODE
        else:
            action = DEFAULT()
        return action, [self.extract_char_from_input(line) for line in rofi.stdout.splitlines()]

    def __format_recent_characters(self, recent_characters: List[str]) -> str:
        # "1: x | 2: y | ... | 0: z"; \u200e (left-to-right mark) keeps the
        # layout stable for right-to-left characters.
        pairings = [f"\u200e{(index + 1) % 10}: {character}" for index, character in enumerate(recent_characters)]
        return " | ".join(pairings)

    def show_skin_tone_selection(
        self, tones_emojis: List[str], prompt: str, additional_args: List[str]
    ) -> Tuple[int, str]:
        rofi = run(
            ["rofi", "-dmenu", "-i", "-no-custom", "-p", prompt, *additional_args],
            input="\n".join(tones_emojis),
            capture_output=True,
            encoding="utf-8",
        )
        return rofi.returncode, rofi.stdout

    def show_action_menu(self, additional_args: List[str]) -> List[Action]:
        # Multi-select menu of all actions except MENU itself.
        rofi = run(
            [
                "rofi",
                "-dmenu",
                "-multi-select",
                "-no-custom",
                "-ballot-unselected-str",
                "",
                "-i",
                *additional_args,
            ],
            input="\n".join([str(it) for it in Action if it != Action.MENU]),
            capture_output=True,
            encoding="utf-8",
        )
        return [Action(action) for action in rofi.stdout.strip().split("\n")]
class Wofi(Selector):
    """Selector backend using wofi (Wayland).

    Unlike the rofi backend, the recent characters and action keybindings
    passed to show_character_selection are not used here.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("wofi")

    @staticmethod
    def name() -> str:
        return "wofi"

    def show_character_selection(
        self,
        characters: List[str],
        recent_characters: List[str],
        prompt: str,
        keybindings: Dict[Action, str],
        additional_args: List[str],
    ) -> Tuple[Union[Action, DEFAULT, CANCEL], Union[List[str], Shortcut]]:
        parameters = ["wofi", "--dmenu", "--allow-markup", "-i", "-p", prompt, *additional_args]
        wofi = run(parameters, input="\n".join(characters), capture_output=True, encoding="utf-8")
        # Always reports the default action; custom keybindings can't be detected.
        return DEFAULT(), [self.extract_char_from_input(line) for line in wofi.stdout.splitlines()]

    def show_skin_tone_selection(
        self, tones_emojis: List[str], prompt: str, additional_args: List[str]
    ) -> Tuple[int, str]:
        wofi = run(
            ["wofi", "--dmenu", "-i", "-p", prompt, *additional_args],
            input="\n".join(tones_emojis),
            capture_output=True,
            encoding="utf-8",
        )
        return wofi.returncode, wofi.stdout

    def show_action_menu(self, additional_args: List[str]) -> List[Action]:
        # NOTE(review): "-dmenu" (single dash) differs from the "--dmenu" used
        # in the other methods — confirm wofi accepts the short spelling.
        wofi = run(
            [
                "wofi",
                "-dmenu",
                "-i",
                *additional_args,
            ],
            input="\n".join([it.value for it in Action if it != Action.MENU]),
            capture_output=True,
            encoding="utf-8",
        )
        # Only a single action can be chosen with this backend.
        return [Action(wofi.stdout.strip())]
class Fuzzel(Selector):
    """Selector backend using fuzzel (Wayland).

    Unlike the rofi backend, the recent characters and action keybindings
    passed to show_character_selection are not used here.
    """

    @staticmethod
    def supported() -> bool:
        return is_wayland() and is_installed("fuzzel")

    @staticmethod
    def name() -> str:
        return "fuzzel"

    def show_character_selection(
        self,
        characters: List[str],
        recent_characters: List[str],
        prompt: str,
        keybindings: Dict[Action, str],
        additional_args: List[str],
    ) -> Tuple[Union[Action, DEFAULT, CANCEL], Union[List[str], Shortcut]]:
        parameters = ["fuzzel", "--dmenu", "--fuzzy-min-length", "1", "-p", prompt, *additional_args]
        fuzzel = run(parameters, input="\n".join(characters), capture_output=True, encoding="utf-8")
        # Always reports the default action; custom keybindings can't be detected.
        return DEFAULT(), [self.extract_char_from_input(line) for line in fuzzel.stdout.splitlines()]

    def show_skin_tone_selection(
        self, tones_emojis: List[str], prompt: str, additional_args: List[str]
    ) -> Tuple[int, str]:
        fuzzel = run(
            ["fuzzel", "--dmenu", "--fuzzy-min-length", "1", "-p", prompt, *additional_args],
            input="\n".join(tones_emojis),
            capture_output=True,
            encoding="utf-8",
        )
        return fuzzel.returncode, fuzzel.stdout

    def show_action_menu(self, additional_args: List[str]) -> List[Action]:
        # NOTE(review): "-dmenu" (single dash) differs from the "--dmenu" used
        # in the other methods — confirm fuzzel accepts the short spelling.
        fuzzel = run(
            [
                "fuzzel",
                "-dmenu",
                *additional_args,
            ],
            input="\n".join([it.value for it in Action if it != Action.MENU]),
            capture_output=True,
            encoding="utf-8",
        )
        # Only a single action can be chosen with this backend.
        return [Action(fuzzel.stdout.strip())]
class NoSelectorFoundException(Exception):
    """Raised when no usable selector (rofi, wofi, fuzzel) is available."""

    def __init__(self) -> None:
        # Pass the message through Exception.__init__ instead of overriding
        # __str__: str() output is unchanged, but args/repr/logging now carry
        # the message too.
        super().__init__(
            "Could not find a valid way to show the selection. Please check the required dependencies."
        )
# NLU: The Power of Spark NLP, the Simplicity of Python
John Snow Labs' NLU is a Python library for applying state-of-the-art text mining, directly on any dataframe, with a single line of code.
As a facade of the award-winning Spark NLP library, it comes with **1000+** pretrained models in **100+ languages**, all production-grade, scalable, and trainable, with **everything in 1 line of code.**
## NLU in Action
See how easy it is to use any of the **thousands** of models in 1 line of code, there are hundreds of [tutorials](https://nlu.johnsnowlabs.com/docs/en/notebooks) and [simple examples](https://github.com/JohnSnowLabs/nlu/tree/master/examples) you can copy and paste into your projects to achieve State Of The Art easily.
<img src="http://ckl-it.de/wp-content/uploads/2020/08/My-Video6.gif" width="1800" height="500"/>
## NLU & Streamlit in Action
This 1 line lets you visualize and play with **1000+ SOTA NLU & NLP models** in **200** languages
for **Named Entity Recognition**, **Dependency Trees & Parts of Speech**, **Classification for 100+ problems**, **Text Summarization & Question Answering using T5**, **Translation with Marian**, **Text Similarity Matrix** using **BERT, ALBERT, ELMO, XLNET, ELECTRA** with any of the **100+ word embeddings** and much more using [Streamlit](http://streamlit.com/).
```shell
streamlit run https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/examples/streamlit/01_dashboard.py
```
<img src="https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/docs/assets/streamlit_docs_assets/gif/start.gif">
NLU provides tight and simple integration with Streamlit, which enables building powerful web apps that showcase these models in just 1 line of code.
View the [NLU&Streamlit documentation](https://nlu.johnsnowlabs.com/docs/en/streamlit_viz_examples) or [NLU & Streamlit examples section](https://github.com/JohnSnowLabs/nlu/tree/master/examples/streamlit).
The entire GIF demo above was created with these Streamlit examples.
## All NLU resources overview
Take a look at our official NLU page: [https://nlu.johnsnowlabs.com/](https://nlu.johnsnowlabs.com/) for user documentation and examples
| Resource | Description |
|-----------------------------------------------------------------------|-------------------------------------------|
| [Install NLU](https://nlu.johnsnowlabs.com/docs/en/install) | Just run `pip install nlu pyspark==3.0.2`
| [The NLU Namespace](https://nlu.johnsnowlabs.com/docs/en/namespace) | Find all the names of models you can load with `nlu.load()`
| [The `nlu.load(<Model>)` function](https://nlu.johnsnowlabs.com/docs/en/load_api) | Load any of the **1000+ models in 1 line**
| [The `nlu.load(<Model>).predict(data)` function](https://nlu.johnsnowlabs.com/docs/en/predict_api) | Predict on `Strings`, `List of Strings`, `Numpy Arrays`, `Pandas`, `Modin` and `Spark Dataframes`
| [The `nlu.load(<train.Model>).fit(data)` function](https://nlu.johnsnowlabs.com/docs/en/training) | Train a text classifier for `2-Class`, `N-Classes`, `Multi-N-Classes`, `Named-Entity-Recognition` or `Parts of Speech Tagging`
| [The `nlu.load(<Model>).viz(data)` function](https://nlu.johnsnowlabs.com/docs/en/viz_examples) | Visualize the results of `Word Embedding Similarity Matrix`, `Named Entity Recognizers`, `Dependency Trees & Parts of Speech`, `Entity Resolution`,`Entity Linking` or `Entity Status Assertion`
| [The `nlu.load(<Model>).viz_streamlit(data)` function](https://nlu.johnsnowlabs.com/docs/en/streamlit_viz_examples) | Display an interactive GUI which lets you explore and test every model and feature in NLU in 1 click.
| [General Concepts](https://nlu.johnsnowlabs.com/docs/en/concepts) | General concepts in NLU
| [The latest release notes](https://nlu.johnsnowlabs.com/docs/en/release_notes) | Newest features added to NLU
| [Overview NLU 1-liners examples](https://nlu.johnsnowlabs.com/docs/en/examples) | Most common used models and their results
| [Overview NLU 1-liners examples for healthcare models](https://nlu.johnsnowlabs.com/docs/en/examples_hc) | Most common used healthcare models and their results
| [Overview of all NLU tutorials and Examples](https://nlu.johnsnowlabs.com/docs/en/notebooks) | 100+ tutorials on how to use NLU on text datasets for various problems and from various sources like Twitter, Chinese News, Crypto News Headlines, Airline Traffic communication, Product review classifier training,
| [Connect with us on Slack](https://join.slack.com/t/spark-nlp/shared_invite/zt-lutct9gm-kuUazcyFKhuGY3_0AMkxqA) | Problems, questions or suggestions? We have a very active and helpful community of 2000+ AI enthusiasts putting NLU, Spark NLP & Spark OCR to good use
| [Discussion Forum](https://github.com/JohnSnowLabs/spark-nlp/discussions) | Want a more in-depth discussion with the community? Post a thread in our discussion forum
| [John Snow Labs Medium](https://medium.com/spark-nlp) | Articles and Tutorials on the NLU, Spark NLP and Spark OCR
| [John Snow Labs Youtube](https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos) | Videos and Tutorials on the NLU, Spark NLP and Spark OCR
| [NLU Website](https://nlu.johnsnowlabs.com/) | The official NLU website
|[Github Issues](https://github.com/JohnSnowLabs/nlu/issues) | Report a bug
## Getting Started with NLU
To get your hands on the power of NLU, you just need to install it via pip and ensure Java 8 is installed and properly configured. Check out the [Quickstart](https://nlu.johnsnowlabs.com/docs/en/install) for more info.
```bash
pip install nlu pyspark==3.0.2
```
## Loading and predict with any model in 1 line python
```python
import nlu
nlu.load('sentiment').predict('I love NLU! <3')
```
## Loading and predict with multiple models in 1 line
Get 6 different embeddings in 1 line and use them for downstream data science tasks!
```python
nlu.load('bert elmo albert xlnet glove use').predict('I love NLU! <3')
```
## What kind of models does NLU provide?
NLU provides everything a data scientist might want to wish for in one line of code!
- NLU provides everything a data scientist might want to wish for in one line of code!
- 1000 + pre-trained models
- 100+ of the latest NLP word embeddings ( BERT, ELMO, ALBERT, XLNET, GLOVE, BIOBERT, ELECTRA, COVIDBERT) and different variations of them
- 50+ of the latest NLP sentence embeddings ( BERT, ELECTRA, USE) and different variations of them
- 100+ Classifiers (NER, POS, Emotion, Sarcasm, Questions, Spam)
- 300+ Supported Languages
- Summarize Text and Answer Questions with T5
- Labeled and Unlabeled Dependency parsing
- Various Text Cleaning and Pre-Processing methods like Stemming, Lemmatizing, Normalizing, Filtering, Cleaning pipelines and more
## Classifiers trained on many different datasets
Choose the right tool for the right task! Whether you analyze movies or twitter, NLU has the right model for you!
- trec6 classifier
- trec10 classifier
- spam classifier
- fake news classifier
- emotion classifier
- cyberbullying classifier
- sarcasm classifier
- sentiment classifier for movies
- IMDB Movie Sentiment classifier
- Twitter sentiment classifier
- NER pretrained on ONTO notes
- NER trainer on CONLL
- Language classifier for 20 languages on the wiki 20 lang dataset.
## Utilities for the Data Science NLU applications
Working with text data can sometimes be quite a dirty job. NLU helps you keep your hands clean by providing lots of components that take away data engineering intensive tasks.
- Datetime Matcher
- Pattern Matcher
- Chunk Matcher
- Phrases Matcher
- Stopword Cleaners
- Pattern Cleaners
- Slang Cleaner
## Where can I see all models available in NLU?
For NLU models to load, see [the NLU Namespace](https://nlu.johnsnowlabs.com/docs/en/namespace) or the [John Snow Labs Modelshub](https://modelshub.johnsnowlabs.com/models) or go [straight to the source](https://github.com/JohnSnowLabs/nlu/blob/master/nlu/namespace.py).
## Supported Data Types
- Pandas DataFrame and Series
- Spark DataFrames
- Modin with Ray backend
- Modin with Dask backend
- Numpy arrays
- Strings and lists of strings
# NLU Tutorials : TODO: tabulate
# NLU Demos on Datasets
- [Kaggle Twitter Airline Sentiment Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-sentiment-airline-demo)
- [Kaggle Twitter Airline Emotion Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-emotion-airline-demo)
- [Kaggle Twitter COVID Sentiment Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-covid-sentiment-showcase)
- [Kaggle Twitter COVID Emotion Analysis nlu demo](https://www.kaggle.com/kasimchristianloan/nlu-covid-emotion-showcase)
# NLU component examples
Checkout the following notebooks for examples on how to work with NLU.
## NLU Training Examples
### Binary Class Text Classification training
- [2 class Finance News sentiment classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_apple_twitter.ipynb)
- [2 class Reddit comment sentiment classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_reddit.ipynb)
- [2 class Apple Tweets sentiment classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb)
- [2 class IMDB Movie sentiment classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_IMDB.ipynb)
- [2 class twitter classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/binary_text_classification/NLU_training_sentiment_classifier_demo_twitter.ipynb)
### Multi Class Text Classification training
- [5 class WineEnthusiast Wine review classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_wine.ipynb)
- [3 class Amazon Phone review classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb)
- [5 class Amazon Musical Instruments review classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_musical_instruments.ipynb)
- [5 class Tripadvisor Hotel review classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_hotel_reviews.ipynb)
- [5 class Phone review classifier training](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_hotel_reviews.ipynb)
### Multi Label Text Classification training
- [ Train Multi Label Classifier on E2E dataset Demo](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_label_text_classification/NLU_traing_multi_label_classifier_E2e.ipynb)
- [Train Multi Label Classifier on Stack Overflow Question Tags dataset Demo](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_label_text_classification/NLU_training_multi_token_label_text_classifier_stackoverflow_tags.ipynb)
### Named Entity Recognition training (NER)
- [NER Training example](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/named_entity_recognition/NLU_training_NER_demo.ipynb)
### Part of Speech tagger training (POS)
- [POS Training example](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/Training/part_of_speech/NLU_training_POS_demo.ipynb)
## NLU Applications Examples
- [Sentence Similarity with Multiple Sentence Embeddings](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/sentence_similarirty_stack_overflow_questions.ipynb)
- [6 Wordembeddings in 1 line with T-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_multiple_word_embeddings_and_t-SNE_visualization_example.ipynb)
## NLU Demos on Datasets
- [Kaggle Twitter Airline Sentiment Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-sentiment-airline-demo)
- [Kaggle Twitter Airline Emotion Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-emotion-airline-demo)
- [Kaggle Twitter COVID Sentiment Analysis NLU demo](https://www.kaggle.com/kasimchristianloan/nlu-covid-sentiment-showcase)
- [Kaggle Twitter COVID Emotion Analysis nlu demo](https://www.kaggle.com/kasimchristianloan/nlu-covid-emotion-showcase)
## NLU examples grouped by component
The following are Collab examples which showcase each NLU component and some applications.
### Named Entity Recognition (NER)
- [NER pretrained on ONTO Notes](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/named_entity_recognition_(NER)/NLU_ner_ONTO_18class_example.ipynb)
- [NER pretrained on CONLL](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/named_entity_recognition_(NER)/NLU_ner_CONLL_2003_5class_example.ipynb)
- [Tokenize, extract POS and NER in Chinese](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/chinese_ner_pos_and_tokenization.ipynb)
- [Tokenize, extract POS and NER in Korean](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/korean_ner_pos_and_tokenization.ipynb)
- [Tokenize, extract POS and NER in Japanese](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/japanese_ner_pos_and_tokenization.ipynb)
- [Aspect based sentiment NER sentiment for restaurants](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/named_entity_recognition_(NER)/aspect_based_ner_sentiment_restaurants.ipynb)
### Part of speech (POS)
- [POS pretrained on ANC dataset](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/part_of_speech(POS)/NLU_part_of_speech_ANC_example.ipynb)
- [Tokenize, extract POS and NER in Chinese](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/chinese_ner_pos_and_tokenization.ipynb)
- [Tokenize, extract POS and NER in Korean](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/korean_ner_pos_and_tokenization.ipynb)
- [Tokenize, extract POS and NER in Japanese](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/japanese_ner_pos_and_tokenization.ipynb)
### Sequence2Sequence
- [Translate between 192+ languages with marian](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sequence2sequence/translation_demo.ipynb)
- [Try out the 18 Tasks like Summarization Question Answering and more on T5](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sequence2sequence/T5_tasks_summarize_question_answering_and_more)
- [T5 Open and Closed Book question answering tutorial](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sequence2sequence/T5_question_answering.ipynb)
### Classifiers
- [Unsupervised Keyword Extraction with YAKE](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/unsupervised_keyword_extraction_with_YAKE.ipynb)
- [Toxic Text Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/toxic_classification.ipynb)
- [Twitter Sentiment Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/sentiment_classification.ipynb)
- [Movie Review Sentiment Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/sentiment_classification_movies.ipynb)
- [Sarcasm Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/sarcasm_classification.ipynb)
- [50 Class Questions Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/question_classification.ipynb)
- [300 Class Languages Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/NLU_language_classification.ipynb)
- [Fake News Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/fake_news_classification.ipynb)
- [E2E Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/E2E_classification.ipynb)
- [Cyberbullying Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/cyberbullying_cassification_for_racism_and_sexism.ipynb)
- [Spam Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/spam_classification.ipynb)
- [Emotion Classifier](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/classifiers/emotion_classification.ipynb)
### Word Embeddings
- [BERT, ALBERT, ELMO, ELECTRA, XLNET, GLOVE at once with t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_multiple_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [BERT Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_BERT_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [ALBERT Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_ALBERT_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [ELMO Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_ELMo_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [XLNET Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_XLNET_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [ELECTRA Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_ELECTRA_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [COVIDBERT Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_COVIDBERT_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [BIOBERT Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_BIOBERT_word_embeddings_and_t-SNE_visualization_example.ipynb)
- [GLOVE Word Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/word_embeddings/NLU_GLOVE_word_embeddings_and_t-SNE_visualization_example.ipynb)
### Sentence Embeddings
- [BERT Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_BERT_sentence_embeddings_and_t-SNE_visualization_Example.ipynb)
- [ELECTRA Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_ELECTRA_sentence_embeddings_and_t-SNE_visualization_example.ipynb)
- [USE Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_USE_sentence_embeddings_and_t-SNE_visualization_example.ipynb)
### Sentence Embeddings
- [BERT Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_BERT_sentence_embeddings_and_t-SNE_visualization_Example.ipynb)
- [ELECTRA Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_ELECTRA_sentence_embeddings_and_t-SNE_visualization_example.ipynb)
- [USE Sentence Embeddings and t-SNE plotting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sentence_embeddings/NLU_USE_sentence_embeddings_and_t-SNE_visualization_example.ipynb)
### Dependency Parsing
- [Untyped Dependency Parsing](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/dependency_parsing/NLU_untyped_dependency_parsing_example.ipynb)
- [Typed Dependency Parsing](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/dependency_parsing/NLU_typed_dependency_parsing_example.ipynb)
### Text Pre Processing and Cleaning
- [Tokenization](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_tokenization_example.ipynb)
- [Stopwords removal](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_stopwords_removal_example.ipynb)
- [Stemming](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_stemmer_example.ipynb)
- [Lemmatization](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_lemmatization.ipynb)
- [Normalizing](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_normalizer_example.ipynb)
- [Spell checking](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_spellchecking_example.ipynb)
- [Sentence Detecting](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_sentence_detection_example.ipynb)
- [Normalize documents](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/document_normalizer_demo.ipynb)
### Chunkers
- [N Gram](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/chunkers/NLU_n-gram.ipynb)
- [Entity Chunking](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/chunkers/NLU_chunking_example.ipynb)
### Matchers
- [Date Matcher](https://github.com/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/matchers/NLU_date_matching.ipynb)
# Need help?
- [Ping us on Slack](https://spark-nlp.slack.com/archives/C0196BQCDPY)
- [Post an issue on Github](https://github.com/JohnSnowLabs/nlu/issues)
# Simple NLU Demos
- [NLU different output levels Demo](https://colab.research.google.com/drive/1C4N3wpC17YzZf9fXHDNAJ5JvSmfbq7zT?usp=sharing)
# Features in NLU Overview
* Tokenization
* Trainable Word Segmentation
* Stop Words Removal
* Token Normalizer
* Document Normalizer
* Stemmer
* Lemmatizer
* NGrams
* Regex Matching
* Text Matching,
* Chunking
* Date Matcher
* Sentence Detector
* Deep Sentence Detector (Deep learning)
* Dependency parsing (Labeled/unlabeled)
* Part-of-speech tagging
* Sentiment Detection (ML models)
* Spell Checker (ML and DL models)
* Word Embeddings (GloVe and Word2Vec)
* BERT Embeddings (TF Hub models)
* ELMO Embeddings (TF Hub models)
* ALBERT Embeddings (TF Hub models)
* XLNet Embeddings
* Universal Sentence Encoder (TF Hub models)
* BERT Sentence Embeddings (42 TF Hub models)
* Sentence Embeddings
* Chunk Embeddings
* Unsupervised keywords extraction
* Language Detection & Identification (up to 375 languages)
* Multi-class Sentiment analysis (Deep learning)
* Multi-label Sentiment analysis (Deep learning)
* Multi-class Text Classification (Deep learning)
* Neural Machine Translation
* Text-To-Text Transfer Transformer (Google T5)
* Named entity recognition (Deep learning)
* Easy TensorFlow integration
* GPU Support
* Full integration with Spark ML functions
* 1000 pre-trained models in +200 languages!
* Multi-lingual NER models: Arabic, Chinese, Danish, Dutch, English, Finnish, French, German, Hebrew, Italian, Japanese, Korean, Norwegian, Persian, Polish, Portuguese, Russian, Spanish, Swedish, Urdu and more
* Natural Language inference
* Coreference resolution
* Sentence Completion
* Word sense disambiguation
* Clinical entity recognition
* Clinical Entity Linking
* Entity normalization
* Assertion Status Detection
* De-identification
* Relation Extraction
* Clinical Entity Resolution
## Citation
We have published a [paper](https://www.sciencedirect.com/science/article/pii/S2665963821000063) that you can cite for the NLU library:
```bibtex
@article{KOCAMAN2021100058,
title = {Spark NLP: Natural language understanding at scale},
journal = {Software Impacts},
pages = {100058},
year = {2021},
issn = {2665-9638},
doi = {https://doi.org/10.1016/j.simpa.2021.100058},
url = {https://www.sciencedirect.com/science/article/pii/S2665963821000063},
author = {Veysel Kocaman and David Talby},
keywords = {Spark, Natural language processing, Deep learning, Tensorflow, Cluster},
abstract = {Spark NLP is a Natural Language Processing (NLP) library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines that can scale easily in a distributed environment. Spark NLP comes with 1100+ pretrained pipelines and models in more than 192+ languages. It supports nearly all the NLP tasks and modules that can be used seamlessly in a cluster. Downloaded more than 2.7 million times and experiencing 9x growth since January 2020, Spark NLP is used by 54% of healthcare organizations as the world’s most widely used NLP library in the enterprise.}
}
```
| /roflma-3.4.0.tar.gz/roflma-3.4.0/README.md | 0.654453 | 0.991961 | README.md | pypi |
from nlu import *
from dataclasses import dataclass
import glob
import os
import json
import sys
import logging
COMPONENT_INFO_FILE_NAME = 'component_infos.json'
logger = logging.getLogger('nlu')
class AllComponentsInfo:
    """Registry of every NLU ComponentInfo object, loaded from disk at construction time,
    plus the language and action metadata NLU uses to resolve component references."""

    def __init__(self):
        ''' Initialize every NLU component info object and provide access to them'''
        self.all_components = {}  # every component, keyed by component name
        # Per-type registries, filled while scanning the component directories below.
        self.classifiers = {}
        self.embeddings = {}
        self.normalizers = {}
        self.pretrained_pipelines = {}
        self.selectors = {}
        self.spell_checkers = {}
        self.stemmers = {}
        self.tokenizers = {}
        self.utils = {}
        # Language metadata used elsewhere to resolve models/pipelines per language.
        self.all_multi_lang_base_ner_languages = ['en', 'fr', 'de', 'it', 'pl', 'pt', 'ru', 'es']
        self.all_multi_lang_xtreme_ner_languages = ['af', 'ar', 'bg', 'bn', 'de', 'el', 'en', 'es', 'et', 'eu', 'fa',
                                                    'fi', 'fr', 'he', 'hi', 'hu', 'id', 'it', 'ja', 'jv', 'ka', 'kk',
                                                    'ko', 'ml', 'mr', 'ms', 'my', 'nl', 'pt', 'ru', 'sw', 'ta', 'te',
                                                    'th', 'tl', 'tr', 'ur', 'vi', 'yo', 'zh']
        self.all_right_to_left_langs_with_pretrained_tokenizer = ['zh', 'ko', 'ja']
        self.all_pretrained_pipe_languages = ['en', 'nl', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'ru', 'es', 'xx', ]
        self.all_pretrained_model_languages = ['vi', 'mt', 'ta', 'af', 'cy', 'et', 'bh', 'am', 'da', 'fr', 'de', 'it',
                                               'nb', 'no', 'nn', 'pl', 'pt', 'ru', 'es', 'af', 'ar', 'hy', 'eu', 'bn',
                                               'br', 'bg', 'ca', 'cs', 'eo', 'fi', 'gl', 'el', 'ha', 'he', 'hi', 'hu',
                                               'id', 'ga', 'ja', 'la', 'lv', 'mr', 'fa', 'ro', 'sk', 'sl', 'so', 'st',
                                               'sw', 'sv', 'th', 'tr', 'uk', 'yo', 'zu', 'zh', 'xx', 'ur', 'ko',
                                               'yi', 'uk', 'te', 'ta', 'sd', 'pa', 'ne', 'ml', 'mr', 'kn', 'id', 'gu', 'bs',
                                               'ig', 'lg', 'lou', 'pcm', 'wo', 'rw',
                                               ] + self.all_multi_lang_xtreme_ner_languages
        self.all_languages = set(self.all_pretrained_pipe_languages).union(set(self.all_pretrained_model_languages))
        self.all_classifier_classes = []
        # This maps a requested action token to a component class.
        # BUG FIX: a missing comma previously fused 'stopwords' and 'labled_dependency'
        # into the single string 'stopwordslabled_dependency'.
        # NOTE(review): the 'labled_dependency' spelling is kept for backward
        # compatibility; 'spell' appears twice (harmless for membership checks).
        self.all_nlu_actions = ['tokenize', 'pos', 'ner', 'embed', 'classify', 'sentiment', 'emotion', 'spell',
                                'dependency', 'dep', 'dep.untyped', 'match', 'sentence_detector', 'spell', 'stopwords',
                                'labled_dependency',
                                'lemma', 'norm', 'select', 'pretrained_pipe', 'util', 'embed_sentence', 'embed_chunk',
                                'ngram']
        # Map a component's declared type to the registry it should be stored in.
        registry_for_type = {
            'classifier': self.classifiers,
            'embedding': self.embeddings,
            'normalizer': self.normalizers,
            'pretrained_pipeline': self.pretrained_pipelines,
            'selector': self.selectors,
            'spell_checker': self.spell_checkers,
            'stemmer': self.stemmers,
            'tokenizer': self.tokenizers,
            'util': self.utils,
        }
        all_component_paths_regex = nlu.nlu_package_location + 'components/*/*/'
        all_component_paths = glob.glob(all_component_paths_regex)
        for path in all_component_paths:
            if '__py' in path: continue  # skip python package/bytecode folders
            # logger.info('Loading info dict @ path'+ path)
            component = ComponentInfo.from_directory(path)
            self.all_components[component.name] = component
            if component.type in registry_for_type:
                registry_for_type[component.type][component.name] = component

    def list_all_components(self):
        """Print the name of every loaded component."""
        print("--------------Available Components in NLU :--------------")
        for name in self.all_components.keys(): print(name)

    def DEBUG_list_all_components(self):
        """Print every component with its input/output features and Spark column names."""
        print("--------------Available Components in NLU :--------------")
        for name in self.all_components.keys():
            component = self.all_components[name]
            # BUG FIX: OUTPUT_F previously printed `.inputs` (instead of `.outputs`)
            # and INPUT_N previously printed `.spark_output_column_names`.
            print(name, " INPUT_F : ", component.inputs, " OUTPUT_F ", component.outputs,
                  " INPUT_N ", component.spark_input_column_names, "OUTPUT_N ",
                  component.spark_output_column_names)

    def get_component_info_by_name(self, name):
        """Return the ComponentInfo registered under `name`; raises KeyError if absent."""
        return self.all_components[name]

    def list_all_components_of_type(self, component_type='embeddings'):
        pass

    @staticmethod
    def list_all_components_of_language(component_lang='ger'):
        pass

    @staticmethod
    def list_all_components_of_languageand_type(component_lang='ger', component_type='embeddings'):
        pass

    @staticmethod
    def get_default_component_of_type():
        pass

    @staticmethod
    def list_avaiable_output_types():
        pass

    @staticmethod
    def get_all_component_info_obj():
        pass
@dataclass
class ComponentInfo:
    """Metadata describing one NLU component, deserialized from its component_infos.json."""
    name: str
    description: str  # general annotator/model/component/pipeline info
    outputs: list  # this is which columns/output types this component is providing
    inputs: list  # this tells us which columns/input types the component is depending on
    type: str  # this tells us which kind of component this is
    output_level: str  # document, sentence, token, chunk, input_dependent or model_dependent
    spark_input_column_names: list  # default expected name for input columns when forking with spark nlp annotators on spark DFs
    spark_output_column_names: list  # default expected name for output columns when forking with spark nlp annotators on spark DFs
    provider: str  # Who provides the implementation of this annotator, Spark-NLP for base. Would be
    license: str  # open source or private
    computation_context: str  # Will this component do its computation in Spark land (like all of Spark NLP annotators do) or does it require some other computation engine or library like Tensorflow, Numpy, HuggingFace, etc..
    output_context: str  # Will this components final result
    trainable: bool

    @classmethod
    def from_directory(cls, component_info_dir):
        """Create a ComponentInfo object from the component_infos.json which is provided
        for every component.
        @param component_info_dir: `str` The directory containing the metadata file. This
            should be the root directory of a specific component.
        """
        if not component_info_dir:
            # BUG FIX: the error message previously named DatasetInfo (copy-paste leftover).
            raise ValueError("Calling ComponentInfo.from_directory() with undefined component_info_dir.")
        component_info_dir = component_info_dir.replace('//', '/')
        with open(os.path.join(component_info_dir, COMPONENT_INFO_FILE_NAME), "r") as f:
            component_info_dict = json.load(f)
        try:
            return cls(**component_info_dict)
        # BUG FIX: narrowed from a bare `except:`. cls(**dict) raises TypeError when the
        # JSON is missing required fields or carries unexpected ones; other exceptions
        # (e.g. KeyboardInterrupt) should propagate untouched.
        except TypeError:
            print(" Exception Occurred! For Path", component_info_dir,
                  " Json file most likely has missing features. Todo nicer output error info", sys.exc_info()[0])
raise | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/info.py | 0.422266 | 0.17614 | info.py | pypi |
from nlu.universe.atoms import LicenseType, NlpLevel
class NLP_LEVELS(NlpLevel):
    """Enumeration of the NLP output levels an annotator can produce at.

    Naming convention for input-row to output-row cardinality:
    XXX_SUPER is a N to M Mapping, with M <= N
    XXX_SUB is a N to M mapping, with M >=N
    no prefix implies a N to N mapping to be expected
    """
    DOCUMENT = NlpLevel('document')
    CHUNK = NlpLevel('chunk')
    NER_CHUNK = NlpLevel("ner_chunk")
    # NOTE(review): the next three values are upper-case while most other level values
    # are lower-case — confirm consumers compare level strings case-sensitively.
    KEYWORD_CHUNK = NlpLevel('KEYWORD_CHUNK')
    POS_CHUNK = NlpLevel('POS_CHUNK')
    NGRAM_CHUNK = NlpLevel('NGRAM_CHUNK')
    SUB_CHUNK = NlpLevel('sub_chunk')
    SUPER_CHUNK = NlpLevel('super_chunk')
    SENTENCE = NlpLevel('sentence')
    RELATION = NlpLevel('relation')
    TOKEN = NlpLevel('token')
    SUB_TOKEN = NlpLevel('sub_token')
    SUPER_TOKEN = NlpLevel('super_token')
    # Output level depends on the annotator feeding this component.
    INPUT_DEPENDENT_DOCUMENT_CLASSIFIER = NlpLevel('INPUT_DEPENDENT_DOCUMENT_CLASSIFIER')
    INPUT_DEPENDENT_DOCUMENT_EMBEDDING = NlpLevel('INPUT_DEPENDENT_DOCUMENT_EMBEDDING')
    # Same output level as the anno that generates the chunks
    INPUT_DEPENDENT_CHUNK_EMBEDDING = NlpLevel('INPUT_DEPENDENT_CHUNK_EMBEDDING')
class OCR_OUTPUT_LEVELS:
    """How many output rows an OCR component generates per input document."""
    # PAGES ARE LIKE TOKENS!! Book is full document!
    PAGES = 'pages'  # Generate 1 output per PAGE in each input document. I.e. if 2 PDFs input with 5 pages each, gens 10 rows. 1 to many mapping
    FILE = 'file'  # Generate 1 output per document, i.e. 2 PDFs with 5 pages each gen 2 rows, 1 to 1 mapping
    OBJECT = 'object'  # Generate 1 output row per detected object in input document. I.e. if 2 PDFs with 5 cats each, generates 10 rows. ---> REGION or not?
    CHARACTER = 'character'  # Generate 1 output row per OCR'd character, i.e. 2 PDFs with 100 chars each, gens 100 rows.
    TABLE = 'table'  # 1 Pandas DF per table.
class AnnoTypes:
    """String identifiers for the broad annotator categories NLU distinguishes."""
    # DOCUMENT_XX can be substituted for SENTENCE
    TOKENIZER = 'tokenizer'
    TOKEN_CLASSIFIER = 'token_classifier'
    TRANSFORMER_TOKEN_CLASSIFIER = 'transformer_token_classifier'  # Can be token level but also NER level
    TRANSFORMER_SEQUENCE_CLASSIFIER = 'transformer_sequence_classifier'  # Can be token level but also NER level
    CHUNK_CLASSIFIER = 'chunk_classifier'  # ASSERTION/ NER GENERATES/CONTEXT_PARSER THESE but DOES NOT TAKE THEM IN!!! Split into NER-CHUNK Classifier, etc..?
    DOCUMENT_CLASSIFIER = 'document_classifier'
    RELATION_CLASSIFIER = 'relation_classifier'  # Pairs of chunks
    TOKEN_EMBEDDING = 'token_embedding'
    CHUNK_EMBEDDING = 'chunk_embedding'
    DOCUMENT_EMBEDDING = 'document_embedding'
    SENTENCE_DETECTOR = 'sentence_detector'
    SENTENCE_EMBEDDING = 'sentence_embedding'
    SPELL_CHECKER = 'spell_checker'
    HELPER_ANNO = 'helper_anno'
    TEXT_NORMALIZER = 'text_normalizer'
    TOKEN_NORMALIZER = 'token_normalizer'
    # TODO chunk sub-classes? I.e. POS-CHUNKS, NER-CHUNKS, KEYWORD-CHUNKS, RESOLUTION-CHUNKS, etc??
    # NOTE(review): lower-case name duplicating TOKEN_NORMALIZER's value — looks like a
    # copy-paste leftover; confirm the intended value (perhaps 'pos_regex_chunker').
    pos_regex_chunker = 'token_normalizer'
    CHUNK_FILTERER = 'chunk_filterer'
TEXT_RECOGNIZER = 'text_recognizer' | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/universe/logic_universes.py | 0.462959 | 0.343617 | logic_universes.py | pypi |
from nlu.pipe.extractors.extractor_base_data_classes import SparkNLPExtractor, SparkNLPExtractorConfig
from nlu.pipe.extractors.extractor_methods.helper_extractor_methods import *
from nlu.pipe.extractors.extractor_methods.base_extractor_methods import *
"""
This file contains methods to get pre-defined configurations for every annotator.
Extractor_resolver.py should be used to resolve SparkNLP Annotator classes to methods
in this file, which return the corrosponding configs that need to be passed to
the master_extractor() call.
This file is where all the in extractor_base_data_classes.py Dataclasses are combined with the
extractors defined in extractor_methods.py.
"""
def default_get_nothing(output_col_prefix):
    """Build a config whose extractor keeps no fields at all.

    Useful for annotators whose output carries no relevant data.
    """
    cfg_fields = {
        'output_col_prefix': output_col_prefix,
        'name': 'nothing_extractor',
        'description': 'Extracts nothing. Useful for annotators with irrelevant data',
    }
    return SparkNLPExtractorConfig(**cfg_fields)
def default_only_result_config(output_col_prefix):
    """Build a config that keeps only the `result` field of an annotation."""
    cfg_fields = {
        'output_col_prefix': output_col_prefix,
        'get_result': True,
        'name': 'Default result extractor',
        'description': 'Just gets the result field',
    }
    return SparkNLPExtractorConfig(**cfg_fields)
def default_full_config(output_col_prefix='DEFAULT'):
    """Build a config that keeps every annotation field and every metadata entry."""
    # Enable every extraction flag the config supports.
    enabled_flags = ('get_positions', 'get_begin', 'get_end', 'get_embeds',
                     'get_result', 'get_meta', 'get_full_meta', 'get_annotator_type')
    flag_kwargs = {flag: True for flag in enabled_flags}
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        name='default_full',
        description='Default full configuration, keeps all data and gets all metadata fields',
        **flag_kwargs,
    )
def default_NER_converter_licensed_config(output_col_prefix='entities'):
    """Build a config for converted NER chunks without positions: just the entity labels,
    confidences and the classified text."""
    cfg_fields = {
        'output_col_prefix': output_col_prefix,
        'get_result': True,
        'get_meta': True,
        'meta_white_list': ['entity', 'confidence'],  # sentence, chunk
        'name': 'default_ner',
        'description': 'Converts IOB-NER representation into entity representation and generates confidences for the entire entity chunk',
    }
    return SparkNLPExtractorConfig(**cfg_fields)
def default_chunk_resolution_config(output_col_prefix='resolved_entities'):
    """Extract entity-resolution results without positions: the resolved text and its confidence."""
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_meta=True,
        meta_white_list=['confidence', 'resolved_text'],  # sentence, chunk
        # NOTE(review): name/description appear copy-pasted from the NER converter config —
        # confirm nothing keys on 'default_ner' before renaming to e.g. 'default_chunk_resolution'.
        name='default_ner',
        description='Converts IOB-NER representation into entity representation and generates confidences for the entire entity chunk',
    )
def default_relation_extraction_positional_config(output_col_prefix='extracted_relations'):
    """Extracts relation-extraction results and metadata, excluding entity positions."""
    # NOTE(review): despite the "positional" name, this config black-lists the
    # entity begin/end fields and is byte-identical to
    # default_relation_extraction_config — confirm whether positions should
    # actually be kept here.
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        meta_white_list=[],
        get_meta=True,
        meta_black_list=['entity1_begin', 'entity2_begin', 'entity1_end', 'entity2_end', ],
        name='default_relation_extraction',
        description='Get relation extraction result and all metadata, positions of entities excluded',
    )
def default_relation_extraction_config(output_col_prefix='extracted_relations'):
    """Extracts relation-extraction results and metadata, excluding entity positions."""
    # Entity begin/end offsets are black-listed; everything else in the
    # metadata is kept alongside the relation label result.
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        meta_white_list=[],
        get_meta=True,
        meta_black_list=['entity1_begin', 'entity2_begin', 'entity1_end', 'entity2_end', ],
        name='default_relation_extraction',
        description='Get relation extraction result and all metadata, positions of entities excluded',
    )
def default_de_identification_config(output_col_prefix='de_identified'):
    """Extracts only the de-identified result text."""
    # NOTE(review): name/description mention relation extraction — likely
    # copy-pasted; only the result field is actually extracted here.
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        name='positional_relation_extraction',
        description='Get relation extraction result and all metadata, which will include positions of entities chunks',
    )
def default_assertion_config(output_col_prefix='assertion'):
    """Extracts the assertion label (result) and its confidence from metadata."""
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        name='default_assertion_extraction',
        get_meta=True,
        meta_white_list=['confidence'],
        description='Gets the assertion result and confidence',
    )
def default_ner_config(output_col_prefix='med_ner'):
    """Default config for medical NER annotators: extracts nothing.

    Bug fix: this function was previously defined twice in a row; the first
    definition (returning default_only_result_config) was silently shadowed
    by this one. The shadowed duplicate has been removed, keeping the
    definition that was actually in effect at runtime.
    """
    return default_get_nothing(output_col_prefix)
def default_feature_assembler_config(output_col_prefix='feature_assembler'):
    """Config for the feature assembler: extracts nothing."""
    return SparkNLPExtractorConfig(
        name='features_assembled',
        description='Gets nothing',
        output_col_prefix=output_col_prefix,
        get_result=False,
        get_meta=False,
    )
def default_generic_classifier_config(output_col_prefix='generic_classifier'):
    """Extracts the generic-classifier label and its confidence."""
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        name='generic_classifier',
        get_meta=True,
        meta_white_list=['confidence'],
        description='Gets the result and confidence',
    ) | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/extractors/extractor_configs_HC.py | 0.718989 | 0.255802 | extractor_configs_HC.py | pypi |
from nlu.pipe.extractors.extractor_base_data_classes import SparkNLPExtractor,SparkNLPExtractorConfig
from nlu.pipe.extractors.extractor_methods.helper_extractor_methods import *
"""
This file contains methods to get pre-defined configurations for every annotator.
Extractor_resolver.py should be used to resolve SparkNLP Annotator classes to methods
in this file, which return the corresponding configs that need to be passed to
the master_extractor() call.
This file is where all the in extractor_base_data_classes.py Dataclasses are combined with the
extractors defined in extractor_methods.py.
"""
def default_full_config(output_col_prefix='DEFAULT'):
    """Config that keeps all data and all metadata fields."""
    # Turn on every extraction flag at once.
    flags = dict.fromkeys(
        ['get_positions', 'get_begin', 'get_end', 'get_embeds',
         'get_result', 'get_meta', 'get_full_meta', 'get_annotator_type'],
        True,
    )
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        name='default_full',
        description='Default full configuration, keeps all data and gets all metadata fields',
        **flags,
    )
def default_document_config(output_col_prefix='document'):
    """Extract only the document result, unpacked from its list wrapper."""
    return SparkNLPExtractorConfig(
        get_result=True,
        pop_result_list=True,
        output_col_prefix=output_col_prefix,
    )
def default_NER_config(output_col_prefix='NER'):
    """Extracts NER tokens without positions, just the IOB tags, confidences and classified tokens """
    # NOTE(review): get_result/get_meta are commented out below, so only the
    # meta_white_list is set and no extraction flags are actually enabled —
    # compare meta_NER_config; confirm this is intentional.
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        # get_result = True,
        # get_meta = True,
        meta_white_list = ['confidence'],
        name = 'default_ner',
        description = 'NER with IOB tags and confidences for them',
    )
def meta_NER_config(output_col_prefix='NER'):
    """NER extraction with IOB tags plus their confidences (no positions)."""
    return SparkNLPExtractorConfig(
        name='default_ner',
        description='NER with IOB tags and confidences for them',
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_meta=True,
        meta_white_list=['confidence'],
    )
def default_language_classifier_config(output_col_prefix='language'):
    """Config for language classifiers: keep only the top-confidence language.

    A custom metadata extractor drops all per-language confidences except
    the maximum one.
    """
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        get_result = True,
        get_meta = True,
        get_full_meta = True,
        pop_result_list = True,
        name = 'Only keep maximum language confidence',
        # Typo fix in description: "traiend" -> "trained".
        description = 'Instead of returning the confidence for every language the Classifier was trained on, only the maximum confidence will be returned',
        meta_data_extractor = SparkNLPExtractor(meta_extract_language_classifier_max_confidence,
                                                'Extract the maximum confidence from all classified languages and drop the others. TODO top k results',
                                                'Keep only top language confidence')
    )
def default_only_result_config(output_col_prefix):
    """Minimal config that extracts only the result field."""
    return SparkNLPExtractorConfig(
        name='Default result extractor',
        description='Just gets the result field',
        output_col_prefix=output_col_prefix,
        get_result=True,
    )
def default_only_embedding_config(output_col_prefix):
    """Minimal config that extracts only the embedding field."""
    return SparkNLPExtractorConfig(
        name='Default Embed extractor',
        description='Just get the Embed field',
        output_col_prefix=output_col_prefix,
        get_embeds=True,
    )
def default_only_result_and_positions_config(output_col_prefix):
    """Extract the result field together with begin/end positions."""
    return SparkNLPExtractorConfig(
        name='Positional result only default',
        description='Get the result field and the positions',
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_positions=True,
    )
def default_sentiment_dl_config(output_col_prefix='sentiment_dl'):
    """Config for sentiment-DL models: keep only the winning class confidence."""
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        get_result = True,
        get_full_meta = True,
        name = 'Only keep maximum sentiment confidence ',
        # Typo fixes in human-readable strings: "r eturning" -> "returning",
        # "returining" -> "returning", "withouth" -> "without".
        description = 'Instead of returning the confidence for Postive and Negative, only the confidence of the more likely class will be returned in the confidence column',
        meta_data_extractor = SparkNLPExtractor(extract_maximum_confidence,
                                                'Instead of returning positive/negative confidence, only the maximum confidence will be returned without sentence number reference.',
                                                'Maximum binary confidence')
    )
def default_lang_classifier_config(output_col_prefix='sentiment_dl'):
    """Config for language classifiers that keeps only the most likely language."""
    # TODO(review): default output_col_prefix is 'sentiment_dl' although this is
    # a language-classifier config — looks copy-pasted; confirm against callers.
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        get_result = True,
        get_full_meta = True,
        pop_meta_list = True,
        pop_result_list = True,
        name = 'default_lang_classifier_config',
        description = 'Instead of returning the confidence for every language, just returns the confidence of the most likely language',
        meta_data_extractor = SparkNLPExtractor(extract_maximum_confidence,
                                                'Instead of returining positive/negative confidence, only the maximum confidence will be returned withouth sentence number reference.',
                                                'Maximum binary confidence')
    )
def default_sentiment_config(output_col_prefix='sentiment'):
    """Config for sentiment models: keep only the winning class confidence."""
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        get_result = True,
        get_full_meta = True,
        name = 'Only keep maximum sentiment confidence ',
        # Typo fixes in human-readable strings: "Postive" -> "Positive",
        # "returining" -> "returning", "withouth" -> "without".
        description = 'Instead of returning the confidence for Positive and Negative, only the confidence of the more likely class will be returned in the confidence column',
        meta_data_extractor = SparkNLPExtractor(extract_maximum_confidence,
                                                'Instead of returning positive/negative confidence, only the maximum confidence will be returned without sentence number reference.',
                                                'Maximum binary confidence')
    )
def default_sentiment_vivk_config(output_col_prefix='vivk_sentiment'):
    """Vivekn sentiment: extract prediction confidence and resulting label."""
    return SparkNLPExtractorConfig(
        name='Default sentiment vivk',
        description='Get prediction confidence and the resulting label',
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_full_meta=True,
    )
def default_multi_classifier_dl_config(output_col_prefix='classifier_dl'):
    """Multi-label classifier: all labels, with max-confidence metadata extractor."""
    max_conf_extractor = SparkNLPExtractor(
        extract_maximum_confidence,
        'Instead returning confidence for each class, only return max confidence',
        'Max confidence',
    )
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_full_meta=True,
        name='default_classifier_dl',
        description='Get all predicted confidences and labels',
        pop_never=True,
        meta_data_extractor=max_conf_extractor,
    )
def default_classifier_dl_config(output_col_prefix='classifier_dl'):
    """Classifier-DL: all labels, with max-confidence metadata extractor."""
    max_conf_extractor = SparkNLPExtractor(
        extract_maximum_confidence,
        'Instead returning confidence for each class, only return max confidence',
        'Max confidence',
    )
    return SparkNLPExtractorConfig(
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_full_meta=True,
        name='default_classifier_dl',
        description='Get all predicted confidences and labels',
        meta_data_extractor=max_conf_extractor,
    )
# --- Thin wrappers: annotators whose default extraction is just the result
# field (or just the embedding field), parameterized by output column prefix. ---
def default_tokenizer_config(output_col_prefix='token'):
    return default_only_result_config(output_col_prefix)
def default_POS_config(output_col_prefix='POS_tag'):
    return default_only_result_config(output_col_prefix)
def default_sentence_detector_DL_config(output_col_prefix='sentence'):
    return default_only_result_config(output_col_prefix)
def default_chunker_config(output_col_prefix='matched_chunk'):
    return default_only_result_config(output_col_prefix)
def default_T5_config(output_col_prefix='T5'):
    return default_only_result_config(output_col_prefix)
def default_marian_config(output_col_prefix='translated'):
    return default_only_result_config(output_col_prefix)
def default_gpt2_config(output_col_prefix='generated'):
    return default_only_result_config(output_col_prefix)
# EMBEDS
def default_sentence_embedding_config(output_col_prefix='sentence_embedding'):
    return default_only_embedding_config(output_col_prefix)
def default_chunk_embedding_config(output_col_prefix='chunk_embedding'):
    return default_only_embedding_config(output_col_prefix)
def default_word_embedding_config(output_col_prefix='word_embedding'):
    return default_only_embedding_config(output_col_prefix)
def default_token_classifier_config(output_col_prefix='token_classifier'):
    return default_only_result_config(output_col_prefix)
# TOKEN CLEANERS
def default_stopwords_config(output_col_prefix='stopwords_removed'):
    return default_only_result_config(output_col_prefix)
def default_lemma_config(output_col_prefix='lemma'):
    return default_only_result_config(output_col_prefix)
def default_stemm_config(output_col_prefix='stemm'):
    return default_only_result_config(output_col_prefix)
def default_norm_config(output_col_prefix='norm'):
    return default_only_result_config(output_col_prefix)
def default_norm_document_config(output_col_prefix='norm_document'):
    return default_only_result_config(output_col_prefix)
def default_sentence_detector_config(output_col_prefix='sentence'):
    return default_only_result_config(output_col_prefix)
def default_dep_untyped_config(output_col_prefix='dependency_untyped'):
    return default_only_result_config(output_col_prefix)
def default_dep_typed_config(output_col_prefix='dependency_typed'):
    return default_only_result_config(output_col_prefix)
def default_spell_norvig_config(output_col_prefix='spell_checked'):
    return default_only_result_config(output_col_prefix)
def default_spell_context_config(output_col_prefix='spell_checked'):
    return default_only_result_config(output_col_prefix)
def default_spell_symmetric_config(output_col_prefix='spell_checked'):
    return default_only_result_config(output_col_prefix)
def default_ngram_config(output_col_prefix='ngram'):
    return default_only_result_config(output_col_prefix)
def default_word_segmenter_config(output_col_prefix='words_segmented'):
    return default_only_result_config(output_col_prefix)
def default_chunk_config(output_col_prefix='matched_chunk'):
    return default_only_result_config(output_col_prefix)
def default_yake_config(output_col_prefix='keywords'):
    """YAKE keyword extraction: keywords plus their scores, never popped."""
    return SparkNLPExtractorConfig(
        name='default_yake',
        description='Get all keywords and their confidences',
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_meta=True,
        meta_white_list=['score'],
        pop_never=True,
    )
def default_ner_converter_config(output_col_prefix='ner_chunk'):
    """Entity chunks from the NER converter: labels and chunk-level confidences."""
    return SparkNLPExtractorConfig(
        name='default_ner',
        description='Converts IOB-NER representation into entity representation and generates confidences for the entire entity chunk',
        output_col_prefix=output_col_prefix,
        get_result=True,
        get_meta=True,
        meta_white_list=['entity', 'confidence'],
    )
def default_doc2chunk_config(output_col_prefix='doc2chunk'):
    """Doc2Chunk converter: no fields extracted (pure type conversion)."""
    return SparkNLPExtractorConfig(
        output_col_prefix = output_col_prefix,
        get_result = False,
        name = 'doc2chunk',
        get_meta = False,
        description = 'Converts Doc type col to chunk aka entity type',
    ) | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/extractors/extractor_configs_OS.py | 0.561215 | 0.173919 | extractor_configs_OS.py | pypi |
import numpy as np
def meta_extract_language_classifier_max_confidence(row, configs):
    """Keep only the language classification with the highest confidence.

    Keys containing 'sentence' are scored -1 so they can never win; every
    other value is assumed to be a single-element list whose first entry is
    a stringified confidence. Returns {winning_key + '_confidence': value}.
    """
    def score(item):
        key, value = item
        # Sentence-ID entries must not compete with real confidences.
        return -1 if 'sentence' in key else float(value[0])
    best_key = max(row.items(), key=score)[0]
    return {best_key + '_confidence': row[best_key][0]}  # drop [0] for list return
def zipp(l):
    """Transpose an iterable of iterables via zip(*l).

    Workaround: star-unpacking inside a comprehension is not supported,
    so callers use this named helper instead.
    """
    return zip(*l)
def extract_maximum_confidence(row, configs):
    ''' Extract the maximum confidence from any classifier with N classes.
    A classifier with N classes, has N confidences in it's metadata by default, which is too much data usually.
    This extractor gets the highest confidence from the array of confidences.
    This method assumes all keys in metadata corrospond to confidences, except the `sentence` key, which maps to a sentence ID
    key schema is 'meta_' + configs.output_col_prefix + '_confidence'
    Parameters
    -------------
    configs : SparkNLPExtractorConfig
        if configs.get_sentence_origin is True, the sentence origin column will be kept, otherwise dropped.
    row : dict
        i.e. looks like{'meta_category_sentence': ['0'],'meta_category_surprise': ['0.0050183665'],'meta_category_sadness': ['8.706827E-5'],'meta_category_joy': ['0.9947379'],'meta_category_fear': ['1.5667251E-4']}
    Returns
    ------------
    dict
        if configs.get_sentence_origin True {'meta_sentiment_dl_sentence': ['0', '1'], 'meta_sentiment_dl_confidence': [0.9366506, 0.9366506]}
        else {'meta_sentiment_dl_confidence': [0.9366506, 0.9366506]}
    '''
    # Key under which the sentence-origin IDs are stored for this annotator.
    meta_sent_key = 'meta_' + configs.output_col_prefix + '_sentence'
    fl = lambda k : False if 'sentence' in k else True # every key that has not the sub string sentence in it is considerd a confidence key
    confidences_keys = list(filter (fl, row.keys()))
    if configs.pop_meta_list :
        # Scalar result: max over the first element of every confidence list.
        return {
            **{'meta_' + configs.output_col_prefix + '_confidence':max([float(row[k][0]) for k in confidences_keys ])},
            **({'meta_' + configs.output_col_prefix + '_sentence' : row[meta_sent_key]} if configs.get_sentence_origin else {})
        }
    else:
        if len(confidences_keys) == 1 :
            # NOTE(review): float(row[k]) is applied to the raw value here; if the
            # value is a list (as in the docstring example) this raises TypeError —
            # confirm whether values can be scalars when pop_meta_list is False.
            return {
                **{'meta_' + configs.output_col_prefix + '_confidence':max([float(row[k]) for k in confidences_keys ])},
                **({'meta_' + configs.output_col_prefix + '_sentence' : row[meta_sent_key]} if configs.get_sentence_origin else {})
            }
        else :
            # Element-wise max across classes: transpose the per-class confidence
            # lists with zipp, then take the max per sentence position.
            return {
                **{'meta_' + configs.output_col_prefix + '_confidence': [ max(z )for z in zipp (list(map(float,row[k])) for k in confidences_keys)]} ,
                **({'meta_' + configs.output_col_prefix + '_sentence' : row[meta_sent_key]} if configs.get_sentence_origin else {})
            } | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/extractors/extractor_methods/helper_extractor_methods.py | 0.477311 | 0.389343 | helper_extractor_methods.py | pypi |
Resolve Annotator Classes in the Pipeline to Extractor Configs and Methods.
Each Spark NLP Annotator Class is mapped to at least one
Every Annotator should have 2 configs. Some might offer multiple configs/method pairs, based on model/NLP reference.
- default/minimalistic -> Just the results of the annotations, no confidences or extra metadata
- with meta -> A config that leverages white/black list and gets the most relevant metadata
- with positions -> With Begins/Ends
"""
from sparknlp.annotator import *
from sparknlp.base import *
# Annotator classes (models and approaches) whose output column name can be
# deduced from the loaded model / NLP reference.
name_deductable_OS = [
    NerConverter,
    BertEmbeddings,
    AlbertEmbeddings,
    XlnetEmbeddings ,
    WordEmbeddingsModel ,
    ElmoEmbeddings ,
    BertSentenceEmbeddings,
    UniversalSentenceEncoder,
    SentenceEmbeddings,
    ContextSpellCheckerModel ,
    SymmetricDeleteModel ,
    NorvigSweetingModel ,
    NerDLModel ,
    NerCrfModel,
    LanguageDetectorDL ,
    SentimentDLModel ,
    SentimentDetectorModel ,
    ViveknSentimentModel ,
    MultiClassifierDLModel,
    ClassifierDLModel ,
    ChunkEmbeddings ,
    TextMatcherModel,
    RegexMatcherModel,
    DateMatcher,
    MultiDateMatcher,
    T5Transformer,
    MarianTransformer,
    WordSegmenterModel,
    DistilBertEmbeddings,
    RoBertaEmbeddings,
    XlmRoBertaEmbeddings,
    DistilBertForTokenClassification,
    BertForTokenClassification,
    LongformerEmbeddings,
    DistilBertForSequenceClassification,
    BertForSequenceClassification,
    # approaches
    ViveknSentimentApproach ,
    SentimentDLApproach ,
    ClassifierDLApproach ,
    MultiClassifierDLApproach ,
    NerDLApproach ,
    PerceptronApproach ,
    Doc2Chunk,
    Chunk2Doc,
]
# Subset of the above for which the deduced name should ALWAYS be used,
# regardless of other naming heuristics.
always_name_deductable_OS = [
    BertEmbeddings,
    AlbertEmbeddings,
    XlnetEmbeddings ,
    WordEmbeddingsModel ,
    ElmoEmbeddings ,
    BertSentenceEmbeddings,
    UniversalSentenceEncoder,
    SentenceEmbeddings,
    MultiClassifierDLModel,
    ClassifierDLModel ,
    ChunkEmbeddings ,
    TextMatcherModel,
    RegexMatcherModel,
    DateMatcher,
    MultiDateMatcher,
    # T5Transformer,
    # MarianTransformer,
    # WordSegmenterModel,
    DistilBertEmbeddings,
    RoBertaEmbeddings,
    XlmRoBertaEmbeddings,
    Chunk2Doc,
] | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/col_substitution/name_deduction/name_deductable_annotators_OS.py | 0.644449 | 0.456531 | name_deductable_annotators_OS.py | pypi |
from sparknlp_jsl.annotator import *
from sparknlp.base import *
from sparknlp_display import *
class VizUtilsHC():
    """Utils for interfacing with the Spark-NLP-Display lib - licensed Viz.

    All members are static. `infer_*` helpers locate the relevant components
    and their output columns in a fitted NLU pipeline; `viz_*` methods render
    one prediction result (`anno_res`). Rendering target per method:
      - write_to_streamlit=True -> HTML embedded via st.markdown
      - is_databricks_env=True  -> HTML string returned for displayHTML()
      - otherwise               -> displayed inline (notebook)
    """
    # Scrollable container used when embedding visualizer HTML in streamlit.
    HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    @staticmethod
    def infer_viz_licensed(pipe)->str:
        """For a given NLUPipeline with licensed components, infers which visualizations are applicable.

        Returns 'dep', 'resolution', 'relation', 'assert' or 'ner'
        (implicitly None when no visualizable component is present).
        """
        # we go in reverse, which makes NER always take lowest priority and NER feeder annotators have higher priority
        for c in pipe.components[::-1]:
            if isinstance(c.model, TypedDependencyParserModel) : return 'dep'
            if isinstance(c.model, (SentenceEntityResolverModel)) : return 'resolution'
            # Bug fix: the tuple previously listed RelationExtractionDLModel twice,
            # so pipes containing only RelationExtractionModel were never detected
            # as 'relation' (cf. infer_relation_dependencies below).
            if isinstance(c.model, (RelationExtractionModel,RelationExtractionDLModel)) : return 'relation'
            if isinstance(c.model, (AssertionDLModel,AssertionLogRegModel)) : return 'assert'
            if isinstance(c.model, (NerConverter,NerConverterInternal)) : return 'ner'
    @staticmethod
    def viz_ner(anno_res, pipe,labels = [] , viz_colors={},is_databricks_env =False,write_to_streamlit=False):
        """Infer columns required for ner viz and then viz it.
        viz_colors : set label colors by specifying hex codes , i.e. viz_colors = {'LOC':'#800080', 'PER':'#77b5fe'}
        labels : only allow these labels to be displayed. (default: [] - all labels will be displayed)
        """
        # NOTE: mutable defaults above are read-only here, so they are safe;
        # kept for interface compatibility.
        document_col,entities_col = VizUtilsHC.infer_ner_dependencies(pipe)
        ner_vis = NerVisualizer()
        if len(viz_colors) > 0 : ner_vis.set_label_colors(viz_colors)
        if write_to_streamlit :
            import streamlit as st
            HTML = ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels, return_html=True )
            # Split off the <style> header so the CSS is injected outside the wrapper.
            CSS,HTML = HTML.split('</style>')
            CSS = CSS + '</style>'
            HTML = f'<div> {HTML} '
            st.markdown(CSS, unsafe_allow_html=True)
            st.markdown(VizUtilsHC.HTML_WRAPPER.format(HTML), unsafe_allow_html=True)
        elif not is_databricks_env:
            ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels )
        else :
            return ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels,return_html=True )
    @staticmethod
    def infer_ner_dependencies(pipe):
        """Finds entities and doc cols for ner viz"""
        doc_component = None
        entities_component = None
        for c in pipe.components:
            if isinstance(c.model, (NerConverter,NerConverterInternal)) : entities_component = c
            if isinstance(c.model, DocumentAssembler) : doc_component = c
        document_col = doc_component.out_types[0]
        entities_col = entities_component.out_types[0]
        return document_col, entities_col
    @staticmethod
    def viz_dep(anno_res,pipe,is_databricks_env,write_to_streamlit=False):
        """Viz dep result"""
        pos_col,dep_typ_col,dep_untyp_col = VizUtilsHC.infer_dep_dependencies(pipe)
        dependency_vis = DependencyParserVisualizer()
        if write_to_streamlit :
            import streamlit as st
            SVG = dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col,return_html=True)
            st.markdown(VizUtilsHC.HTML_WRAPPER.format(SVG), unsafe_allow_html=True)
        elif not is_databricks_env:
            dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col)
        else:
            return dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col,return_html=True)
    @staticmethod
    def infer_dep_dependencies(pipe):
        """Finds pos, dep_typed and dep_untyped cols for dep viz"""
        pos_component = None
        dep_untyped_component = None
        dep_typed_component = None
        for c in pipe.components:
            if isinstance(c.model, PerceptronModel) : pos_component = c
            if isinstance(c.model, TypedDependencyParserModel) : dep_typed_component = c
            if isinstance(c.model, DependencyParserModel) : dep_untyped_component = c
        pos_col = pos_component.out_types[0]
        dep_typ_col = dep_typed_component.out_types[0]
        dep_untyp_col = dep_untyped_component.out_types[0]
        return pos_col,dep_typ_col,dep_untyp_col
    @staticmethod
    def viz_resolution(anno_res,pipe,viz_colors={},is_databricks_env=False,write_to_streamlit=False):
        """Viz dep result. Set label colors by specifying hex codes, i.e. viz_colors={'TREATMENT':'#800080', 'PROBLEM':'#77b5fe'} """
        entities_col,resolution_col,doc_col = VizUtilsHC.infer_resolution_dependencies(pipe)
        er_vis = EntityResolverVisualizer()
        if len(viz_colors) > 0 : er_vis.set_label_colors(viz_colors)
        if write_to_streamlit :
            import streamlit as st
            HTML = er_vis.display(anno_res,label_col=entities_col, resolution_col = resolution_col,document_col=doc_col,return_html=True)
            CSS,HTML = HTML.split('</style>')
            CSS = CSS + '</style>'
            HTML = f'<div> {HTML} '
            st.markdown(CSS, unsafe_allow_html=True)
            st.markdown(VizUtilsHC.HTML_WRAPPER.format(HTML), unsafe_allow_html=True)
        elif not is_databricks_env:
            er_vis.display(anno_res,label_col=entities_col, resolution_col = resolution_col,document_col=doc_col)
        else:
            return er_vis.display(anno_res,label_col=entities_col, resolution_col = resolution_col,document_col=doc_col,return_html=True)
    @staticmethod
    def infer_resolution_dependencies(pipe):
        """Finds entities_col, resolution_col and doc_col for resolution viz"""
        entities_component,resolution_component,doc_component = None, None, None
        for c in pipe.components:
            if isinstance(c.model, DocumentAssembler) : doc_component = c
            if isinstance(c.model, (NerConverter,NerConverterInternal)) : entities_component = c
            if isinstance(c.model, (SentenceEntityResolverModel)) : resolution_component = c
        entities_col = entities_component.out_types[0]
        resolution_col = resolution_component.out_types[0]
        doc_col = doc_component.out_types[0]
        return entities_col,resolution_col,doc_col
    @staticmethod
    def viz_relation(anno_res,pipe,is_databricks_env,write_to_streamlit=False):
        """Viz relation result."""
        relation_col,document_col = VizUtilsHC.infer_relation_dependencies(pipe)
        re_vis = RelationExtractionVisualizer()
        if write_to_streamlit :
            import streamlit as st
            HTML = re_vis.display(anno_res,relation_col = relation_col,document_col = document_col, show_relations=True, return_html=True)
            st.markdown(VizUtilsHC.HTML_WRAPPER.format(HTML), unsafe_allow_html=True)
        # Bug fix: this was a plain `if`, so after rendering to streamlit the
        # result was rendered a second time inline. Now `elif`, consistent
        # with every other viz_* method in this class.
        elif not is_databricks_env:
            re_vis.display(anno_res,relation_col = relation_col,document_col = document_col, show_relations=True)
        else:
            return re_vis.display(anno_res,relation_col = relation_col,document_col = document_col, show_relations=True, return_html=True)
    @staticmethod
    def infer_relation_dependencies(pipe):
        """Finds relation_col and document_col for relation viz"""
        relation_component,doc_component = None, None
        for c in pipe.components:
            if isinstance(c.model, DocumentAssembler) : doc_component = c
            if isinstance(c.model, (RelationExtractionDLModel,RelationExtractionModel)) : relation_component = c
        relation_col = relation_component.out_types[0]
        document_col = doc_component.out_types[0]
        return relation_col,document_col
    @staticmethod
    def viz_assertion(anno_res,pipe,viz_colors={},is_databricks_env=False,write_to_streamlit=False):
        """Viz assertion result. Set label colors by specifying hex codes, i.e. viz_colors={'TREATMENT':'#008080', 'problem':'#800080'} """
        entities_col,assertion_col, doc_col = VizUtilsHC.infer_assertion_dependencies(pipe)
        assertion_vis = AssertionVisualizer()
        if len(viz_colors) > 0 : assertion_vis.set_label_colors(viz_colors)
        if write_to_streamlit :
            import streamlit as st
            HTML = assertion_vis.display(anno_res,label_col = entities_col,assertion_col = assertion_col ,document_col = doc_col,return_html=True)
            CSS,HTML = HTML.split('</style>')
            CSS = CSS + '</style>'
            HTML = f'<div> {HTML} '
            st.markdown(CSS, unsafe_allow_html=True)
            st.markdown(VizUtilsHC.HTML_WRAPPER.format(HTML), unsafe_allow_html=True)
        elif not is_databricks_env:
            assertion_vis.display(anno_res,label_col = entities_col,assertion_col = assertion_col ,document_col = doc_col)
        else:
            return assertion_vis.display(anno_res,label_col = entities_col,assertion_col = assertion_col ,document_col = doc_col,return_html=True)
    @staticmethod
    def infer_assertion_dependencies(pipe):
        """Finds entities_col, assertion_col and doc_col for assertion viz"""
        entities_component,assert_component, doc_component = None, None,None
        for c in pipe.components:
            if isinstance(c.model, DocumentAssembler) : doc_component = c
            if isinstance(c.model, (AssertionDLModel,AssertionLogRegModel)) : assert_component = c
            if isinstance(c.model, (NerConverter,NerConverterInternal)) : entities_component = c
        entities_col = entities_component.out_types[0]
        assertion_col = assert_component.out_types[0]
        doc_col = doc_component.out_types[0]
        return entities_col,assertion_col, doc_col
from sparknlp.annotator import *
from nlu.pipe.viz.vis_utils_OS import VizUtilsOS
import random
class VizUtils():
    """Utils for interfacing with the Spark-NLP-Display lib"""
    @staticmethod
    def get_random():
        """Return a random int, used as a unique streamlit widget key."""
        return random.randint(0, 1333333333337)
    @staticmethod
    def infer_viz_type(pipe) -> str:
        """For a given NLUPipeline, infers which visualizations are applicable. """
        if not pipe.has_licensed_components:
            return VizUtilsOS.infer_viz_open_source(pipe)
        # Licensed path: import lazily so open-source installs never touch it.
        from nlu.pipe.viz.vis_utils_HC import VizUtilsHC
        return VizUtilsHC.infer_viz_licensed(pipe)
    @staticmethod
    def viz_OS(anno_res, pipe, viz_type, viz_colors, labels_to_viz, is_databricks_env, write_to_streamlit, streamlit_key):
        """Vizualize open source component"""
        if streamlit_key == "RANDOM":
            streamlit_key = VizUtils.get_random()
        if viz_type == 'ner':
            return VizUtilsOS.viz_ner(anno_res, pipe, labels_to_viz, viz_colors, is_databricks_env, write_to_streamlit, streamlit_key)
        if viz_type == 'dep':
            return VizUtilsOS.viz_dep(anno_res, pipe, is_databricks_env, write_to_streamlit, streamlit_key)
        raise ValueError("Could not find applicable viz_type. Please make sure you specify either ner, dep, resolution, relation, assert or dep and have loaded corrosponding components")
    @staticmethod
    def viz_HC(anno_res, pipe, viz_type, viz_colors, labels_to_viz, is_databricks_env, write_to_streamlit):
        """Vizualize licensed component"""
        from nlu.pipe.viz.vis_utils_HC import VizUtilsHC
        # Dispatch table keyed by viz_type; each entry defers the actual call.
        dispatch = {
            'ner': lambda: VizUtilsHC.viz_ner(anno_res, pipe, labels_to_viz, viz_colors, is_databricks_env, write_to_streamlit),
            'dep': lambda: VizUtilsHC.viz_dep(anno_res, pipe, is_databricks_env, write_to_streamlit),
            'resolution': lambda: VizUtilsHC.viz_resolution(anno_res, pipe, viz_colors, is_databricks_env, write_to_streamlit),
            'relation': lambda: VizUtilsHC.viz_relation(anno_res, pipe, is_databricks_env, write_to_streamlit),
            'assert': lambda: VizUtilsHC.viz_assertion(anno_res, pipe, viz_colors, is_databricks_env, write_to_streamlit),
        }
        if viz_type in dispatch:
            return dispatch[viz_type]()
        raise ValueError("Could not find applicable viz_type. Please make sure you specify either ner, dep, resolution, relation, assert or dep and have loaded corrosponding components")
"""Define whiche annotators model are definable by which vizualizer. There are 5 in total, 2 open source and 5 HC"""
# vizalbe_components_OC = {
# 'ner' : [NerConverter],
# 'dep' : [DependencyParserModel],
# }
# vizalbe_components_HC = {
# 'ner':[NerConverter,NerConverterInternal],
# 'resolution' : [SentenceEntityResolverModel, ChunkEntityResolverModel] ,
# 'relation' : [RelationExtractionModel,RelationExtractionDLModel],
# 'assert' : [AssertionDLModel,AssertionLogRegApproach],
# 'dep' : [DependencyParserModel],
# } | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/viz/vis_utils.py | 0.539469 | 0.251823 | vis_utils.py | pypi |
from sparknlp_display import NerVisualizer,DependencyParserVisualizer
from sparknlp.annotator import NerConverter,DependencyParserModel, TypedDependencyParserModel, PerceptronModel
from sparknlp.base import DocumentAssembler
class VizUtilsOS():
"""Utils for interfacing with the Spark-NLP-Display lib and vizzing Open Source Components - Open source"""
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
    @staticmethod
    def infer_viz_open_source(pipe)->str:
        """For a given NLUPipeline with only open source components, infers which visualizations are applicable.

        Returns 'ner' or 'dep'; implicitly returns None when neither a
        NerConverter nor a DependencyParserModel is present.
        """
        for c in pipe.components:
            if isinstance(c.model, NerConverter) : return 'ner'
            if isinstance(c.model, DependencyParserModel) : return 'dep'
    @staticmethod
    def viz_ner(anno_res, pipe,labels = None , viz_colors={},is_databricks_env=False,write_to_streamlit=False,streamlit_key='RANDOM' ):
        """Infer columns required for ner viz and then viz it.
        viz_colors : set label colors by specifying hex codes , i.e. viz_colors = {'LOC':'#800080', 'PER':'#77b5fe'}
        labels : only allow these labels to be displayed. (default: [] - all labels will be displayed)
        """
        # Locate the document and entity output columns produced by the pipeline.
        document_col,entities_col = VizUtilsOS.infer_ner_dependencies(pipe)
        ner_vis = NerVisualizer()
        ner_vis.set_label_colors(viz_colors)
        if write_to_streamlit :
            import streamlit as st
            # Render to HTML, split off the <style> header, then embed CSS and
            # the scrollable wrapper separately via st.markdown.
            HTML = ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels, return_html=True )
            CSS,HTML = HTML.split('</style>')
            CSS = CSS + '</style>'
            HTML = f'<div> {HTML} '
            st.markdown(CSS, unsafe_allow_html=True)
            st.markdown(VizUtilsOS.HTML_WRAPPER.format(HTML), unsafe_allow_html=True)
        elif not is_databricks_env:
            # Notebook environment: display inline.
            ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels )
        else :
            # Databricks: return raw HTML for displayHTML().
            return ner_vis.display(anno_res,label_col=entities_col,document_col=document_col, labels=labels, return_html=True )
@staticmethod
def infer_ner_dependencies(pipe):
"""Finds entities and doc cols for ner viz"""
doc_component = None
entities_component = None
for c in pipe.components:
if isinstance(c.model, NerConverter) : entities_component = c
if isinstance(c.model, DocumentAssembler) : doc_component = c
document_col = doc_component.out_types[0]
entities_col = entities_component.out_types[0]
return document_col, entities_col
@staticmethod
def viz_dep(anno_res,pipe,is_databricks_env,write_to_streamlit,streamlit_key='RANDOM'):
"""Viz dep result"""
pos_col,dep_typ_col,dep_untyp_col = VizUtilsOS.infer_dep_dependencies(pipe)
dependency_vis = DependencyParserVisualizer()
if write_to_streamlit :
import streamlit as st
SVG = dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col,return_html=True)
# st.markdown(SVG, unsafe_allow_html=True)
st.markdown(VizUtilsOS.HTML_WRAPPER.format(SVG), unsafe_allow_html=True)
elif not is_databricks_env:
dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col)
else:
return dependency_vis.display(anno_res,pos_col =pos_col,dependency_col = dep_untyp_col ,dependency_type_col = dep_typ_col,return_html=True)
@staticmethod
def infer_dep_dependencies(pipe):
"""Finds entities,pos,dep_typed,dep_untyped and doc cols for dep viz viz"""
# doc_component = None
pos_component = None
dep_untyped_component = None
dep_typed_component = None
for c in pipe.components:
if isinstance(c.model, PerceptronModel) : pos_component = c
if isinstance(c.model, TypedDependencyParserModel) : dep_typed_component = c
if isinstance(c.model, DependencyParserModel) : dep_untyped_component = c
pos_col = pos_component.out_types[0]
dep_typ_col = dep_typed_component.out_types[0]
dep_untyp_col = dep_untyped_component.out_types[0]
return pos_col,dep_typ_col,dep_untyp_col | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/viz/vis_utils_OS.py | 0.558809 | 0.219317 | vis_utils_OS.py | pypi |
from nlu.discovery import Discoverer
from typing import List, Optional, Dict
import streamlit as st
from nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS
from nlu.pipe.viz.streamlit_viz.gen_streamlit_code import get_code_for_viz
from nlu.pipe.viz.streamlit_viz.styles import _set_block_container_style
from nlu.pipe.viz.streamlit_viz.streamlit_viz_tracker import StreamlitVizTracker
class NERStreamlitBlock():
    """Streamlit building block that renders an interactive NER visualization for a NLU pipeline."""
    @staticmethod
    def visualize_ner(
            pipe, # Nlu component_list
            text:str,
            ner_tags: Optional[List[str]] = None,
            show_label_select: bool = True,
            show_table: bool = False,
            title: Optional[str] = "Named Entities",
            sub_title: Optional[str] = "Recognize various `Named Entities (NER)` in text entered and filter them. You can select from over `100 languages` in the dropdown.",
            colors: Dict[str, str] = {},
            show_color_selector: bool = False,
            set_wide_layout_CSS:bool=True,
            generate_code_sample:bool = False,
            key = "NLU_streamlit",
            model_select_position:str = 'side',
            show_model_select : bool = True,
            show_text_input:bool = True,
            show_infos:bool = True,
            show_logo:bool = True,
    ):
        """Render an interactive NER visualization in Streamlit.

        Optionally shows a model selector, a text input, a label filter, a color selector
        and a chunk-level prediction table; all `show_*` flags toggle individual widgets.
        Widgets are created in call order, so the statement order below is significant.
        """
        StreamlitVizTracker.footer_displayed=False
        if set_wide_layout_CSS : _set_block_container_style()
        if show_logo :StreamlitVizTracker.show_logo()
        if show_model_select :
            model_selection = Discoverer.get_components('ner',include_pipes=True)
            model_selection.sort()
            # Pre-select the currently loaded model (first token of the pipe's nlu_ref).
            if model_select_position == 'side':ner_model_2_viz = st.sidebar.selectbox("Select a NER model",model_selection,index=model_selection.index(pipe.nlu_ref.split(' ')[0]))
            else : ner_model_2_viz = st.selectbox("Select a NER model",model_selection,index=model_selection.index(pipe.nlu_ref.split(' ')[0]))
            # Re-load the pipeline only when the user picked a different model.
            pipe = pipe if pipe.nlu_ref == ner_model_2_viz else StreamlitUtilsOS.get_pipe(ner_model_2_viz)
        if title: st.header(title)
        if show_text_input : text = st.text_area("Enter text you want to visualize NER classes for below", text, key=key)
        if sub_title : st.subheader(sub_title)
        if generate_code_sample: st.code(get_code_for_viz('NER',StreamlitUtilsOS.extract_name(pipe),text))
        if ner_tags is None: ner_tags = StreamlitUtilsOS.get_NER_tags_in_pipe(pipe)
        if not show_color_selector :
            if show_label_select:
                exp = st.beta_expander("Select entity labels to highlight")
                label_select = exp.multiselect(
                    "These labels are predicted by the NER model. Select which ones you want to display",
                    options=ner_tags,default=list(ner_tags))
            else : label_select = ner_tags
            pipe.viz(text,write_to_streamlit=True, viz_type='ner',labels_to_viz=label_select,viz_colors=colors, streamlit_key=key)
        else : # TODO WIP color select
            cols = st.beta_columns(3)
            exp = cols[0].beta_expander("Select entity labels to display")
            # NOTE(review): the first color_picker result is immediately overwritten below --
            # likely leftover WIP code; confirm before removing.
            color = st.color_picker('Pick A Color', '#00f900',key = key)
            color = cols[2].color_picker('Pick A Color for a specific entity label', '#00f900',key = key)
            tag2color = cols[1].selectbox('Pick a ner tag to color', ner_tags,key = key)
            colors[tag2color]=color
        if show_table : st.write(pipe.predict(text, output_level='chunk'),key = key)
        if show_infos :
            # VizUtilsStreamlitOS.display_infos()
            StreamlitVizTracker.display_model_info(pipe.nlu_ref, pipes = [pipe])
            StreamlitVizTracker.display_footer() | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/viz/streamlit_viz/viz_building_blocks/ner.py | 0.609292 | 0.222816 | ner.py | pypi |
from typing import List
import logging
from nlu.pipe.nlu_component import NluComponent
from nlu.universe.logic_universes import AnnoTypes
from nlu.universe.feature_node_ids import NLP_NODE_IDS, NLP_HC_NODE_IDS
from nlu.universe.atoms import JslAnnoId
logger = logging.getLogger('nlu')
import inspect
from nlu.pipe.pipe_component import SparkNLUComponent
from nlu.pipe.utils.resolution.storage_ref_utils import StorageRefUtils
from nlu.universe.feature_universes import NLP_FEATURES, OCR_FEATURES
class ComponentUtils:
    """Component and Column Level logic operations and utils"""
    @staticmethod
    def config_chunk_embed_converter(converter: SparkNLUComponent) -> SparkNLUComponent:
        '''For a chunk-embedding converter being added to a pipeline, rewrite its input embedding
        column to the '<embed_col>@<storage_ref>' notation, so that it can later be matched back
        to the component that produces embeddings with that storage ref.'''
        # Storage ref of the embeddings this converter consumes.
        storage_ref = StorageRefUtils.extract_storage_ref(converter)
        input_embed_col = ComponentUtils.extract_embed_col(converter)
        # Tag the embed column with its storage ref, e.g. 'word_embeddings@glove'.
        new_embed_col_with_AT_notation = input_embed_col + "@" + storage_ref
        converter.info.inputs.remove(input_embed_col)
        converter.info.inputs.append(new_embed_col_with_AT_notation)
        converter.info.spark_input_column_names.remove(input_embed_col)
        converter.info.spark_input_column_names.append(new_embed_col_with_AT_notation)
        # Keep the underlying Spark model in sync with the rewritten input columns.
        converter.model.setInputCols(converter.info.inputs)
        return converter
@staticmethod
def clean_irrelevant_features(feature_list, remove_AT_notation=False, remove_text = True):
'''
Remove irrelevant features from a list of component features
Also remove the @notation from names, since they are irrelevant for ordering
:param feature_list: list of features
:param remove_AT_notation: remove AT notation from os_components names if true. Used for sorting
:return: list with only relevant feature names
'''
# remove irrelevant missing features for pretrained models
# Most of these should be provided externally by the user and cannot be resolved
if 'text' in feature_list and remove_text:
feature_list.remove('text')
if 'raw_text' in feature_list:
feature_list.remove('raw_text')
if 'raw_texts' in feature_list:
feature_list.remove('raw_texts')
if 'label' in feature_list:
feature_list.remove('label')
if 'sentiment_label' in feature_list:
feature_list.remove('sentiment_label')
if '%%%feature_elements%%%' in feature_list:
feature_list.remove('%%%feature_elements%%%')
if OCR_FEATURES.BINARY_IMG in feature_list:
feature_list.remove(OCR_FEATURES.BINARY_IMG)
if OCR_FEATURES.FILE_PATH in feature_list:
feature_list.remove(OCR_FEATURES.FILE_PATH)
if OCR_FEATURES.BINARY_DOCX in feature_list:
feature_list.remove(OCR_FEATURES.BINARY_DOCX)
if OCR_FEATURES.BINARY_PDF in feature_list:
feature_list.remove(OCR_FEATURES.BINARY_PDF)
if remove_AT_notation:
new_cs = []
for c in feature_list:
new_cs.append(c.split("@")[0])
return new_cs
return feature_list
    @staticmethod
    def component_has_embeddings_requirement(component: NluComponent):
        '''
        Check whether the component consumes embeddings produced elsewhere, i.e. it is a
        storage-ref consumer.
        :param component: The component to check
        :return: True if the component needs some specific embedding (glove, bert, elmo, ...), otherwise False
        '''
        return component.is_storage_ref_consumer
@staticmethod
def component_has_embeddings_provisions(component: SparkNLUComponent):
'''
Check for the input component, wether it depends on some embedding. Returns True if yes, otherwise False.
:param component: The component to check
:return: True if the component needs some specifc embedding (i.e.glove, bert, elmo etc..). Otherwise returns False
'''
if type(component) == type(list) or type(component) == type(set):
for feature in component:
if 'embed' in feature: return True
return False
else:
for feature in component.out_types:
if 'embed' in feature: return True
return False
@staticmethod
def extract_storage_ref_AT_notation_for_embeds(component: NluComponent, col='input'):
'''
Extract <col>_embed_col@storage_ref notation from a component if it has a storage ref, otherwise '
:param component: To extract notation from
:cols component: Wether to extract for the input or output col
:return: '' if no storage_ref, <col>_embed_col@storage_ref otherwise
'''
if col == 'input':
e_col = next(filter(lambda s: 'embed' in s, component.spark_input_column_names))
elif col == 'output':
e_col = next(filter(lambda s: 'embed' in s, component.spark_output_column_names))
stor_ref = StorageRefUtils.extract_storage_ref(component)
return e_col + '@' + stor_ref
    @staticmethod
    def is_embedding_provider(component: NluComponent) -> bool:
        """Check if a NLU Component returns/generates embeddings (i.e. it is a storage-ref producer)."""
        return component.is_storage_ref_producer
    @staticmethod
    def is_embedding_consumer(component: NluComponent) -> bool:
        """Check if a NLU Component consumes embeddings (i.e. it is a storage-ref consumer)."""
        return component.is_storage_ref_consumer
@staticmethod
def is_embedding_converter(component: NluComponent) -> bool:
"""Check if NLU component is embedding converter """
return component.name in [NLP_NODE_IDS.SENTENCE_EMBEDDINGS_CONVERTER,
NLP_NODE_IDS.SENTENCE_EMBEDDINGS_CONVERTER]
@staticmethod
def is_NER_provider(component: NluComponent) -> bool:
"""Check if a NLU Component wraps a NER/NER-Medical model """
if component.name in [NLP_HC_NODE_IDS.MEDICAL_NER, NLP_HC_NODE_IDS.TRAINABLE_MEDICAL_NER, NLP_NODE_IDS.NER_DL,
NLP_NODE_IDS.TRAINABLE_NER_DL, NLP_NODE_IDS.TRAINABLE_NER_CRF,
NLP_NODE_IDS.NER_CRF]: return True
if component.type == AnnoTypes.TRANSFORMER_TOKEN_CLASSIFIER: return True
    @staticmethod
    def is_NER_converter(component: NluComponent) -> bool:
        """Check if a NLU Component wraps a NER-IOB to pretty-NER converter (open source or healthcare)."""
        return component.name in [NLP_HC_NODE_IDS.NER_CONVERTER_INTERNAL, NLP_NODE_IDS.NER_CONVERTER]
@staticmethod
def extract_NER_col(component: NluComponent, column='input') -> str:
"""Extract the exact name of the NER column in the component"""
if column == 'input':
for f in component.in_types:
if f == NLP_FEATURES.NAMED_ENTITY_IOB:
return f
if column == 'output':
for f in component.out_types:
if f == NLP_FEATURES.NAMED_ENTITY_IOB:
return f
raise ValueError(f"Could not find NER col for component ={component}")
@staticmethod
def extract_NER_converter_col(component: NluComponent, column='input') -> str:
"""Extract the exact name of the NER-converter column in the component"""
if column == 'input':
for f in component.in_types:
if f == NLP_FEATURES.NAMED_ENTITY_IOB:
return f
if column == 'output':
for f in component.out_types:
if f == NLP_FEATURES.NAMED_ENTITY_CONVERTED:
return f
raise ValueError(f"Could not find NER Converter col for component ={component}")
@staticmethod
def extract_embed_col(component: NluComponent, column='input') -> str:
"""Extract the exact name of the embed column in the component"""
if column == 'input':
for c in component.spark_input_column_names:
if 'embed' in c: return c
if column == 'output':
for c in component.spark_output_column_names:
if 'embed' in c: return c
raise ValueError(f"Could not find Embed col for component ={component}")
    @staticmethod
    def is_untrained_model(component: SparkNLUComponent) -> bool:
        '''
        Check whether a given component is a still-trainable (untrained) model.
        In this case embedding requirements are ignored further down the logic pipeline.
        :param component: Component to check
        :return: True if it is trainable, False if not
        '''
        # The 'is_untrained' marker attribute is only present on trainable components' info objects.
        if 'is_untrained' in dict(inspect.getmembers(component.info)).keys(): return True
        return False
    @staticmethod
    def set_storage_ref_attribute_of_embedding_converters(pipe_list: List[NluComponent]):
        """For every embedding converter in the pipe list, set its storage_ref attribute from the
        storage ref of the embedding provider that feeds it, found by matching the converter's
        embed column against other components' column names."""
        for converter in pipe_list:
            if ComponentUtils.is_embedding_provider(converter) and ComponentUtils.is_embedding_converter(converter):
                # First find the embed col of the converter
                embed_col = ComponentUtils.extract_embed_col(converter)
                for provider in pipe_list:
                    # Now find the Embedding generator that is feeding the converter
                    # NOTE(review): this matches against the provider's *input* columns -- it may be
                    # intended to match output columns; confirm before changing.
                    if embed_col in provider.spark_input_column_names:
                        converter.storage_ref = StorageRefUtils.nlp_extract_storage_ref_nlp_model(provider.model)
                        # converter.storage_ref = StorageRefUtils.extract_storage_ref(provider)
        return pipe_list
@staticmethod
def extract_embed_level_identity(component, col='input'):
"""Figure out if component feeds on chunk/sent aka doc/word emb for either nput or output cols"""
if col == 'input':
if any(filter(lambda s: 'document_embed' in s, component.info.inputs)): return 'document_embeddings'
if any(filter(lambda s: 'sentence_embed' in s, component.info.inputs)): return 'sentence_embeddings'
if any(filter(lambda s: 'chunk_embed' in s, component.info.inputs)): return 'chunk_embeddings'
if any(filter(lambda s: 'token_embed' in s, component.info.inputs)): return 'token_embeddings'
elif col == 'output':
if any(filter(lambda s: 'document_embed' in s, component.out_types)): return 'document_embeddings'
if any(filter(lambda s: 'sentence_embed' in s, component.out_types)): return 'sentence_embeddings'
if any(filter(lambda s: 'chunk_embed' in s, component.out_types)): return 'chunk_embeddings'
if any(filter(lambda s: 'token_embed' in s, component.out_types)): return 'token_embeddings'
    @staticmethod
    def are_producer_consumer_matches(e_consumer: SparkNLUComponent, e_provider: SparkNLUComponent) -> bool:
        """Check whether an embedding consumer and an embedding producer match, i.e. they share
        the same storage ref AND operate at the same embedding level (doc/sentence/chunk/token)."""
        if StorageRefUtils.extract_storage_ref(e_consumer) == StorageRefUtils.extract_storage_ref(e_provider):
            # Consumer's input embedding level must equal the provider's output embedding level.
            if ComponentUtils.extract_embed_level_identity(e_consumer,
                                                           'input') == ComponentUtils.extract_embed_level_identity(
                e_provider, 'output'):
                return True
        ## TODO FALL BACK FOR BAD MATCHES WHICH ACTUALLY MATCH-> consult name space
        return False
@staticmethod
def get_nlu_ref_identifier(component: NluComponent) -> str:
"""The tail of a NLU ref after splitting on '.' gives a unique identifier for NON-Aliased components
If result is '' , model UID will be used as identifier
"""
tail = ''
tail = component.nlu_ref.split('.')[-1].split('@')[-1]
if tail == '':
logger.warning(
f"Could not deduct tail from component={component}. This is intended for CustomModelComponents used in offline mode")
tail = str(component.model)
return tail
    @staticmethod
    def remove_storage_ref_from_features(features: List[str]):
        """Strip the '@storage_ref' suffix from every feature name in the list."""
        return [f.split('@')[0] for f in features] | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/utils/component_utils.py | 0.865267 | 0.313288 | component_utils.py | pypi |
from typing import List, Dict
import logging
from nlu.universe.logic_universes import NLP_LEVELS, AnnoTypes
from nlu.universe.feature_node_ids import NLP_NODE_IDS
from nlu.universe.feature_universes import NLP_FEATURES
logger = logging.getLogger('nlu')
from nlu.universe.universes import Licenses
from sparknlp.base import *
from sparknlp.annotator import *
from nlu.pipe.col_substitution.col_name_substitution_utils import ColSubstitutionUtils
"""Component and Column Level logic operations and utils"""
class OutputLevelUtils():
    """Resolves the output level (token/sentence/chunk/document/...) of annotators and pipelines."""
    # Maps an output level to the feature/annotator type names that live at that level.
    levels = {
        'token': ['token', 'pos', 'ner', 'lemma', 'lem', 'stem', 'stemm', 'word_embeddings', 'named_entity',
                  'entity', 'dependency',
                  'labeled_dependency', 'dep', 'dep.untyped', 'dep.typed'],
        'sentence': ['sentence', 'sentence_embeddings', ] + ['sentiment', 'classifer', 'category'],
        'chunk': ['chunk', 'embeddings_chunk', 'chunk_embeddings'],
        'document': ['document', 'language'],
        'embedding_level': []
        # ['sentiment', 'classifer'] # WIP, wait for Spark NLP Getter/Setter fixes to implement this properly
        # embedding level annotators output levels depend on the level of the embeddings they are fed. If we have Doc/Chunk/Word/Sentence embeddings, those annotators output at the same level.
    }
    # Output level per *trainable* (approach) annotator class.
    annotator_levels_approach_based = {
        'document': [DocumentAssembler, Chunk2Doc,
                     YakeKeywordExtraction, DocumentNormalizer
                     ],
        'sentence': [SentenceDetector, SentenceDetectorDLApproach],
        'chunk': [Chunker, ChunkEmbeddings, ChunkTokenizer, Token2Chunk, TokenAssembler,
                  NerConverter, Doc2Chunk, NGramGenerator],
        'token': [NerCrfApproach, NerDLApproach,
                  PerceptronApproach,
                  Stemmer,
                  ContextSpellCheckerApproach,
                  WordSegmenterApproach,
                  Lemmatizer, LemmatizerModel, TypedDependencyParserApproach, DependencyParserApproach,
                  Tokenizer, RegexTokenizer, RecursiveTokenizer
            , DateMatcher, TextMatcher, BigTextMatcher, MultiDateMatcher,
                  WordSegmenterApproach
                  ],
        # 'sub_token': [StopWordsCleaner, DateMatcher, TextMatcher, BigTextMatcher, MultiDateMatcher],
        # these can be document or sentence
        'input_dependent': [ViveknSentimentApproach, SentimentDLApproach, ClassifierDLApproach,
                            LanguageDetectorDL,
                            MultiClassifierDLApproach, SentenceEmbeddings, NorvigSweetingApproach,
                            BertForSequenceClassification, DistilBertForTokenClassification, ],
        'multi': [MultiClassifierDLApproach, SentenceEmbeddings, NorvigSweetingApproach, ]
        # 'unclassified': [Yake, Ngram]
    }
    # Output level per *pretrained* (model) annotator class.
    annotator_levels_model_based = {
        'document': [],
        'sentence': [SentenceDetectorDLModel, ],
        'chunk': [ChunkTokenizerModel, ChunkTokenizerModel, ],
        'token': [ContextSpellCheckerModel, AlbertEmbeddings, BertEmbeddings, ElmoEmbeddings, WordEmbeddings,
                  XlnetEmbeddings, WordEmbeddingsModel,
                  # NER models are token level, they give IOB predictions and confidences for EVERY token!
                  NerDLModel, NerCrfModel, PerceptronModel, SymmetricDeleteModel, NorvigSweetingModel,
                  ContextSpellCheckerModel,
                  TypedDependencyParserModel, DependencyParserModel,
                  RecursiveTokenizerModel,
                  TextMatcherModel, BigTextMatcherModel, RegexMatcherModel,
                  WordSegmenterModel, TokenizerModel,
                  XlmRoBertaEmbeddings, RoBertaEmbeddings, DistilBertEmbeddings,
                  BertForTokenClassification, DistilBertForTokenClassification,
                  AlbertForTokenClassification, XlmRoBertaForTokenClassification,
                  RoBertaForTokenClassification, LongformerForTokenClassification,
                  XlnetForTokenClassification,
                  ],
        # 'sub_token': [TextMatcherModel, BigTextMatcherModel, RegexMatcherModel, ],
        # sub token is when annotator is token based but some tokens may be missing since dropped/cleaned
        'sub_token': [
            StopWordsCleaner, NormalizerModel
        ],
        'input_dependent': [BertSentenceEmbeddings, UniversalSentenceEncoder, ViveknSentimentModel,
                            SentimentDLModel, ClassifierDLModel,
                            MarianTransformer, T5Transformer,
                            XlmRoBertaEmbeddings, RoBertaEmbeddings, DistilBertEmbeddings,
                            ],
        'multi': [MultiClassifierDLModel, MultiClassifierDLModel, ]
    }
    # Embedding annotators grouped by the level they emit at.
    all_embeddings = {
        'token': [AlbertEmbeddings, BertEmbeddings, ElmoEmbeddings, WordEmbeddings,
                  XlnetEmbeddings, WordEmbeddingsModel],
        'input_dependent': [SentenceEmbeddings, UniversalSentenceEncoder, BertSentenceEmbeddings]
    }
    @staticmethod
    def infer_output_level(pipe):
        '''
        Infer the output level of the pipeline from its LAST relevant component and store it on
        pipe.output_level. Utility components (helper annotators, sentence detectors, tokenizers)
        are skipped. Does nothing if pipe.output_level is already set.
        '''
        if pipe.output_level == '':
            # Loop in reverse over component_list and get first non util/sentence_detector/tokenizer/doc_assembler. If there is none, take last
            bad_types = [AnnoTypes.HELPER_ANNO, AnnoTypes.SENTENCE_DETECTOR]
            bad_names = [NLP_NODE_IDS.TOKENIZER]
            for c in pipe.components[::-1]:
                if any(t in c.type for t in bad_types):
                    continue
                if any(n in c.name for n in bad_names):
                    continue
                pipe.output_level = OutputLevelUtils.resolve_component_to_output_level(pipe, c)
                logger.info(f'Inferred and set output level of pipeline to {pipe.output_level}', )
                break
            # Fallback: every component was skipped or resolution failed -> default to document level.
            # Normalizer bug that does not happen in debugger bugfix
            if pipe.output_level is None or pipe.output_level == '':
                pipe.output_level = NLP_LEVELS.DOCUMENT
                logger.info(f'Inferred and set output level of pipeline to {pipe.output_level}')
        else:
            return
    @staticmethod
    def get_output_level_of_embeddings_provider(pipe, field_type, field_name):
        '''
        Find the component that generates @field_name, then find the component from which it takes
        its embedding input, and return that provider's output level. Used to resolve the output
        level of components whose level depends on their embedding input.
        :param field_type: The type of the field we want to resolve the input level for
        :param field_name: The name of the field we want to resolve the input level for
        :return: output level of the embedding provider component
        '''
        # find the component. Column output name should be unique
        component_inputs = []
        for component in pipe.components:
            # NOTE(review): this compares the *field name* against the component's name -- it may
            # be intended to compare against the component's output column names; confirm.
            if field_name == component.info.name:
                component_inputs = component.info.spark_input_column_names
        # get the embedding feature name
        target_output_component = ''
        for input_name in component_inputs:
            if 'embed' in input_name: target_output_component = input_name
        # get the model that outputs that feature
        for component in pipe.components:
            component_outputs = component.info.spark_output_column_names
            for input_name in component_outputs:
                if target_output_component == input_name:
                    # this is the component that feeds into the component we are trying to resolve the output level for.
                    # That is so, because the output of this component matches the input of the component we are resolving
                    return pipe.resolve_type_to_output_level(component.info.type)
@staticmethod
def resolve_type_to_output_level(pipe, field_type, field_name):
'''
This checks the levels dict for what the output level is for the input annotator type.
If the annotator type depends on the embedding level, we need further checking.
@ param field_type : type of the spark field
@ param name : name of thhe spark field
@ return : String, which corrosponds to the output level of this Component.
'''
logger.info('Resolving output level for field_type=%s and field_name=%s', field_type, field_name)
if field_name == 'sentence':
logger.info('Resolved output level for field_type=%s and field_name=%s to Sentence level', field_type,
field_name)
return 'sentence'
if field_type in pipe.levels['token']:
logger.info('Resolved output level for field_type=%s and field_name=%s to Token level ', field_type,
field_name)
return 'token'
if field_type in pipe.levels['sentence']:
logger.info('Resolved output level for field_type=%s and field_name=%s to sentence level', field_type,
field_name)
return 'sentence'
if field_type in pipe.levels['chunk']:
logger.info('Resolved output level for field_type=%s and field_name=%s to Chunk level ', field_type,
field_name)
return 'chunk'
if field_type in pipe.levels['document']:
logger.info('Resolved output level for field_type=%s and field_name=%s to document level', field_type,
field_name)
return 'document'
if field_type in pipe.levels['embedding_level']:
logger.info('Resolved output level for field_type=%s and field_name=%s to embeddings level', field_type,
field_name)
return pipe.get_output_level_of_embeddings_provider(field_type, field_name) # recursive resolution
    @staticmethod
    def resolve_input_dependent_component_to_output_level(pipe, component):
        '''
        For a given NLU component which is input dependent, resolve its output level by checking
        whether its inputs stem from document- or sentence-based annotators.
        :param component: component to resolve
        :return: 'document' or 'sentence' (implicitly None if neither can be determined)
        '''
        # (1.) A classifier, which is using sentence/document. We just check input cols
        if 'document' in component.spark_input_column_names:
            return 'document'
        if 'sentence' in component.spark_input_column_names:
            return 'sentence'
        # (2.) A classifier, which is using sentence/doc embeddings.
        # We iterate over the component_list and check which Embed component is feeding the classifier and what the input that embed annotator is (sent or doc)
        for c in pipe.components:
            # check if os_components is of sentence embedding class which is always input dependent
            if any(isinstance(c.model, e) for e in OutputLevelUtils.all_embeddings['input_dependent']): # TODO refactor
                if NLP_FEATURES.DOCUMENT in c.spark_input_column_names: return NLP_FEATURES.DOCUMENT
                if NLP_FEATURES.SENTENCE in c.spark_input_column_names: return NLP_FEATURES.SENTENCE
    @staticmethod
    def resolve_component_to_output_level(pipe, component):
        '''
        For a given NLU component, resolve its output level by checking the annotator_levels dicts
        for approaches and models (and, for licensed pipelines, the healthcare registry).
        If the output level is input dependent, resolve_input_dependent_component_to_output_level resolves it.
        :param component: component to resolve
        :return: output level string (implicitly None if the model is in no registry)
        '''
        # Pretrained models first, then trainable approaches; first isinstance match wins.
        for level in OutputLevelUtils.annotator_levels_model_based.keys():
            for t in OutputLevelUtils.annotator_levels_model_based[level]:
                if isinstance(component.model, t):
                    if level == 'input_dependent':
                        return OutputLevelUtils.resolve_input_dependent_component_to_output_level(pipe, component)
                    else:
                        return level
        for level in OutputLevelUtils.annotator_levels_approach_based.keys():
            for t in OutputLevelUtils.annotator_levels_approach_based[level]:
                if isinstance(component.model, t):
                    if level == 'input_dependent':
                        return OutputLevelUtils.resolve_input_dependent_component_to_output_level(pipe, component)
                    else:
                        return level
        # Healthcare annotators live in a separate registry that is only importable with a license.
        if pipe.has_licensed_components:
            from nlu.pipe.extractors.output_level_HC_map import HC_anno2output_level
            for level in HC_anno2output_level.keys():
                for t in HC_anno2output_level[level]:
                    if isinstance(component.model, t):
                        if level == 'input_dependent':
                            return OutputLevelUtils.resolve_input_dependent_component_to_output_level(pipe, component)
                        else:
                            return level
@staticmethod
def get_output_level_mappings(pipe, df, anno_2_ex_config, get_embeddings):
"""Get a dict where key=spark_colname and val=output_level, inferred from processed dataframe and
component_list that is currently running """
output_level_map = {}
same_output_level_map = {}
not_same_output_level_map = {}
for c in pipe.components:
if 'embedding' in c.type and get_embeddings == False: continue
generated_cols = ColSubstitutionUtils.get_final_output_cols_of_component(c, df, anno_2_ex_config)
output_level = OutputLevelUtils.resolve_component_to_output_level(pipe, c)
if output_level == pipe.output_level:
for g_c in generated_cols: same_output_level_map[g_c] = output_level
else:
for g_c in generated_cols: not_same_output_level_map[g_c] = output_level
for g_c in generated_cols: output_level_map[g_c] = output_level
return output_level_map, same_output_level_map, not_same_output_level_map
@staticmethod
def get_cols_at_same_output_level(pipe, df, anno_2_ex_config, col2output_level: Dict[str, str]) -> List[str]:
"""Get List of cols which are at same output level as the component_list is currently configured to"""
same_output_level_cols = []
for c in pipe.components:
if col2output_level[c.out_types[0]] == pipe.output_level:
same_output_level_cols + ColSubstitutionUtils.get_final_output_cols_of_component(c, df,
anno_2_ex_config)
return same_output_level_cols
    @staticmethod
    def get_cols_not_at_same_output_level(pipe, df, anno_2_ex_config, col2output_level: Dict[str, str]) -> List[str]:
        """Get list of first-output-feature names of components whose output level differs from
        the pipe's current output level."""
        # NOTE(review): unlike get_cols_at_same_output_level, this returns raw out_types[0] names
        # rather than the final generated columns -- confirm this asymmetry is intended.
        return [c.out_types[0] for c in pipe.components if
                not col2output_level[c.out_types[0]] == pipe.output_level]
@staticmethod
def get_output_level_mapping_by_component(pipe) -> Dict[str, str]:
"""Get a dict where key=colname and val=output_level, inferred from processed dataframe and component_list
that is currently running """
nlp_levels = {c: OutputLevelUtils.resolve_component_to_output_level(pipe, c) for c in pipe.components}
for c in pipe.components :
if c.license == Licenses.ocr:
nlp_levels[c] = c.output_level
return {c: OutputLevelUtils.resolve_component_to_output_level(pipe, c) for c in pipe.components} | /roflma-3.4.0.tar.gz/roflma-3.4.0/nlu/pipe/utils/output_level_resolution_utils.py | 0.842572 | 0.277932 | output_level_resolution_utils.py | pypi |
import glob
import logging
import os
from typing import List
import pyspark
import numpy as np
import pandas as pd
logger = logging.getLogger('nlu')
class OcrDataConversionUtils:
    """Utilities for validating and extracting file paths from the various data formats accepted
    by OCR pipelines (str, list, numpy array, pandas and Spark dataframes/series)."""
    @staticmethod
    def validate_OCR_compatible_inputs(data):
        """Validate that the input data points to existing files/folders: either directly (str,
        list, ndarray of paths) or via a 'path' column/name (pandas/Spark objects).
        :return: True/False; False for unsupported input types."""
        if isinstance(data, List):
            return OcrDataConversionUtils.check_iterable_paths_are_valid(data)
        if isinstance(data, str):
            return OcrDataConversionUtils.check_iterable_paths_are_valid([data])
        if isinstance(data, pd.DataFrame):
            return 'path' in data.columns
        if isinstance(data, pd.Series):
            # NOTE(review): checks for the substring 'path' in the Series *name* -- confirm intended.
            return 'path' in data.name
        if isinstance(data, pyspark.sql.dataframe.DataFrame):
            return 'path' in data.columns
        if isinstance(data, np.ndarray):
            return OcrDataConversionUtils.check_iterable_paths_are_valid(data)
        # BUG FIX: previously fell through returning None for unsupported input types.
        return False
    @staticmethod
    def check_iterable_paths_are_valid(iterable_paths):
        """Return True if every element points to an existing file or folder.
        Invalid paths are reported via a printed warning but do not raise."""
        paths_validness = []
        for p in iterable_paths:
            if os.path.isdir(p) or os.path.isfile(p):
                paths_validness.append(True)
            else:
                print(f'Warning : Invalid path for folder or file in input. Could validate path.\n'
                      f'NLU will try and ignore this issue but you might run into errors.\n'
                      f'Please make sure all paths are valid\n')
                print(f'For path = {p}')
                paths_validness.append(False)
        return all(paths_validness)
    @staticmethod
    def check_all_paths_point_to_accepted_file_type(paths, file_types):
        """Validate that all paths point to a file type defined by file_types (not implemented yet)."""
        pass
    @staticmethod
    def glob_files_of_accepted_type(paths, file_types):
        """Collect all file paths of an accepted type from an iterable of file and folder paths.
        1. A path to a file is accepted if its suffix matches one of file_types (case-insensitive).
        2. A path to a folder is searched recursively for files with accepted suffixes.
        :param paths: iterable of file/folder paths
        :param file_types: iterable of file suffixes, e.g. ['pdf', 'png']
        :return: list of accepted file paths
        """
        accepted_types = {t.lower() for t in file_types}
        accepted_file_paths = []
        for p in paths:
            if os.path.isfile(p):
                if p.lower().split('.')[-1] in accepted_types:
                    accepted_file_paths.append(p)
            elif os.path.isdir(p):
                # BUG FIX: the folder branch used case-sensitive glob patterns, so e.g. 'a.PDF'
                # matched when given directly as a file path but was missed inside folders.
                for f in glob.glob(p + '/**/*', recursive=True):
                    if os.path.isfile(f) and f.lower().split('.')[-1] in accepted_types:
                        accepted_file_paths.append(f)
            else:
                # Also avoids printing this warning once per file type, as the original loop order did.
                print(f"Invalid path = {p} pointing neither to file or folder on this machine")
        return accepted_file_paths
    @staticmethod
    def extract_iterable_paths_from_data(data):
        """Normalize any supported input format into a plain list of path strings."""
        if isinstance(data, List):
            return data
        if isinstance(data, str):
            return [data]
        if isinstance(data, pd.DataFrame):
            return list(data['path'].values)
        if isinstance(data, pd.Series):
            return list(data.values)
        if isinstance(data, pyspark.sql.dataframe.DataFrame):
            return [p['path'] for p in data.select('path').collect()]
        if isinstance(data, np.ndarray):
            return list(data)
    @staticmethod
    def get_accepted_ocr_file_types(pipe):
        """Union of all file suffixes processable by the pipeline's components (deduplicated,
        unordered)."""
        accepted_files = []
        for c in pipe.components:
            if c.applicable_file_types:
                accepted_files += c.applicable_file_types
        return list(set(accepted_files))
import glob
import logging
from typing import List
import os
import numpy as np
import pyspark
import sparknlp
from pyspark.sql.functions import monotonically_increasing_id
from nlu.pipe.utils.ocr_data_conversion_utils import OcrDataConversionUtils
logger = logging.getLogger('nlu')
from nlu.pipe.pipe_logic import PipeUtils
import pandas as pd
from nlu.pipe.utils.data_conversion_utils import DataConversionUtils
def __predict_standard_spark(pipe, data, output_level, positions, keep_stranger_features, metadata,
                             drop_irrelevant_cols, return_spark_df, get_embeddings):
    """Run the fitted Spark pipeline over ``data`` and return the (pythonified) result.

    Converts the input into a Spark DataFrame, applies the transformer pipeline, and —
    unless ``return_spark_df`` is set — converts the raw Spark output into the
    user-facing representation via ``pipe.pythonify_spark_dataframe``.
    """
    # Convert any supported input type into a Spark DataFrame.
    sdf, stranger_features, output_datatype = DataConversionUtils.to_spark_df(data, pipe.spark, pipe.raw_text_column)
    # Apply the Spark transformer pipeline.
    sdf = pipe.spark_transformer_pipe.transform(sdf)
    if return_spark_df:
        # Caller asked for the raw Spark DataFrame result of the pipeline prediction.
        return sdf
    # Convert the resulting Spark DF into the nicer (by default pandas) format.
    return pipe.pythonify_spark_dataframe(sdf,
                                          keep_stranger_features=keep_stranger_features,
                                          stranger_features=stranger_features,
                                          output_metadata=metadata,
                                          drop_irrelevant_cols=drop_irrelevant_cols,
                                          positions=positions,
                                          output_level=output_level,
                                          get_embeddings=get_embeddings
                                          )
def __predict_multi_threaded_light_pipe(pipe, data, output_level, positions, keep_stranger_features, metadata,
                                        drop_irrelevant_cols, get_embeddings):
    """Predict via Spark NLP's multithreaded LightPipeline.

    Converts the input to pandas, fullAnnotates the text column and joins the
    annotations back onto the original inputs. This path does NOT yield embeddings.
    """
    # Normalize the input into a pandas DataFrame with a text column.
    pdf, stranger_features, output_datatype = DataConversionUtils.to_pandas_df(data, pipe.raw_text_column)
    # Annotate the texts and join the annotation columns onto the original inputs.
    annotations = pd.DataFrame(pipe.light_spark_transformer_pipe.fullAnnotate(pdf.text.values))
    pdf = pdf.join(annotations)
    return pipe.pythonify_spark_dataframe(pdf,
                                          keep_stranger_features=keep_stranger_features,
                                          stranger_features=stranger_features,
                                          output_metadata=metadata,
                                          drop_irrelevant_cols=drop_irrelevant_cols,
                                          positions=positions,
                                          output_level=output_level,
                                          get_embeddings=get_embeddings
                                          )
def __predict_ocr_spark(pipe, data, output_level, positions, keep_stranger_features, metadata,
                        drop_irrelevant_cols, get_embeddings):
    """
    Check if there are any OCR components in the Pipe.
    If yes, we verify data contains pointer to folder or image files.
    If yes, df = spark.read.format("binaryFile").load(imagePath)
    Run OCR pipe on df and pythonify procedure afterwards
    """
    pipe.fit()
    # Validate and extract file/folder paths from the input, filtered to accepted types.
    OcrDataConversionUtils.validate_OCR_compatible_inputs(data)
    paths = OcrDataConversionUtils.extract_iterable_paths_from_data(data)
    accepted_file_types = OcrDataConversionUtils.get_accepted_ocr_file_types(pipe)
    file_paths = OcrDataConversionUtils.glob_files_of_accepted_type(paths, accepted_file_types)
    # Fetch the Spark Session that has already been licensed.
    spark = sparknlp.start()
    # Load all accepted files as binary blobs and run the OCR pipeline on them.
    binary_df = spark.read.format("binaryFile").load(file_paths)
    transformed = pipe.spark_transformer_pipe.transform(binary_df) \
        .withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
    return pipe.pythonify_spark_dataframe(transformed,
                                          keep_stranger_features=keep_stranger_features,
                                          output_metadata=metadata,
                                          drop_irrelevant_cols=drop_irrelevant_cols,
                                          positions=positions,
                                          output_level=output_level,
                                          get_embeddings=get_embeddings
                                          )
def __predict__(pipe, data, output_level, positions, keep_stranger_features, metadata, multithread,
                drop_irrelevant_cols, return_spark_df, get_embeddings):
    '''
    Annotates a Pandas Dataframe/Pandas Series/Numpy Array/Spark DataFrame/Python List strings /Python String
    :param data: Data to predict on
    :param output_level: output level, either document/sentence/chunk/token
    :param positions: whether to output indexes that map predictions back to position in origin string
    :param keep_stranger_features: whether to keep columns in the dataframe that are not generated by pandas. I.e. when you send a dataframe with 10 columns and only one of them is named text, the returned dataframe will only contain the text column when set to false
    :param metadata: whether to keep additional metadata in final df or not like confidences of every possible class for predictions.
    :param multithread: Whether to use multithreading based lightpipeline. In some cases, this may cause errors.
    :param drop_irrelevant_cols: Whether to drop cols of different output levels, i.e. when predicting token level and drop_irrelevant_cols = True then chunk, sentence and Doc will be dropped
    :param return_spark_df: Prediction results will be returned right after transforming with the Spark NLP pipeline
    :param get_embeddings: Whether to fetch embedding vectors; None defaults to True iff the nlu reference is an embedding model.
    :return: annotated data (pandas by default, raw Spark DataFrame when return_spark_df is set)
    '''
    if output_level != '':
        pipe.output_level = output_level
    # Reconfigure component output levels to match the requested output level.
    if output_level in ('sentence', 'document'):
        pipe.components = PipeUtils.configure_component_output_levels(pipe)
    if output_level in ('token', 'chunk', 'relation'):
        pipe.components = PipeUtils.configure_component_output_levels(pipe, 'document')
    if get_embeddings is None:
        # Grab embeddings by default iff the nlu ref is of type embed.
        get_embeddings = 'embed' in pipe.nlu_ref
    if not pipe.is_fitted:
        # Trainable pipes need the data for fitting; others fit without it.
        if pipe.has_trainable_components:
            pipe.fit(data)
        else:
            pipe.fit()
    # Configure LightPipeline usage based on input size and the multithread flag.
    pipe.configure_light_pipe_usage(DataConversionUtils.size_of(data), multithread)
    if pipe.contains_ocr_components:
        # OCR processing path
        try:
            return __predict_ocr_spark(pipe, data, output_level, positions, keep_stranger_features,
                                       metadata, drop_irrelevant_cols, get_embeddings=get_embeddings)
        except Exception as err:
            logger.warning(f"Predictions Failed={err}")
            pipe.print_exception_err(err)
            raise Exception("Failure to process data with NLU OCR pipe")
    if return_spark_df:
        try:
            # BUG FIX: get_embeddings was previously omitted from this call, raising a
            # TypeError (missing positional argument) whenever return_spark_df=True.
            return __predict_standard_spark(pipe, data, output_level, positions, keep_stranger_features, metadata,
                                            drop_irrelevant_cols, return_spark_df, get_embeddings)
        except Exception as err:
            logger.warning(f"Predictions Failed={err}")
            pipe.print_exception_err(err)
            raise Exception("Failure to process data with NLU")
    elif not get_embeddings and multithread:
        # Try multithreaded LightPipeline first, fall back to vanilla Spark. No embeddings in this mode.
        try:
            return __predict_multi_threaded_light_pipe(pipe, data, output_level, positions, keep_stranger_features,
                                                       metadata, drop_irrelevant_cols, get_embeddings=get_embeddings)
        except Exception as err:
            logger.warning(
                f"Multithreaded mode with Lightpipeline failed. trying to predict again with non multithreaded mode, err={err}")
            try:
                return __predict_standard_spark(pipe, data, output_level, positions, keep_stranger_features, metadata,
                                                drop_irrelevant_cols, return_spark_df, get_embeddings)
            except Exception as err:
                logger.warning(f"Predictions Failed={err}")
                pipe.print_exception_err(err)
                raise Exception("Failure to process data with NLU")
    else:
        # Standard predict with no fallback
        try:
            return __predict_standard_spark(pipe, data, output_level, positions, keep_stranger_features, metadata,
                                            drop_irrelevant_cols, return_spark_df, get_embeddings)
        except Exception as err:
            logger.warning(f"Predictions Failed={err}")
            pipe.print_exception_err(err)
            raise Exception("Failure to process data with NLU")
def debug_print_pipe_cols(pipe):
    """Print, for every component in the pipe, its input columns, name and output columns."""
    for component in pipe.components:
        in_cols = component.spark_input_column_names
        out_cols = component.spark_output_column_names
        print(f'{in_cols}->{component.name}->{out_cols}')
from sparknlp.annotator import *
import inspect
import logging
logger = logging.getLogger('nlu')
from nlu.pipe.utils.component_utils import ComponentUtils
import nlu
import requests
class ModelHubUtils():
    """Pipe Level logic operations and utils for resolving NLU references against Modelhub."""
    modelhub_json_url = 'https://nlp.johnsnowlabs.com/models.json'
    # Fetched once when the class body executes; requires network access at import time.
    data = requests.get(modelhub_json_url).json()

    @staticmethod
    def NLU_ref_to_NLP_ref(nlu_ref: str, lang: str = None) -> str:
        """Resolve a Spark NLU reference to a NLP reference.
        Args :
            nlu_ref : which nlu model's nlp reference to return.
            lang : what language is the model in.
        Returns None if no namespace contains the reference.
        """
        nlu_namespaces_to_check = [nlu.Spellbook.pretrained_pipe_references,
                                   nlu.Spellbook.pretrained_models_references,
                                   nlu.Spellbook.pretrained_healthcare_model_references,
                                   nlu.Spellbook.licensed_storage_ref_2_nlu_ref,
                                   nlu.Spellbook.storage_ref_2_nlu_ref]  # component_alias_references
        # BUG FIX: the lang check was nested inside the namespace loop, so when lang was
        # falsy the full all-language scan was redundantly repeated once per namespace.
        if lang:
            for dict_ in nlu_namespaces_to_check:
                if lang in dict_.keys():
                    for reference in dict_[lang]:
                        if reference == nlu_ref:
                            return dict_[lang][reference]
        else:
            for dict_ in nlu_namespaces_to_check:
                for lang_key in dict_:
                    for reference in dict_[lang_key]:
                        if reference == nlu_ref:
                            return dict_[lang_key][reference]

    @staticmethod
    def get_url_by_nlu_refrence(nlu_refrence: str) -> str:
        """Resolve a URL for an NLU reference.
        Args :
            nlu_refrence : Which nlu reference's url to return.
        Returns None when no Modelhub entry matches.
        """
        # BUG FIX: method was missing @staticmethod (the first positional argument was
        # consumed as `self` when called on an instance).
        nlp_refrence = ModelHubUtils.NLU_ref_to_NLP_ref(nlu_refrence)
        if nlp_refrence is None:
            print(nlp_refrence, " ", nlu_refrence)
            # BUG FIX: previously execution fell through and crashed on nlp_refrence.split below.
            return None
        for model in ModelHubUtils.data:
            if (model['language'] in nlu_refrence.split(".") or model['language'] in nlp_refrence.split('_')) \
                    and model['name'] == nlp_refrence:
                return f"https://nlp.johnsnowlabs.com/{model['url']}"

    @staticmethod
    def return_json_entry(nlp_refrence: str, language: str) -> dict:
        """Resolve a Json entry for an nlp_refrence.
        Args:
            nlp_refrence: What nlp_refrence to resolve.
            language : Which language the model is in.
        Returns None when no entry matches.
        """
        # BUG FIX: method was missing @staticmethod.
        for model in ModelHubUtils.data:
            if model['language'] == language and model["name"] == nlp_refrence:
                return model
import logging
from nlu.pipe.nlu_component import NluComponent
from nlu.pipe.utils.resolution import uid_to_storage_ref as uid2storageref
logger = logging.getLogger('nlu')
"""Storage Ref logic operations and utils"""
class StorageRefUtils:
    """Storage-ref logic operations: read and resolve storage refs on NLU components
    and the Spark NLP annotators they wrap."""

    @staticmethod
    def has_storage_ref(component: NluComponent):
        """Storage ref is either on the model or nlu component defined"""
        return component.has_storage_ref

    @staticmethod
    def extract_storage_ref(component: NluComponent):
        """Extract storage ref from either a NLU component or NLP Annotator. First checks if annotator has storage
        ref, otherwise check NLU attribute """
        # TODO converters have no getStorageRef() and we must read it from nlu_component if defined!
        if not StorageRefUtils.has_storage_ref(component):
            raise ValueError(
                f'Tried to extract storage ref from component which has no storageref ! Component = {component}')
        return StorageRefUtils.nlp_extract_storage_ref_nlp_model(component)

    @staticmethod
    def fallback_storage_ref_resolutions(storage_ref):
        """
        For every storage ref result, we check if its storage ref is defined as its UID and if a fallback storageref
        is available. If available, alternative is returned, otherwise original
        """
        return uid2storageref.mappings.get(storage_ref, storage_ref)

    @staticmethod
    def has_component_storage_ref_or_anno_storage_ref(component: NluComponent):
        """Storage ref is either on the model or nlu component defined """
        return component.has_storage_ref

    @staticmethod
    def nlp_component_has_storage_ref(model):
        """Check if a storage ref is defined on the Spark NLP Annotator model"""
        return any(param.name == 'storageRef' for param in model.extractParamMap())

    @staticmethod
    def extract_storage_ref_from_component(component):
        """Extract storage ref from a NLU component which embellished a Spark NLP Annotator"""
        if StorageRefUtils.nlu_component_has_storage_ref(component):
            return component.info.storage_ref
        if StorageRefUtils.nlp_component_has_storage_ref(component):
            return StorageRefUtils.nlp_extract_storage_ref_nlp_model(component)
        return ''

    @staticmethod
    def nlu_extract_storage_ref_nlp_model(component):
        """Extract storage ref from a NLU component which embellished a Spark NLP Annotator"""
        param_map = component.model.extractParamMap()
        return param_map[component.model.getParam('storageRef')]

    @staticmethod
    def nlu_component_has_storage_ref(component):
        """Check if a storage ref is defined on the Spark NLP Annotator embellished by the NLU Component"""
        return hasattr(component.info, 'storage_ref')

    @staticmethod
    def nlp_extract_storage_ref_nlp_model(component: NluComponent):
        """Extract storage ref from a NLU component which embellished a Spark NLP Annotator"""
        # Embedding Converters don't have storage ref attribute on class, but NLU component has attribute for it
        for param in component.model.extractParamMap():
            if param.name == 'storageRef':
                storage_ref = component.model.extractParamMap()[component.model.getParam('storageRef')]
                # For untrained components storage ref will be none
                return storage_ref if storage_ref else ''
        return component.storage_ref if component.storage_ref else ''
import logging
logger = logging.getLogger('nlu')
from pyspark.sql.dataframe import DataFrame
import numpy as np
import pandas as pd
from pyspark.sql.types import ByteType, ShortType, IntegerType, LongType, FloatType, \
DoubleType, BooleanType, MapType, TimestampType, StructType, DataType
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
from pyspark.sql.pandas.utils import require_minimum_pandas_version
import pyarrow
class PaConversionUtils():
    """Spark-to-pandas conversion helpers backed by PyArrow."""

    @staticmethod
    def convert_via_pyarrow(sdf: DataFrame) -> pd.DataFrame:
        """Convert a Spark Dataframe to a pandas Dataframe using PyArrow shared memory blocks between Spark and Pandas backends.
        Args:
            sdf:DataFrame
        """
        require_minimum_pandas_version()
        timezone = sdf.sql_ctx._conf.sessionLocalTimeZone()
        # Collect Arrow batches under temporary unique column names to avoid
        # problems with duplicated column names.
        safe_names = [f'col_{idx}' for idx in range(len(sdf.columns))]
        arrow_batches = sdf.toDF(*safe_names)._collect_as_arrow()
        if not arrow_batches:
            # No data collected: return an empty frame with the original columns.
            return pd.DataFrame.from_records([], columns=sdf.columns)
        # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
        # values, but we should use datetime.date to match the behavior with when
        # Arrow optimization is disabled.
        pdf = pyarrow.Table.from_batches(arrow_batches).to_pandas(date_as_object=True)
        # Rename back to the original column names.
        pdf.columns = sdf.columns
        # Post-process timestamp and map columns to match non-Arrow collection semantics.
        for field in sdf.schema:
            if isinstance(field.dataType, TimestampType):
                pdf[field.name] = _check_series_localize_timestamps(pdf[field.name], timezone)
            elif isinstance(field.dataType, MapType):
                pdf[field.name] = _convert_map_items_to_dict(pdf[field.name])
        return pdf
from nlu.pipe.pipe_component import SparkNLUComponent
class Classifier(SparkNLUComponent):
    """NLU component that resolves and loads a classifier-type annotator.

    The constructor first infers the concrete ``annotator_class`` from
    ``nlu_ref``/``nlp_ref`` substrings (unless ``do_ref_checks`` is False), then either
    wraps the provided ``model`` or loads a default / pretrained / trainable model for
    the resolved annotator class. Licensed (healthcare) models are fetched from the
    'clinical/models' bucket.
    """

    def __init__(self, annotator_class='sentiment_dl', language='en', component_type='classifier', get_default=True,
                 model=None, nlp_ref='', nlu_ref='', trainable=False, is_licensed=False, do_ref_checks=True,
                 loaded_from_pretrained_pipe=False):
        # --- 1. Infer the concrete annotator class from nlu_ref / nlp_ref ---
        if do_ref_checks:
            if 'e2e' in nlu_ref or 'toxic' in nlu_ref:
                annotator_class = 'multi_classifier'
            elif 'e2e' in nlp_ref or 'toxic' in nlp_ref:
                annotator_class = 'multi_classifier'
            elif 'distilbert_sequence' in nlp_ref or 'distilbert_sequence' in nlu_ref:
                annotator_class = 'seq_distilbert'
            elif 'bert_sequence' in nlp_ref or 'bert_sequence' in nlu_ref:
                annotator_class = 'seq_bert'
            elif 'token_bert' in nlp_ref or 'token_bert' in nlu_ref:
                annotator_class = 'token_bert'
            elif 'token_distilbert' in nlp_ref or 'token_distilbert' in nlu_ref:
                annotator_class = 'token_distilbert'
            elif 'token_distilroberta' in nlp_ref or 'token_distilroberta' in nlu_ref:
                annotator_class = 'token_roberta'
            elif 'token_xlm_roberta' in nlp_ref or 'token_xlm_roberta' in nlu_ref:
                annotator_class = 'token_xlm_roberta'
            elif 'token_roberta' in nlp_ref or 'token_roberta' in nlu_ref:
                annotator_class = 'token_roberta'
            elif 'token_albert' in nlp_ref or 'token_albert' in nlu_ref:
                annotator_class = 'token_albert'
            elif 'token_xlnet' in nlp_ref or 'token_xlnet' in nlu_ref:
                annotator_class = 'token_xlnet'
            elif 'token_longformer' in nlp_ref or 'token_longformer' in nlu_ref:
                annotator_class = 'token_longformer'
            elif 'multiclassifierdl' in nlp_ref:
                annotator_class = 'multi_classifier'
            elif 'classifierdl' in nlp_ref:
                annotator_class = 'classifier_dl'
            elif 'yake' in nlu_ref:
                annotator_class = 'yake'
            elif 'yake' in nlp_ref:
                annotator_class = 'yake'
            elif 'sentimentdl' in nlp_ref:
                annotator_class = 'sentiment_dl'
            elif 'vivekn' in nlp_ref or 'vivekn' in nlu_ref:
                # BUG FIX: the second condition previously tested nlp_ref twice,
                # so nlu_ref was never checked for 'vivekn'.
                annotator_class = 'vivekn_sentiment'
            elif 'wiki_' in nlu_ref or 'wiki_' in nlp_ref:
                annotator_class = 'language_detector'
            elif 'pos' in nlu_ref and 'ner' not in nlu_ref:
                annotator_class = 'pos'
            elif 'pos' in nlp_ref and 'ner' not in nlp_ref:
                annotator_class = 'pos'
            elif 'icd' in nlu_ref and 'med_ner' not in nlu_ref:
                annotator_class = 'classifier_dl'
            elif 'med_ner' in nlu_ref:
                annotator_class = 'ner_healthcare'
            elif 'generic_classifier' in nlu_ref:
                annotator_class = 'generic_classifier'
            elif 'ner' in nlu_ref and 'generic' not in nlu_ref:
                annotator_class = 'ner'
            elif 'ner' in nlp_ref and 'generic' not in nlp_ref:
                annotator_class = 'ner'
        # --- 2. Wrap the provided model, or load one for the resolved annotator class ---
        if model is not None:
            self.model = model
            from sparknlp.annotator import NerDLModel, NerCrfModel
            if isinstance(self.model, (NerDLModel, NerCrfModel)):
                self.model.setIncludeConfidence(True)
            elif is_licensed:
                from sparknlp_jsl.annotator import MedicalNerModel
                if isinstance(self.model, MedicalNerModel): self.model.setIncludeConfidence(True)
        else:
            if 'seq_distilbert' == annotator_class:
                from nlu import SeqDilstilBertClassifier
                if get_default:
                    self.model = SeqDilstilBertClassifier.get_default_model()
                elif is_licensed:
                    self.model = SeqDilstilBertClassifier.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = SeqDilstilBertClassifier.get_pretrained_model(nlp_ref, language)
            elif 'seq_bert' == annotator_class:
                from nlu import SeqBertClassifier
                if get_default:
                    self.model = SeqBertClassifier.get_default_model()
                elif is_licensed:
                    self.model = SeqBertClassifier.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = SeqBertClassifier.get_pretrained_model(nlp_ref, language)
            elif 'sentiment' in annotator_class and 'vivekn' not in annotator_class:
                from nlu import SentimentDl
                if trainable:
                    self.model = SentimentDl.get_default_trainable_model()
                elif is_licensed:
                    self.model = SentimentDl.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
                elif get_default:
                    self.model = SentimentDl.get_default_model()
                else:
                    self.model = SentimentDl.get_pretrained_model(nlp_ref, language)
            elif 'token_distilbert' == annotator_class:
                from nlu import TokenDistilBert
                if get_default:
                    self.model = TokenDistilBert.get_default_model()
                elif is_licensed:
                    self.model = TokenDistilBert.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenDistilBert.get_pretrained_model(nlp_ref, language)
            elif 'token_bert' == annotator_class:
                from nlu import TokenBert
                if get_default:
                    self.model = TokenBert.get_default_model()
                elif is_licensed:
                    self.model = TokenBert.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenBert.get_pretrained_model(nlp_ref, language)
            elif 'token_xlm_roberta' == annotator_class:
                from nlu import TokenXlmRoBerta
                if get_default:
                    self.model = TokenXlmRoBerta.get_default_model()
                elif is_licensed:
                    self.model = TokenXlmRoBerta.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenXlmRoBerta.get_pretrained_model(nlp_ref, language)
            elif 'token_roberta' == annotator_class:
                from nlu import TokenRoBerta
                if get_default:
                    self.model = TokenRoBerta.get_default_model()
                elif is_licensed:
                    self.model = TokenRoBerta.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenRoBerta.get_pretrained_model(nlp_ref, language)
            elif 'token_albert' == annotator_class:
                from nlu import TokenAlbert
                if get_default:
                    self.model = TokenAlbert.get_default_model()
                elif is_licensed:
                    self.model = TokenAlbert.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenAlbert.get_pretrained_model(nlp_ref, language)
            elif 'token_longformer' == annotator_class:
                from nlu import TokenLongFormer
                if get_default:
                    self.model = TokenLongFormer.get_default_model()
                elif is_licensed:
                    self.model = TokenLongFormer.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenLongFormer.get_pretrained_model(nlp_ref, language)
            elif 'token_xlnet' == annotator_class:
                from nlu import TokenXlnet
                if get_default:
                    self.model = TokenXlnet.get_default_model()
                elif is_licensed:
                    self.model = TokenXlnet.get_pretrained_model(nlp_ref, language, 'clinical/models')
                else:
                    self.model = TokenXlnet.get_pretrained_model(nlp_ref, language)
            elif 'generic_classifier' in annotator_class:
                from nlu.components.classifiers.generic_classifier.generic_classifier import GenericClassifier
                if trainable:
                    self.model = GenericClassifier.get_default_trainable_model()
                else:
                    self.model = GenericClassifier.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
            elif 'vivekn' in annotator_class:
                from nlu import ViveknSentiment
                if get_default:
                    self.model = ViveknSentiment.get_default_model()
                else:
                    self.model = ViveknSentiment.get_pretrained_model(nlp_ref, language)
            elif 'ner' in annotator_class and 'ner_healthcare' not in annotator_class:
                from nlu import NERDL
                if trainable:
                    self.model = NERDL.get_default_trainable_model()
                elif is_licensed:
                    self.model = NERDL.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
                elif get_default:
                    self.model = NERDL.get_default_model()
                else:
                    self.model = NERDL.get_pretrained_model(nlp_ref, language)
                if hasattr(self, 'model'): self.model.setIncludeConfidence(True)
            elif 'ner.crf' in annotator_class:
                from nlu import NERDLCRF
                if get_default:
                    self.model = NERDLCRF.get_default_model()
                else:
                    self.model = NERDLCRF.get_pretrained_model(nlp_ref, language)
                if hasattr(self, 'model'): self.model.setIncludeConfidence(True)
            elif ('classifier_dl' in annotator_class or annotator_class == 'toxic') and not 'multi' in annotator_class:
                from nlu import ClassifierDl
                if trainable:
                    self.model = ClassifierDl.get_trainable_model()
                elif is_licensed:
                    self.model = ClassifierDl.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
                elif get_default:
                    self.model = ClassifierDl.get_default_model()
                else:
                    self.model = ClassifierDl.get_pretrained_model(nlp_ref, language)
                if hasattr(self.model, 'setIncludeConfidence'): self.model.setIncludeConfidence(True)
            elif 'language_detector' in annotator_class:
                from nlu import LanguageDetector
                if get_default:
                    self.model = LanguageDetector.get_default_model()
                else:
                    self.model = LanguageDetector.get_pretrained_model(nlp_ref, language)
            elif 'pos' in annotator_class:
                from nlu import PartOfSpeechJsl
                if trainable:
                    self.model = PartOfSpeechJsl.get_default_trainable_model()
                elif get_default:
                    self.model = PartOfSpeechJsl.get_default_model()
                elif is_licensed:
                    self.model = PartOfSpeechJsl.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
                else:
                    self.model = PartOfSpeechJsl.get_pretrained_model(nlp_ref, language)
            elif 'yake' in annotator_class:
                from nlu import Yake
                # Yake is unsupervised; only a default model exists.
                self.model = Yake.get_default_model()
            elif 'multi_classifier' in annotator_class:
                from nlu import MultiClassifier
                if trainable:
                    self.model = MultiClassifier.get_default_trainable_model()
                elif get_default:
                    self.model = MultiClassifier.get_default_model()
                else:
                    self.model = MultiClassifier.get_pretrained_model(nlp_ref, language)
            elif 'ner_healthcare' in annotator_class:
                from nlu.components.classifiers.ner_healthcare.ner_dl_healthcare import NERDLHealthcare
                if trainable:
                    self.model = NERDLHealthcare.get_default_trainable_model()
                else:
                    self.model = NERDLHealthcare.get_pretrained_model(nlp_ref, language, bucket='clinical/models')
        SparkNLUComponent.__init__(self, annotator_class, component_type, nlu_ref, nlp_ref, language,
                                   loaded_from_pretrained_pipe, is_licensed)
from nlu.pipe.pipe_component import SparkNLUComponent
class Util(SparkNLUComponent):
    """NLU component wrapping simple utility annotators (document assembler,
    sentence detectors, converters, embeddings helpers, ...)."""

    def __init__(self, annotator_class='document_assembler', component_type='util', model=None,
                 loaded_from_pretrained_pipe=False, nlu_ref='', nlp_ref='', lang='en', is_licensed=False):
        # 'ner_converter' is an alias for 'ner_to_chunk_converter'.
        if annotator_class == 'ner_converter':
            annotator_class = 'ner_to_chunk_converter'
        if model is not None:
            self.model = model
        elif annotator_class == 'document_assembler':
            from nlu import SparkNlpDocumentAssembler
            self.model = SparkNlpDocumentAssembler.get_default_model()
        elif annotator_class == 'deep_sentence_detector':
            from nlu import SentenceDetectorDeep
            self.model = SentenceDetectorDeep.get_default_model()
        elif annotator_class == 'sentence_detector':
            from nlu import SparkNLPSentenceDetector
            self.model = SparkNLPSentenceDetector.get_default_model()
        elif annotator_class == 'ner_to_chunk_converter':
            from nlu import NerToChunkConverter
            self.model = NerToChunkConverter.get_default_model()
        elif annotator_class == 'sentence_embeddings':
            from nlu import SparkNLPSentenceEmbeddings
            self.model = SparkNLPSentenceEmbeddings.get_default_model()
        elif annotator_class == 'feature_assembler':
            from nlu.components.utils.feature_assembler.feature_assembler import SparkNLPFeatureAssembler
            self.model = SparkNLPFeatureAssembler.get_default_model()
        elif annotator_class == 'ner_to_chunk_converter_licensed':
            from nlu.components.utils.ner_to_chunk_converter_licensed.ner_to_chunk_converter_licensed import NerToChunkConverterLicensed
            self.model = NerToChunkConverterLicensed.get_default_model()
        elif annotator_class == 'chunk_merger':
            from nlu.components.utils.chunk_merger.chunk_merger import ChunkMerger
            self.model = ChunkMerger.get_default_model()
        elif annotator_class == 'doc2chunk':
            from nlu.components.utils.doc2chunk.doc_2_chunk import Doc_2_Chunk
            self.model = Doc_2_Chunk.get_default_model()
        elif annotator_class == 'chunk_2_doc':
            from nlu.components.utils.chunk_2_doc.doc_2_chunk import Chunk_2_Doc
            self.model = Chunk_2_Doc.get_default_model()
        # NOTE: for unknown annotator_class values self.model stays unset, as before.
        SparkNLUComponent.__init__(self, annotator_class, component_type, nlu_ref, lang,
                                   loaded_from_pretrained_pipe=loaded_from_pretrained_pipe)
import nlu
from nlu.pipe.pipe_component import SparkNLUComponent
class Matcher(SparkNLUComponent):
    """NLU component that resolves and loads a matcher annotator
    (date / regex / text matcher or context parser) from NLU/NLP references."""

    def __init__(self, annotator_class='date_matcher', language='en', component_type='matcher', get_default=False,
                 nlp_ref='', model=None, nlu_ref='', dataset='', is_licensed=False, loaded_from_pretrained_pipe=False):
        # --- 1. Infer the concrete matcher type from the references ---
        if 'date' in nlp_ref or 'date' in nlu_ref:
            annotator_class = 'date_matcher'
        elif 'regex' in nlp_ref or 'regex' in nlu_ref:
            annotator_class = 'regex_matcher'
        elif 'context' in nlu_ref:
            annotator_class = 'context_parser'
        elif 'text' in nlp_ref or 'text' in nlu_ref:
            annotator_class = 'text_matcher'
        elif '_matcher' not in annotator_class:
            annotator_class = annotator_class + '_matcher'
        # --- 2. Wrap the provided model, or load one for the resolved matcher type ---
        if model is not None:
            self.model = model
        else:
            if 'context' in annotator_class:
                from nlu.components.matchers.context_parser.context_parser import ContextParser
                is_licensed = True
                # BUG FIX: both arms of the former `if get_default` built the same
                # default model; collapsed the dead branch.
                self.model = ContextParser.get_default_model()
            elif 'text' in annotator_class:
                from nlu import TextMatcher
                if get_default or nlp_ref == 'text_matcher':
                    self.model = TextMatcher.get_default_model()
                else:
                    self.model = TextMatcher.get_pretrained_model(nlp_ref, language)
            elif 'date' in annotator_class:
                from nlu.components.matchers.date_matcher.date_matcher import DateMatcher as DateM
                # BUG FIX: both arms of the former `if get_default` built the same
                # default model; collapsed the dead branch. (Unused `from nlu import
                # DateMatcher` removed as well.)
                self.model = DateM.get_default_model()
            elif 'regex' in annotator_class:
                from nlu import RegexMatcher
                if get_default:
                    self.model = RegexMatcher.get_default_model()
                else:
                    # NOTE(review): lookup uses nlu_ref here while the text branch uses
                    # nlp_ref — confirm this asymmetry is intended.
                    self.model = RegexMatcher.get_pretrained_model(nlu_ref, language)
        SparkNLUComponent.__init__(self, annotator_class, component_type, nlu_ref, nlp_ref, language,
                                   loaded_from_pretrained_pipe, is_licensed)
import logging
logger = logging.getLogger('nlu')
import nlu
import requests
class ModelHubUtils():
    """Modelhub utils: resolve NLU references to NLP references, Modelhub URLs and JSON entries."""
    modelhub_json_url = 'https://nlp.johnsnowlabs.com/models.json'
    # Fetched once when the class body executes; requires network access at import time.
    data = requests.get(modelhub_json_url).json()

    @staticmethod
    def NLU_ref_to_NLP_ref(nlu_ref: str, lang: str = None) -> str:
        """Resolve a Spark NLU reference to a NLP reference.
        Args :
            nlu_ref : which nlu model's nlp reference to return.
            lang : what language is the model in.
        Returns '' when nothing matches.
        """
        nlu_namespaces_to_check = [nlu.Spellbook.pretrained_pipe_references, nlu.Spellbook.pretrained_models_references,
                                   nlu.Spellbook.pretrained_healthcare_model_references,
                                   nlu.Spellbook.licensed_storage_ref_2_nlu_ref,
                                   nlu.Spellbook.storage_ref_2_nlu_ref]
        # BUG FIX: the lang check was nested inside the namespace loop, so when lang was
        # falsy the full all-language scan was redundantly repeated once per namespace.
        if lang:
            for dict_ in nlu_namespaces_to_check:
                if lang in dict_.keys():
                    for reference in dict_[lang]:
                        if reference == nlu_ref:
                            return dict_[lang][reference]
        else:
            for dict_ in nlu_namespaces_to_check:
                for lang_key in dict_:
                    for reference in dict_[lang_key]:
                        if reference == nlu_ref:
                            return dict_[lang_key][reference]
        # Fall back to component aliases.
        for _nlp_ref, nlp_ref_type in nlu.Spellbook.component_alias_references.items():
            if _nlp_ref == nlu_ref: return nlp_ref_type[0]
        return ''

    @staticmethod
    def get_url_by_nlu_refrence(nlu_refrence: str) -> str:
        """Resolve a Modelhub URL for an NLU reference.
        Args :
            nlu_refrence : Which nlu reference's url to return.
        Falls back to the Modelhub root URL when no entry matches.
        """
        if nlu_refrence == '': return 'https://nlp.johnsnowlabs.com/models'
        # Default to English when no language prefix is present.
        if nlu_refrence.split(".")[0] not in nlu.AllComponentsInfo().all_languages:
            nlu_refrence = "en." + nlu_refrence
        nlp_refrence = ModelHubUtils.NLU_ref_to_NLP_ref(nlu_refrence)
        if not nlp_refrence:
            # BUG FIX: NLU_ref_to_NLP_ref returns '' (never None) on a miss, so the old
            # `== None` check could never trigger; treat any falsy result as unresolved.
            print(f"{nlp_refrence} {nlu_refrence}")
            return 'https://nlp.johnsnowlabs.com/models'
        for model in ModelHubUtils.data:
            if (model['language'] in nlu_refrence.split(".") or model['language'] in nlp_refrence.split('_')) and \
                    model['name'] == nlp_refrence:
                return f"https://nlp.johnsnowlabs.com/{model['url']}"
        return 'https://nlp.johnsnowlabs.com/models'

    @staticmethod
    def return_json_entry(nlu_refrence: str) -> dict:
        """Resolve the Modelhub JSON entry for an nlu reference.
        Args:
            nlu_refrence: What nlu reference to resolve.
        Returns None when no entry matches.
        """
        if nlu_refrence.split(".")[0] not in nlu.AllComponentsInfo().all_languages:
            nlu_refrence = "en." + nlu_refrence
        nlp_refrence = ModelHubUtils.NLU_ref_to_NLP_ref(nlu_refrence)
        language = nlu_refrence.split(".")[0]
        for model in ModelHubUtils.data:
            if model['language'] == language and model["name"] == nlp_refrence:
                return model
import csv
import os
from torchblocks.callback import TrainLogger
from torchblocks.metrics import Accuracy
from torchblocks.processor import InputExample, TextClassifierProcessor
from torchblocks.trainer import TextClassifierTrainer
from torchblocks.utils import (
build_argparse,
dict_to_text,
get_checkpoints,
prepare_device,
seed_everything,
)
from transformers import WEIGHTS_NAME
from roformer import (
RoFormerConfig,
RoFormerForSequenceClassification,
RoFormerTokenizer,
)
# Maps a model-type CLI key to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    "roformer": (RoFormerConfig, RoFormerForSequenceClassification, RoFormerTokenizer)
}
class ChnSentiProcessor(TextClassifierProcessor):
    """Data processor for the ChnSenti binary sentiment-classification task."""

    def get_labels(self):
        """Return the two sentiment labels used by this task."""
        return ["0", "1"]

    def read_data(self, input_file):
        """Read a tab-separated file and return its rows as lists of strings."""
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            return list(csv.reader(handle, delimiter="\t", quotechar=None))

    def create_examples(self, lines, set_type):
        """Build InputExample objects, skipping the header row (index 0)."""
        examples = []
        for index, row in enumerate(lines):
            if index == 0:  # header row
                continue
            examples.append(
                InputExample(
                    guid=f"{set_type}-{index}",
                    # Single-sentence task: label in column 0, text in column 1,
                    # no second text segment.
                    texts=[row[1], None],
                    label=row[0],
                )
            )
        return examples
def main():
    """Train and/or evaluate a RoFormer text classifier on the ChnSenti dataset.

    Behavior is fully driven by command-line arguments (see ``build_argparse``):
    model/tokenizer paths, output directory, and the ``do_train``/``do_eval``
    switches.
    """
    args = build_argparse().parse_args()
    if args.model_name is None:
        args.model_name = args.model_path.split("/")[-1]
    # All artifacts (checkpoints, logs, eval results) go under a
    # model-specific output directory.
    args.output_dir = args.output_dir + "{}".format(args.model_name)
    os.makedirs(args.output_dir, exist_ok=True)
    prefix = "_".join([args.model_name, args.task_name])
    logger = TrainLogger(log_dir=args.output_dir, prefix=prefix)
    # device
    logger.info("initializing device")
    args.device, args.n_gpu = prepare_device(args.gpu, args.local_rank)
    seed_everything(args.seed)
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    # data processor
    logger.info("initializing data processor")
    tokenizer = tokenizer_class.from_pretrained(
        args.model_path, do_lower_case=args.do_lower_case
    )
    processor = ChnSentiProcessor(
        data_dir=args.data_dir, tokenizer=tokenizer, prefix=prefix
    )
    label_list = processor.get_labels()
    args.num_labels = len(label_list)
    # model
    logger.info("initializing model and config")
    config = config_class.from_pretrained(
        args.model_path,
        num_labels=args.num_labels,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(args.model_path, config=config)
    model.to(args.device)
    # trainer
    logger.info("initializing trainer")  # fixed typo: was "traniner"
    trainer = TextClassifierTrainer(
        logger=logger,
        args=args,
        collate_fn=processor.collate_fn,
        input_keys=processor.get_input_keys(),
        metrics=[Accuracy()],
    )
    # do train
    if args.do_train:
        train_dataset = processor.create_dataset(
            args.train_max_seq_length, "train.tsv", "train"
        )
        eval_dataset = processor.create_dataset(
            args.eval_max_seq_length, "dev.tsv", "dev"
        )
        trainer.train(model, train_dataset=train_dataset, eval_dataset=eval_dataset)
    # do eval: only on the main process (local_rank -1 or 0)
    if args.do_eval and args.local_rank in [-1, 0]:
        results = {}
        eval_dataset = processor.create_dataset(
            args.eval_max_seq_length, "test.tsv", "test"
        )
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints or args.checkpoint_number > 0:
            checkpoints = get_checkpoints(
                args.output_dir, args.checkpoint_number, WEIGHTS_NAME
            )
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            # Checkpoint dirs are named like ".../checkpoint-<global_step>".
            global_step = checkpoint.split("/")[-1].split("-")[-1]
            model = model_class.from_pretrained(checkpoint, config=config)
            model.to(args.device)
            trainer.evaluate(
                model, eval_dataset, save_preds=True, prefix=str(global_step)
            )
            if global_step:
                # Prefix each metric with its global step so all checkpoints
                # can share one results file.
                result = {
                    "{}_{}".format(global_step, k): v
                    for k, v in trainer.records["result"].items()
                }
                results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        dict_to_text(output_eval_file, results)
# Script entry point.
if __name__ == "__main__":
    main()

# Rofunc: The Full Process Python Package for Robot Learning from Demonstration and Robot Manipulation
[](https://pypi.org/project/rofunc/)


[](https://github.com/Skylark0924/Rofunc/issues?q=is%3Aissue+is%3Aclosed)
[](https://github.com/Skylark0924/Rofunc/issues?q=is%3Aopen+is%3Aissue)
[](https://rofunc.readthedocs.io/en/latest/?badge=latest)
[](https://actions-badge.atrox.dev/Skylark0924/Rofunc/goto?ref=main)
> **Repository address: https://github.com/Skylark0924/Rofunc**
The Rofunc package focuses on **Imitation Learning (IL), Reinforcement Learning (RL) and Learning from Demonstration (LfD)** for
**(Humanoid) Robot Manipulation**. It provides valuable and convenient Python functions, including _demonstration collection, data
pre-processing, LfD algorithms, planning, and control methods_. We also provide an Isaac Gym-based robot simulator for
evaluation. This package aims to advance the field by building a full-process toolkit and validation platform that
simplifies and standardizes the process of demonstration data collection, processing, learning, and its deployment on
robots.

- [Rofunc: The Full Process Python Package for Robot Learning from Demonstration and Robot Manipulation](#rofunc-the-full-process-python-package-for-robot-learning-from-demonstration-and-robot-manipulation)
- [Installation](#installation)
- [Install from PyPI (stable version)](#install-from-pypi-stable-version)
- [Install from Source (nightly version, recommended)](#install-from-source-nightly-version-recommended)
- [Documentation](#documentation)
- [Star History](#star-history)
- [Citation](#citation)
- [Related Papers](#related-papers)
- [The Team](#the-team)
- [Acknowledge](#acknowledge)
- [Learning from Demonstration](#learning-from-demonstration)
- [Planning and Control](#planning-and-control)
## Installation
### Install from PyPI (stable version)
The installation is very easy,
```shell
pip install rofunc
# [Option] Install with baseline RL frameworks (SKRL, RLlib, Stable Baselines3) and Envs (gymnasium[all], mujoco_py)
pip install rofunc[baselines]
```
and as you'll find later, it's easy to use as well!
```python
import rofunc as rf
```
Thus, have fun in the robotics world!
> **Note**
> Several requirements need to be installed before using the package. Please refer to
> the [installation guide](https://rofunc.readthedocs.io/en/latest/installation.html) for more details.
### Install from Source (nightly version, recommended)
```shell
git clone https://github.com/Skylark0924/Rofunc.git
cd Rofunc
# Create a conda environment
# Python 3.8 is strongly recommended
conda create -n rofunc python=3.8
# For Linux user
sh ./scripts/install.sh
# [Option] Install with baseline RL frameworks (SKRL, RLlib, Stable Baselines3)
sh ./scripts/install_w_baselines.sh
# [Option] For MacOS user (brew is required, Isaac Gym based simulator is not supported on MacOS)
sh ./scripts/mac_install.sh
```
> **Note**
> If you want to use functions related to ZED camera, you need to
> install [ZED SDK](https://www.stereolabs.com/developers/release/#downloads) manually. (We have tried to package it as
> a `.whl` file to add it to `requirements.txt`, unfortunately, the ZED SDK is not very friendly and doesn't support
> direct installation.)
## Documentation
[](https://rofunc.readthedocs.io/en/latest/)
[](https://rofunc.readthedocs.io/en/latest/auto_examples/index.html)
To give you a quick overview of the pipeline of `rofunc`, we provide an interesting example of learning to play Taichi
from human demonstration. You can find it in the [Quick start](https://rofunc.readthedocs.io/en/latest/quickstart.html)
section of the documentation.
The available functions and plans can be found as follows.
> **Note**
> ✅: Achieved 🔃: Reformatting ⛔: TODO
| Data | | Learning | | P&C | | Tools | | Simulator | |
|:-------------------------------------------------------------------------------:|---|:--------------------------------------------------------------------------------------:|----|:-----------------------------------------------------------------------:|-----|:----------------:|----|:-------------------------------------------------------------------------:|----|
| [`xsens.record`](https://rofunc.readthedocs.io/en/latest/devices/xsens.html) | ✅ | `DMP` | ⛔ | [`LQT`](https://rofunc.readthedocs.io/en/latest/planning/lqt.html) | ✅ | `Config` | ✅ | [`Franka`](https://rofunc.readthedocs.io/en/latest/simulator/franka.html) | ✅ |
| [`xsens.export`](https://rofunc.readthedocs.io/en/latest/devices/xsens.html) | ✅ | `GMR` | ✅ | `LQTBi` | ✅ | `robolab.coord` | ✅ | [`CURI`](https://rofunc.readthedocs.io/en/latest/simulator/curi.html) | ✅ |
| [`xsens.visual`](https://rofunc.readthedocs.io/en/latest/devices/xsens.html) | ✅ | `TPGMM` | ✅ | [`LQTFb`](https://rofunc.readthedocs.io/en/latest/planning/lqt_fb.html) | ✅ | `robolab.fk` | ✅ | `CURIMini` | 🔃 |
| [`opti.record`](https://rofunc.readthedocs.io/en/latest/devices/optitrack.html) | ✅ | `TPGMMBi` | ✅ | [`LQTCP`](https://rofunc.readthedocs.io/en/latest/planning/lqt_cp.html) | ✅ | `robolab.ik` | ✅ | `CURISoftHand` | ✅ |
| [`opti.export`](https://rofunc.readthedocs.io/en/latest/devices/optitrack.html) | ✅ | `TPGMM_RPCtl` | ✅ | `LQTCPDMP` | ✅ | `robolab.fd` | ⛔ | `Walker` | ✅ |
| [`opti.visual`](https://rofunc.readthedocs.io/en/latest/devices/optitrack.html) | ✅ | `TPGMM_RPRepr` | ✅ | `LQR` | ✅ | `robolab.id` | ⛔ | `Gluon` | 🔃 |
| [`zed.record`](https://rofunc.readthedocs.io/en/latest/devices/zed.html) | ✅ | `TPGMR` | ✅ | `PoGLQRBi` | ✅ | `visualab.dist` | ✅ | `Baxter` | 🔃 |
| [`zed.export`](https://rofunc.readthedocs.io/en/latest/devices/zed.html) | ✅ | `TPGMRBi` | ✅ | [`iLQR`](https://rofunc.readthedocs.io/en/latest/planning/ilqr.html) | 🔃 | `visualab.ellip` | ✅ | `Sawyer` | 🔃 |
| [`zed.visual`](https://rofunc.readthedocs.io/en/latest/devices/zed.html) | ✅ | `TPHSMM` | ✅ | `iLQRBi` | 🔃 | `visualab.traj` | ✅ | `Multi-Robot` | ✅ |
| `emg.record` | ✅ | [`RLBaseLine(SKRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RLBaseLine/SKRL.html) | ✅ | `iLQRFb` | 🔃 | | | | |
| `emg.export` | ✅ | `RLBaseLine(RLlib)` | ✅ | `iLQRCP` | 🔃 | | | | |
| `emg.visual` | ✅ | `RLBaseLine(ElegRL)` | ✅ | `iLQRDyna` | 🔃 | | | | |
| `mmodal.record` | ⛔ | `BCO(RofuncIL)` | 🔃 | `iLQRObs` | 🔃 | | | | |
| `mmodal.export` | ✅ | `BC-Z(RofuncIL)` | ⛔ | `MPC` | ⛔ | | | | |
| | | `STrans(RofuncIL)` | ⛔ | `RMP` | ⛔ | | | | |
| | | `RT-1(RofuncIL)` | ⛔ | | | | | | |
| | | [`A2C(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/A2C.html) | ✅ | | | | | | |
| | | [`PPO(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/PPO.html) | ✅ | | | | | | |
| | | [`SAC(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/SAC.html) | ✅ | | | | | | |
| | | [`TD3(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/TD3.html) | ✅ | | | | | | |
| | | `CQL(RofuncRL)` | ⛔ | | | | | | |
| | | `TD3BC(RofuncRL)` | ⛔ | | | | | | |
| | | `DTrans(RofuncRL)` | 🔃 | | | | | | |
| | | `EDAC(RofuncRL)` | ⛔ | | | | | | |
| | | [`AMP(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/AMP.html) | ✅ | | | | | | |
| | | [`ASE(RofuncRL)`](https://rofunc.readthedocs.io/en/latest/lfd/RofuncRL/ASE.html) | ✅ | | | | | | |
| | | `ODTrans(RofuncRL)` | ⛔ | | | | | | |
## Star History
[](https://star-history.com/#Skylark0924/Rofunc&Date)
## Citation
If you use rofunc in a scientific publication, we would appreciate citations to the following paper:
```
@misc{Rofunc2022,
author = {Liu, Junjia and Li, Chenzui and Delehelle, Donatien and Li, Zhihao and Chen, Fei},
title = {Rofunc: The full process python package for robot learning from demonstration and robot manipulation},
year = {2022},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/Skylark0924/Rofunc}},
}
```
## Related Papers
1. Robot cooking with stir-fry: Bimanual non-prehensile manipulation of semi-fluid objects ([IEEE RA-L 2022](https://arxiv.org/abs/2205.05960) | [Code](rofunc/learning/RofuncIL/structured_transformer/strans.py))
```
@article{liu2022robot,
title={Robot cooking with stir-fry: Bimanual non-prehensile manipulation of semi-fluid objects},
author={Liu, Junjia and Chen, Yiting and Dong, Zhipeng and Wang, Shixiong and Calinon, Sylvain and Li, Miao and Chen, Fei},
journal={IEEE Robotics and Automation Letters},
volume={7},
number={2},
pages={5159--5166},
year={2022},
publisher={IEEE}
}
```
2. SoftGPT: Learn Goal-oriented Soft Object Manipulation Skills by Generative Pre-trained Heterogeneous Graph Transformer ([IROS 2023](https://arxiv.org/abs/2306.12677) | Code coming soon)
3. Learning Robot Generalized Bimanual Coordination using Relative Parameterization Method on Human Demonstration (IEEE CDC 2023 | [Code](./rofunc/learning/ml/tpgmm.py))
## The Team
Rofunc is developed and maintained by the [CLOVER Lab (Collaborative and Versatile Robots Laboratory)](https://feichenlab.com/), CUHK.
## Acknowledge
We would like to acknowledge the following projects:
### Learning from Demonstration
1. [pbdlib](https://gitlab.idiap.ch/rli/pbdlib-python)
2. [Ray RLlib](https://docs.ray.io/en/latest/rllib/index.html)
3. [ElegantRL](https://github.com/AI4Finance-Foundation/ElegantRL)
4. [SKRL](https://github.com/Toni-SM/skrl)
### Planning and Control
1. [Robotics codes from scratch (RCFS)](https://gitlab.idiap.ch/rli/robotics-codes-from-scratch)
| /rofunc-0.0.2.3.tar.gz/rofunc-0.0.2.3/README.md | 0.751283 | 0.967163 | README.md | pypi |
# Measures of roughness for molecular property landscapes
This package implements the roughness index (ROGI) presented in
["Roughness of Molecular Property Landscapes and Its Impact on Modellability"](#), as well
as the [SARI](https://pubs.acs.org/doi/10.1021/jm0705713), [MODI](https://pubs.acs.org/doi/10.1021/ci400572x),
and [RMODI](https://pubs.acs.org/doi/10.1021/acs.jcim.8b00313) indices.
## Installation
``rogi`` can be installed with ``pip``:
```
pip install rogi
```
Note that ``rdkit`` is a dependency but needs to be installed separately with `conda`.
### Requirements
* `numpy`
* `scipy>=1.4`
* `fastcluster`
* `pandas`
* `scikit-learn>=1`
* `rdkit >= 2021` to be installed with `conda`
## Usage
Note that ``ROGI`` and ``SARI`` are classes, while ``MODI`` and ``RMODI`` are functions.
### ROGI
If SMILES are used as input, Morgan fingerprints (length 2048, radius 2) are computed and
a distance matrix calculated with the Tanimoto metric:
```
from rogi import RoughnessIndex
ri = RoughnessIndex(Y=Y, smiles=smiles)
ri.compute_index()
>>> 0.42
```
With precomputed fingerprints:
```
ri = RoughnessIndex(Y=Y, fps=fingerprints)
ri.compute_index()
```
With descriptors you can pass a 2D array or a ``pandas.DataFrame`` where each row is a different
molecule, and each column a different descriptor:
```
ri = RoughnessIndex(Y=Y, X=descriptors, metric='euclidean')
ri.compute_index()
```
You can also precompute a distance matrix using any chosen representation and metric:
```
ri = RoughnessIndex(Y=Y, X=descriptors, metric='precomputed')
ri.compute_index()
```
### SARI
You can provide SMILES as input, and compute the SARI score without considering a reference
set of datasets as follows:
```
from rogi import SARI
sari = SARI(pKi=pKi, smiles=smiles, fingerprints='maccs')
sari.compute_sari()
>>> 0.42
```
To standardize the raw continuous and discontinuous scores based on a reference set of datasets,
you can compute the raw scores first and then provide SARI with their average and standard deviation:
```
raw_conts = []
raw_discs = []
for smiles, pKi in zip(datasets, affinities):
sari = SARI(pKi=pKi, smiles=smiles, fingerprints='maccs')
raw_cont, raw_disc = sari.compute_raw_scores()
raw_conts.append(raw_cont)
raw_discs.append(raw_disc)
mean_raw_cont = np.mean(raw_conts)
std_raw_cont = np.std(raw_conts)
mean_raw_disc = np.mean(raw_discs)
std_raw_disc = np.std(raw_discs)
sari = SARI(pKi=my_pKi, smiles=my_smiles, fingerprints='maccs')
sari.compute_sari(mean_raw_cont=mean_raw_cont, std_raw_cont=std_raw_cont,
mean_raw_disc=mean_raw_disc, std_raw_disc=std_raw_disc)
>>> 0.42
```
You can also pass a precomputed similarity matrix:
```
sari = SARI(pKi=pKi, sim_matrix=precomputed_similarity_matrix)
```
### RMODI
``RMODI`` is a function and takes a distance matrix in square form,
and a list of float, as input.
```
from rogi import RMODI
RMODI(Dx=square_dist_matrix, Y=Y)
>>> 0.42
```
The ``delta`` values used by default is ``0.625``, but can be changed with the ``delta`` argument:
```
from rogi import RMODI
RMODI(Dx=square_dist_matrix, Y=Y, delta=0.5)
>>> 0.21
```
### MODI
``MODI`` is a function and takes a distance matrix in square form,
and a list of binary labels (`0` and `1`), as input.
```
from rogi import MODI
MODI(Dx=square_dist_matrix, Y=Y)
>>> 0.42
```
## Citation
If you make use of the ``rogi`` package in scientific publications, please cite the following article:
```
@misc{rogi,
title={Roughness of molecular property landscapes and its impact on modellability},
author={Matteo Aldeghi and David E. Graff and Nathan Frey and Joseph A. Morrone and
Edward O. Pyzer-Knapp and Kirk E. Jordan and Connor W. Coley},
year={2022},
eprint={2207.09250},
archivePrefix={arXiv},
primaryClass={q-bio.QM}
}
```
If you use ``SARI``, please also cite:
```
@article{sari,
title={SAR Index: Quantifying the Nature of Structure−Activity Relationships},
author={Peltason, Lisa and Bajorath, J\"urgen},
journal={J. Med. Chem.},
publisher={American Chemical Society},
volume={50},
number={23},
pages={5571--5578},
year={2007}
}
```
If you use ``MODI``, please also cite:
```
@article{modi,
title={Data Set Modelability by QSAR},
  author={Golbraikh, Alexander and Muratov, Eugene and Fourches, Denis and
          Tropsha, Alexander},
  journal={J. Chem. Inf. Model.},
publisher={American Chemical Society},
volume={54},
number={1},
pages={1--4},
year={2014}
}
```
If you use ``RMODI``, please also cite:
```
@article{rmodi,
title={Regression Modelability Index: A New Index for Prediction of the
Modelability of Data Sets in the Development of QSAR
Regression Models},
author={Luque Ruiz, Irene and G\'omez-Nieto, Miguel \'Angel},
journal={J. Chem. Inf. Model.},
publisher={American Chemical Society},
volume={58},
number={10},
pages={2069--2084},
year={2018}
}
```
| /rogi-0.1.tar.gz/rogi-0.1/README.md | 0.724675 | 0.979609 | README.md | pypi |
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, TypeVar
from pandas import DataFrame
from rogii_solo.calculations.converters import convert_value, radians_to_degrees
from rogii_solo.calculations.enums import EMeasureUnits
from rogii_solo.papi.client import PapiClient
from rogii_solo.types import DataList
class Convertible:
    """Mixin providing None-safe unit-conversion helpers."""

    @staticmethod
    def convert_xy(value: float, measure_units: EMeasureUnits, force_to_meters: bool = False) -> Optional[float]:
        """Convert a horizontal (x/y) value; passes None through unchanged."""
        if value is None:
            return None
        return convert_value(value, measure_units=measure_units, force_to_meters=force_to_meters)

    @staticmethod
    def convert_z(value: float, measure_units: EMeasureUnits) -> Optional[float]:
        """Convert a depth (z) value; passes None through unchanged."""
        if value is None:
            return None
        return convert_value(value=value, measure_units=measure_units)

    @staticmethod
    def convert_angle(value: float) -> Optional[float]:
        """Convert an angle from radians to degrees; passes None through unchanged."""
        if value is None:
            return None
        return radians_to_degrees(value)
class BaseObject(ABC, Convertible):
    """
    Base data object
    """
    @abstractmethod
    def to_dict(self, *args, **kwargs) -> Dict[str, Any]:
        """
        Convert object to dict
        :return: dict representation of the object
        """
        pass
    @abstractmethod
    def to_df(self, *args, **kwargs) -> DataFrame:
        """
        Convert object to DataFrame
        :return: DataFrame representation of the object
        """
        pass
    def _find_by_path(self,
                      obj: Dict or Iterable[Dict],
                      path: str or Iterable[str],
                      default: Any = None,
                      divider: Optional[str] = None,
                      check_none: bool = False,
                      to_list: bool = False,
                      ) -> Any:
        """
        Find nested key value(s) in dict(s) by divider-separated path(s).

        Every path is tried against every dict.  In non-list mode the first
        truthy match wins; in list mode the results of all matches are
        concatenated into one list.

        :param obj: a dict or an iterable of dicts to search
        :param path: a path string or an iterable of path strings
        :param default: value returned per lookup when the path is absent
        :param divider: path separator; defaults to '.' when not given
        :param check_none: replace a found None value with ``default``
        :param to_list: accumulate all matches into a single list
        :return: the found value (or None), or a list when ``to_list`` is True
        """
        if not obj:
            return None if not to_list else []
        # Normalize both arguments to collections so single values and
        # iterables are handled uniformly below.
        if not isinstance(obj, (List, Tuple, Set)):
            obj = [obj]
        if not isinstance(path, (List, Tuple, Set)):
            path = [path]
        result = [] if to_list else None
        for o in obj:
            for p in path:
                res = self.__find_by_path(
                    obj=o,
                    path=p,
                    default=default,
                    divider=divider,
                    check_none=check_none,
                    to_list=to_list,
                )
                if to_list:
                    result.extend(res)
                elif not to_list and res:
                    # First truthy match wins; stop trying further paths
                    # for this object.
                    result = res
                    break
        return result
    def __find_by_path(self,
                       obj: Dict,
                       path: str,
                       default: Any = None,
                       divider: Optional[str] = None,
                       check_none: bool = False,
                       to_list: bool = False,
                       ) -> Any:
        """Resolve a single divider-separated path inside a single dict."""
        if not obj:
            return None if not to_list else []
        for p in path.split(divider or "."):
            # A missing key or a falsy intermediate value means the path
            # cannot be resolved further.
            if p not in obj or not obj[p]:
                return default if not to_list else []
            obj = obj[p]
        obj = obj if not check_none else default if obj is None else obj
        if not to_list:
            return obj
        # In list mode always hand back a list (wrapping scalar results).
        return obj if isinstance(obj, list) else [obj] if obj else []
class ComplexObject(BaseObject):
    """
    Object with access to PAPI
    """
    def __init__(self, papi_client: PapiClient):
        super().__init__()
        # PAPI client kept for subclasses to fetch their data through.
        self._papi_client = papi_client
    def to_dict(self, *args, **kwargs) -> Dict[str, Any]:
        # Default: no own data; subclasses override with real payloads.
        return {}
    def to_df(self, *args, **kwargs) -> DataFrame:
        # Single-row DataFrame built from the dict representation.
        return DataFrame([self.to_dict(*args, **kwargs)])
T = TypeVar('T', bound=BaseObject)


class ObjectRepository(list[T]):
    """A list of data objects with convenience lookup and export methods."""

    def __init__(self, objects: List[T] = None):
        super().__init__(objects if objects is not None else [])

    def to_dict(self, get_converted: bool = True) -> DataList:
        """Export every contained object as a dict."""
        return [item.to_dict(get_converted) for item in self]

    def to_df(self, get_converted: bool = True) -> DataFrame:
        """Export the repository as a pandas DataFrame (one row per object)."""
        return DataFrame(self.to_dict(get_converted))

    def find_by_id(self, value) -> Optional[T]:
        """Return the first object whose ``uuid`` equals ``value``, else None."""
        return self._find_by_attr(attr='uuid', value=value)

    def find_by_name(self, value) -> Optional[T]:
        """Return the first object whose ``name`` equals ``value``, else None."""
        return self._find_by_attr(attr='name', value=value)

    def _find_by_attr(self, attr: str, value) -> Optional[T]:
        """Linear scan for the first object with ``getattr(item, attr) == value``."""
        for item in self:
            if getattr(item, attr, None) == value:
                return item
        return None
from typing import Any, Dict
from pandas import DataFrame
import rogii_solo.interpretation
from rogii_solo.base import BaseObject
from rogii_solo.calculations.interpretation import get_segments, get_segments_boundaries, interpolate_horizon
from rogii_solo.calculations.trajectory import calculate_trajectory
from rogii_solo.types import Horizon as HorizonType
class Horizon(BaseObject):
    """Stratigraphic horizon belonging to a well interpretation."""
    def __init__(self, interpretation: 'rogii_solo.interpretation.Interpretation', **kwargs):
        self.interpretation = interpretation
        # uuid/name are populated from the PAPI payload via kwargs below.
        self.uuid = None
        self.name = None
        self.__dict__.update(kwargs)
    def to_dict(self, get_converted: bool = True) -> Dict[str, Any]:
        """Return {'meta': {...}, 'points': [...]} for this horizon."""
        return self._get_data(get_converted)
    def to_df(self, get_converted: bool = True) -> HorizonType:
        """Return the horizon as DataFrames: one-row 'meta' plus per-point 'points'."""
        data = self._get_data(get_converted)
        return {
            'meta': DataFrame([data['meta']]),
            'points': DataFrame(data['points']),
        }
    def _get_data(self, get_converted: bool):
        # Combine identifying metadata with the interpolated horizon points.
        meta = {
            'uuid': self.uuid,
            'name': self.name,
        }
        points = self._calculate_points(get_converted)
        return {
            'meta': meta,
            'points': points,
        }
    def _calculate_points(self, get_converted: bool):
        """Interpolate this horizon's (md, tvd) points along the well trajectory.

        All intermediate work is done in raw (unconverted) units; the final
        values are converted only when ``get_converted`` is True.
        """
        well_data = self.interpretation.well.to_dict(get_converted=False)
        trajectory_data = self.interpretation.well.trajectory.to_dict(get_converted=False)
        assembled_segments_data = self.interpretation.assembled_segments
        measure_units = self.interpretation.well.project.measure_unit
        # Full trajectory with computed stations (vs/x/y per md).
        calculated_trajectory = calculate_trajectory(
            well=well_data,
            raw_trajectory=trajectory_data,
            measure_units=measure_units
        )
        # Interpretation segments positioned on that trajectory.
        segments = get_segments(
            well=well_data,
            assembled_segments=assembled_segments_data['segments'],
            calculated_trajectory=calculated_trajectory,
            measure_units=measure_units
        )
        segments_boundaries = get_segments_boundaries(
            assembled_segments=segments,
            calculated_trajectory=calculated_trajectory
        )
        interpolated_horizon = interpolate_horizon(
            segments_boundaries=segments_boundaries,
            horizon_uuid=self.uuid,
            horizon_tvd=assembled_segments_data['horizons'][self.uuid]['tvd']
        )
        if get_converted:
            # Convert depths to the project's display units.
            return [
                {
                    'md': self.convert_z(point['md'], measure_units=measure_units),
                    'tvd': self.convert_z(point['tvd'], measure_units=measure_units),
                }
                for point in interpolated_horizon
            ]
        return interpolated_horizon
from math import fabs
from typing import Any, Dict, List
from rogii_solo.calculations.base import (
calc_segment_dip,
find_by_md,
find_last_by_md,
get_most_common,
get_nearest_values,
interpolate_linear
)
from rogii_solo.calculations.constants import DELTA
from rogii_solo.calculations.enums import EMeasureUnits
from rogii_solo.calculations.trajectory import calculate_trajectory, interpolate_trajectory_point
from rogii_solo.calculations.types import (
AssembledHorizons,
Segment,
SegmentBoundaries,
SegmentsBoundaries,
SegmentWithDip,
Trajectory
)
from rogii_solo.papi.types import PapiAssembledSegments
def get_segments(well: Dict[str, Any],
                 calculated_trajectory: Trajectory,
                 assembled_segments: PapiAssembledSegments,
                 measure_units: EMeasureUnits
                 ) -> List[Segment]:
    """Convert raw PAPI interpretation segments into positioned ``Segment`` records.

    Each segment MD is located on the calculated trajectory (interpolating
    between the two bracketing stations when needed) so that vs/x/y can be
    attached to it.  A synthetic closing segment is appended at the last
    trajectory station, inheriting the final real segment's horizon shifts.
    """
    segments = []
    # Parallel structures: list of station MDs (for nearest-value search)
    # and an MD -> index map for O(1) access to the bracketing stations.
    mds, mds_map = [], {}
    for i, point in enumerate(calculated_trajectory):
        mds.append(point['md'])
        mds_map[point['md']] = i
    for assembled_segment in assembled_segments:
        nearest_mds = get_nearest_values(
            value=assembled_segment['md'],
            input_list=mds
        )
        if len(nearest_mds) < 2:
            # Interpretation start MD = calculated trajectory start MD
            # Otherwise (MD approximately equal or equal the last trajectory point MD) two points are found
            interpolated_point = calculated_trajectory[0]
        else:
            left_point_md, right_point_md = nearest_mds
            left_point = calculated_trajectory[mds_map[left_point_md]]
            right_point = calculated_trajectory[mds_map[right_point_md]]
            interpolated_point = interpolate_trajectory_point(
                left_point=left_point,
                right_point=right_point,
                md=assembled_segment['md'],
                well=well,
                measure_units=measure_units,
            )
        segments.append(Segment(
            md=assembled_segment['md'],
            vs=interpolated_point['vs'],
            start=assembled_segment['start'],
            end=assembled_segment['end'],
            x=interpolated_point['x'],
            y=interpolated_point['y'],
            horizon_shifts=assembled_segment['horizon_shifts'],
            boundary_type=assembled_segment['boundary_type'],
        ))
    # Close the interpretation with a synthetic segment at the trajectory end;
    # it carries no own start/end shifts and reuses the last segment's
    # horizon shifts and boundary type.
    last_trajectory_point = calculated_trajectory[-1]
    segments.append(Segment(
        md=last_trajectory_point['md'],
        vs=last_trajectory_point['vs'],
        start=None,
        end=None,
        x=last_trajectory_point['x'],
        y=last_trajectory_point['y'],
        horizon_shifts=segments[-1]['horizon_shifts'],
        boundary_type=segments[-1]['boundary_type'],
    ))
    return segments
def get_segments_with_dip(segments: List[Segment], assembled_horizons: AssembledHorizons) -> List[SegmentWithDip]:
    """Attach a dip angle (degrees) to every segment.

    A segment's dip is derived from the VS distance to the next segment and
    the change of horizon shift across the segment.  When horizons exist, a
    dip is computed per horizon (ordered by TVD) and the most common value
    wins; otherwise the segment's own start/end shifts are used.  The last
    segment copies the previous segment's dip (or 90 when there is only one).
    """
    segments_with_dip = [
        SegmentWithDip(**segment, dip=None) for segment in segments
    ]
    for i in range(len(segments) - 1):
        left_point = segments[i]
        right_point = segments[i + 1]
        if len(assembled_horizons):
            segment_dips = []
            # Order horizons by TVD so dips are collected top-down.
            sorted_assembled_horizons = {
                uuid: assembled_horizon
                for uuid, assembled_horizon in sorted(
                    assembled_horizons.items(),
                    key=lambda horizon_: horizon_[1]['tvd']
                )
            }
            for horizon_uuid, horizon in sorted_assembled_horizons.items():
                # Degenerate (zero-length in MD) segment: no dip for this horizon.
                if fabs(right_point['md'] - left_point['md']) < DELTA:
                    segment_dip = None
                    segment_dips.append(segment_dip)
                    continue
                horizon_shifts = left_point['horizon_shifts'][horizon_uuid]
                shift_start = horizon_shifts['start']
                shift_end = horizon_shifts['end']
                segment_dip = calc_segment_dip(
                    delta_x=fabs(right_point['vs'] - left_point['vs']),
                    delta_y=shift_end - shift_start
                )
                segment_dips.append(segment_dip)
            result_dip = get_most_common(segment_dips)
        else:
            # No horizons: fall back to the segment's own start/end shifts.
            shift_start = left_point['start']
            shift_end = left_point['end']
            result_dip = calc_segment_dip(
                delta_x=fabs(right_point['vs'] - left_point['vs']),
                delta_y=shift_end - shift_start
            )
        segments_with_dip[i]['dip'] = result_dip
    if len(segments) > 0:
        # The closing segment has no successor: reuse the previous dip,
        # or default to vertical (90) for a single-segment interpretation.
        segments_with_dip[-1]['dip'] = (
            90 if len(segments) == 1
            else segments_with_dip[-2]['dip']
        )
    return segments_with_dip
def get_segments_boundaries(assembled_segments: List[Segment], calculated_trajectory) -> SegmentsBoundaries:
    """For each trajectory station, find the pair of segments bracketing its MD.

    Stations located before the first segment get None boundaries; stations
    past the last segment MD use the last segment as the left boundary and
    the trajectory's final MD as the right one.
    """
    segments_boundaries = []
    segments_mds = [segment['md'] for segment in assembled_segments]
    calculated_trajectory_last_point_md = calculated_trajectory[-1]['md']
    for calculated_trajectory_point in calculated_trajectory:
        calculated_point_md = calculated_trajectory_point['md']
        nearest_segments_mds = get_nearest_values(value=calculated_point_md, input_list=segments_mds)
        if len(nearest_segments_mds) < 2:
            # Only one neighbor: the station is before the first or after
            # the last segment MD.
            segment_md = nearest_segments_mds[0]
            if calculated_point_md < segment_md:
                # Station precedes the interpretation: no bracketing segments.
                segments_boundaries.append(
                    SegmentBoundaries(
                        md=calculated_point_md,
                        left_point=None,
                        right_point=None,
                        interpolated_point=calculated_trajectory_point
                    )
                )
                continue
            else:
                left_point_md = segment_md
                right_point_md = calculated_trajectory_last_point_md
        else:
            left_point_md, right_point_md = nearest_segments_mds
        # Several segments can share an MD: take the last one on the left
        # and the first one on the right.
        left_point = find_last_by_md(left_point_md, assembled_segments)
        right_point = find_by_md(right_point_md, assembled_segments)
        segments_boundaries.append(
            SegmentBoundaries(
                md=calculated_point_md,
                left_point=left_point,
                right_point=right_point,
                interpolated_point=calculated_trajectory_point
            )
        )
    return segments_boundaries
def get_last_segment_dip(well: Any, assembled_segments: Any, measure_units: EMeasureUnits):
    """Return the dip of the final interpretation segment of a well.

    Recomputes the well trajectory, rebuilds the interpretation segments on
    it, attaches dips, and reads the dip off the last (closing) segment.
    """
    raw_well = well.to_dict(get_converted=False)
    trajectory = calculate_trajectory(
        raw_trajectory=well.trajectory.to_dict(get_converted=False),
        well=raw_well,
        measure_units=measure_units,
    )
    dipped_segments = get_segments_with_dip(
        segments=get_segments(
            well=raw_well,
            assembled_segments=assembled_segments['segments'],
            calculated_trajectory=trajectory,
            measure_units=measure_units,
        ),
        assembled_horizons=assembled_segments['horizons'],
    )
    return dipped_segments[-1]['dip']
def interpolate_horizon(segments_boundaries: SegmentsBoundaries, horizon_uuid: str, horizon_tvd: float):
    """Interpolate a horizon's TVD at every trajectory station.

    Stations that precede the first interpretation segment (no left segment)
    get a None TVD; elsewhere the TVD is linearly interpolated along VS
    between the left segment's start and end shifts applied to the horizon's
    base TVD.
    """
    points = []
    for boundary in segments_boundaries:
        station_md = boundary['md']
        left = boundary['left_point']
        if left is None:
            # Station is before the interpretation start.
            points.append({'md': station_md, 'tvd': None})
            continue
        shifts = left['horizon_shifts'][horizon_uuid]
        tvd = interpolate_linear(
            x0=left['vs'],
            y0=horizon_tvd + shifts['start'],
            x1=boundary['right_point']['vs'],
            y1=horizon_tvd + shifts['end'],
            x=boundary['interpolated_point']['vs'],
        )
        points.append({'md': station_md, 'tvd': tvd})
    return points
import math
from bisect import bisect_left
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, TypeVar
from rogii_solo.calculations.constants import DELTA
# Generic type variables for the key-function signature of get_nearest_values.
T = TypeVar('T')
V = TypeVar('V')
def calc_hypotenuse_length(cathetus1: float, cathetus2: float) -> Optional[float]:
    """Return the hypotenuse length for two catheti, or None if either is None."""
    if cathetus1 is None or cathetus2 is None:
        return None
    # math.hypot is equivalent to sqrt(a**2 + b**2) but avoids intermediate
    # overflow/underflow for extreme magnitudes.
    return math.hypot(cathetus1, cathetus2)
def calc_atan2(y: float, x: float) -> Optional[float]:
    """None-safe atan2.

    Returns None if either argument is None.  A zero result is normalized
    via ``or 0``, which also maps -0.0 to the integer 0.
    """
    if y is None or x is None:
        return None
    angle = math.atan2(y, x)
    return angle or 0
def calc_vs(angle: float, distance: float, direction: float) -> Optional[float]:
    """Project ``distance`` onto the vertical-section ``direction``.

    Returns None if any argument is None.
    """
    if angle is None or distance is None or direction is None:
        return None
    return math.cos(angle - direction) * distance
def calc_shape_factor(dog_leg: float) -> Optional[float]:
    """Ratio factor used by the minimum-curvature method; None-safe.

    Returns 1.0 when the dogleg is numerically 0 or pi (within DELTA),
    where the tan-based formula is degenerate.
    """
    if dog_leg is None:
        return None
    near_zero = math.fabs(dog_leg) <= DELTA
    near_pi = math.fabs(dog_leg - math.pi) <= DELTA
    if near_zero or near_pi:
        return 1.0
    return 2.0 * math.tan(0.5 * dog_leg) / dog_leg
def normalize_angle(angle: float) -> float:
    """Wrap an angle (radians) into [0, 2*pi] by repeated shifting.

    Falsy inputs (0, 0.0, None) normalize to 0.0.  Boundary values 0 and
    2*pi are returned as-is (the loops use strict comparisons).
    """
    if not angle:
        return 0.0
    two_pi = 2 * math.pi
    wrapped = angle
    while wrapped < 0:
        wrapped += two_pi
    while wrapped > two_pi:
        wrapped -= two_pi
    return wrapped
def get_nearest_values(value: Any, input_list: List[Any], key: Optional[Callable[[T], V]] = None) -> Any:
if not input_list:
return
pos = bisect_left(input_list, value, key=key)
if pos == 0:
values = [input_list[0]]
elif pos == len(input_list):
values = [input_list[-1]]
else:
values = [
input_list[pos - 1],
input_list[pos]
]
return values
def interpolate_linear(x0: float, y0: float, x1: float, y1: float, x: float) -> Optional[float]:
    """Linearly interpolate y at *x* on the segment (x0, y0)-(x1, y1).

    Returns None when any input is missing; a degenerate vertical segment
    (x0 == x1) yields y0.
    """
    if None in (x0, y0, x1, y1, x):
        return
    if x0 == x1:
        return y0
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)
def calc_segment_dip(delta_x: float, delta_y: float) -> Optional[float]:
    """Return the segment dip in degrees (90 minus the slope angle).

    None when an input is missing or the horizontal delta is below DELTA.
    """
    if delta_x is None or delta_y is None or delta_x < DELTA:
        return
    slope_angle = math.atan2(delta_y, delta_x)
    return 90 - math.degrees(slope_angle)
def get_most_common(input_list: List[Any]) -> Any:
    """Return the most frequent element of *input_list*, or None if it is empty."""
    if not input_list:
        return
    (most_common_value, _), = Counter(input_list).most_common(1)
    return most_common_value
def find_by_key(key: str, value: float, input_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Return the first dict in *input_list* whose *key* equals *value*, else {}."""
    for item in input_list:
        if item[key] == value:
            return item
    return {}
def find_last_by_key(key: str, value: float, input_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Return the last dict in *input_list* whose *key* equals *value*, else {}."""
    for item in reversed(input_list):
        if item[key] == value:
            return item
    return {}
def find_by_md(value: float, input_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Shortcut for find_by_key keyed on 'md'."""
    return find_by_key(key='md', value=value, input_list=input_list)
def find_last_by_md(value: float, input_list: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Shortcut for find_last_by_key keyed on 'md'."""
    return find_last_by_key(key='md', value=value, input_list=input_list)
def calc_segment_vs_length(x1: float, y1: float, x2: float, y2: float, azimuth_vs: float) -> float:
    """Return the length of the segment projected onto the VS azimuth direction.

    Azimuth in radians.
    Segment start point (x1, y1), end point (x2, y2).

    The projection length is the absolute value of the scalar product of the
    segment vector with the unit azimuth vector (sin a, cos a). The original
    code recomputed sqrt((p*sin)^2 + (p*cos)^2), which is just |p| since
    sin^2 + cos^2 == 1 — simplified here.
    """
    vs_line_param = math.sin(azimuth_vs) * (x2 - x1) + math.cos(azimuth_vs) * (y2 - y1)
    return math.fabs(vs_line_param)
import copy
from math import acos, cos, degrees, fabs, pi, sin
from typing import Any, Dict, Optional
from rogii_solo.calculations.base import calc_atan2, calc_hypotenuse_length, calc_shape_factor
from rogii_solo.calculations.base import calc_vs as base_calc_vs
from rogii_solo.calculations.base import normalize_angle
from rogii_solo.calculations.constants import DELTA, FEET_TO_METERS
from rogii_solo.calculations.enums import EMeasureUnits
from rogii_solo.calculations.types import RawTrajectory, Trajectory, TrajectoryPoint
def calculate_trajectory(
    raw_trajectory: RawTrajectory,
    well: Dict[str, Any],
    measure_units: EMeasureUnits,
) -> Trajectory:
    """Convert a raw trajectory into calculated points (tvd/ns/ew/vs/dls...).

    Returns [] when either the raw trajectory or the well data is missing.
    Each raw point is convergence-corrected and chained to the previously
    calculated point.
    """
    if not raw_trajectory or not well:
        return []
    result = []
    previous = None
    for raw_point in raw_trajectory:
        current = prepare_trajectory_point(raw_point, well['convergence'])
        calculated = calculate_trajectory_point(
            prev_point=previous,
            curr_point=current,
            well=well,
            measure_units=measure_units,
        )
        result.append(calculated)
        previous = calculated
    return result
def calculate_trajectory_point(
    prev_point: Dict[str, Any],
    curr_point: Dict[str, Any],
    well: Dict[str, Any],
    measure_units: EMeasureUnits,
) -> TrajectoryPoint:
    """Compute the calculated point for *curr_point*, chained to *prev_point*.

    The first point (no previous) is built from the well tie-in values. The
    step uses a dog-leg angle and a shape factor (minimum-curvature-style
    integration — confirm against the project's survey-method docs).
    """
    if not prev_point:
        return calculate_initial_trajectory_point(curr_point, well)
    course_length = curr_point['md'] - prev_point['md']
    # Duplicate MD station: nothing to integrate, reuse the previous point.
    if fabs(course_length) < DELTA:
        return prev_point
    prev_incl_sin, curr_incl_sin = sin(prev_point['incl']), sin(curr_point['incl'])
    prev_incl_cos, curr_incl_cos = cos(prev_point['incl']), cos(curr_point['incl'])
    curr_azim = normalize_angle(curr_point['azim'])
    # Dog-leg (total curvature angle) between the two survey stations.
    dog_leg = acos(
        cos(prev_point['incl'] - curr_point['incl'])
        - curr_incl_sin * prev_incl_sin
        * (1.0 - cos(curr_azim - prev_point['azim']))
    )
    dls = calc_dls(dog_leg, course_length, measure_units=measure_units)
    # shape = 0.5 * shape_factor * course_length; applied to averaged direction cosines.
    shape = calc_shape(dog_leg, course_length)
    tvd = prev_point['tvd'] + shape * (curr_incl_cos + prev_incl_cos)
    # ns/ew default the previous offsets to 0 when they are None or falsy.
    ns = (prev_point['ns'] or 0) + shape * (prev_incl_sin * cos(prev_point['azim']) + curr_incl_sin * cos(curr_azim))
    ew = (prev_point['ew'] or 0) + shape * (prev_incl_sin * sin(prev_point['azim']) + curr_incl_sin * sin(curr_azim))
    return TrajectoryPoint(
        md=curr_point['md'],
        incl=curr_point['incl'],
        azim=curr_azim,
        tvd=tvd,
        tvdss=calc_tvdss(well['kb'], tvd),
        ns=ns,
        ew=ew,
        x=calc_x(ew, well['xsrf']),
        y=calc_y(ns, well['ysrf']),
        vs=calc_vs(ns, ew, well['azimuth']),
        dls=dls,
        dog_leg=dog_leg
    )
def interpolate_trajectory_point(
    left_point: Dict[str, Any],
    right_point: Dict[str, Any],
    md: float,
    well: Dict[str, Any],
    measure_units: EMeasureUnits,
) -> TrajectoryPoint:
    """Interpolate a trajectory point at *md* between two calculated points.

    The dog-leg is scaled proportionally to the MD fraction of the interval;
    direction cosines are blended between the endpoints. MDs within DELTA of
    an endpoint return that endpoint unchanged.
    """
    if fabs(md - left_point['md']) < DELTA:
        return left_point
    if fabs(md - right_point['md']) < DELTA:
        return right_point
    point_course_length = right_point['md'] - left_point['md']
    course_length = md - left_point['md']
    # Dog-leg is assumed to accumulate linearly along the interval.
    dog_leg = (course_length / point_course_length) * right_point['dog_leg']
    shape = calc_shape(dog_leg, course_length)
    left_incl_sin = sin(left_point['incl'])
    left_incl_cos = cos(left_point['incl'])
    left_azim_sin = sin(left_point['azim'])
    left_azim_cos = cos(left_point['azim'])
    dog_leg_sin = sin(dog_leg)
    right_diff_dog_leg_sin = sin(right_point['dog_leg'] - dog_leg)
    right_dog_leg_sin = sin(right_point['dog_leg'])
    # When sin(full dog-leg) is ~0 the sine-blend weights degenerate;
    # the fallbacks below switch to the left point's direction cosines.
    left_dog_legged_sin = (
        left_incl_sin / right_dog_leg_sin
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else 1.0
    )
    left_dog_legged_cos = (
        left_incl_cos / right_dog_leg_sin
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else 1.0
    )
    right_dog_legged_sin = (
        sin(right_point['incl']) / right_dog_leg_sin
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else 1.0
    )
    right_dog_legged_cos = (
        cos(right_point['incl']) / right_dog_leg_sin
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else 1.0
    )
    ext_delta_tvd = (
        right_diff_dog_leg_sin * left_dog_legged_cos + dog_leg_sin * right_dog_legged_cos
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else left_incl_cos
    )
    delta_tvd = shape * (ext_delta_tvd + left_incl_cos)
    ext_delta_ns = (
        right_diff_dog_leg_sin * left_dog_legged_sin * left_azim_cos
        + dog_leg_sin * right_dog_legged_sin * cos(right_point['azim'])
        if right_dog_leg_sin < -DELTA or right_dog_leg_sin > DELTA
        else left_incl_sin * left_azim_cos
    )
    # NOTE(review): this guard tests right_point['dog_leg'] while the three
    # guards above test right_dog_leg_sin — near dog_leg == pi they disagree.
    # Looks like an inconsistency; confirm intended condition.
    ext_delta_ew = (
        right_diff_dog_leg_sin * left_dog_legged_sin * left_azim_sin
        + dog_leg_sin * right_dog_legged_sin * sin(right_point['azim'])
        if right_point['dog_leg'] < -DELTA or right_point['dog_leg'] > DELTA
        else left_incl_sin * left_azim_sin
    )
    tvd = left_point['tvd'] + delta_tvd
    ns = left_point['ns'] + shape * (ext_delta_ns + left_incl_sin * left_azim_cos)
    ew = left_point['ew'] + shape * (ext_delta_ew + left_incl_sin * left_azim_sin)
    x = calc_x(ew, well['xsrf'])
    y = calc_y(ns, well['ysrf'])
    vs = calc_vs(ns, ew, well['azimuth'])
    # Recover inclination/azimuth from the blended direction deltas.
    incl = calc_atan2(calc_hypotenuse_length(ext_delta_ns, ext_delta_ew), ext_delta_tvd)
    if incl < 0:
        incl += pi
    azim = normalize_angle(calc_atan2(ext_delta_ew, ext_delta_ns))
    dls = calc_dls(dog_leg, course_length, measure_units=measure_units)
    return TrajectoryPoint(
        md=md,
        incl=incl,
        azim=azim,
        tvd=tvd,
        ns=ns,
        ew=ew,
        x=x,
        y=y,
        tvdss=calc_tvdss(kb=well['kb'], tvd=tvd),
        vs=vs,
        dls=dls,
        dog_leg=dog_leg
    )
def calculate_initial_trajectory_point(
    point: Dict[str, Any],
    well: Dict[str, Any],
) -> TrajectoryPoint:
    """Build the first calculated point from the well tie-in values.

    When no tie-in TVD is configured, the point's MD is used as the TVD.
    """
    tie_in_tvd = well['tie_in_tvd']
    tvd = point['md'] if tie_in_tvd is None else tie_in_tvd
    tie_in_ns = well['tie_in_ns']
    tie_in_ew = well['tie_in_ew']
    return TrajectoryPoint(
        md=point['md'],
        incl=point['incl'],
        azim=normalize_angle(point['azim']),
        tvd=tvd,
        tvdss=calc_tvdss(well['kb'], tvd),
        ns=tie_in_ns,
        ew=tie_in_ew,
        x=calc_x(tie_in_ew, well['xsrf']),
        y=calc_y(tie_in_ns, well['ysrf']),
        vs=calc_vs(tie_in_ns, tie_in_ew, well['azimuth']),
        dls=0,
        dog_leg=0
    )
def calc_x(ew: float, xsrf: Optional[float]) -> Optional[float]:
    """Return the absolute X coordinate (surface X plus EW offset); None without a surface X."""
    if xsrf is None:
        return None
    return (ew or 0) + xsrf
def calc_y(ns: float, ysrf: Optional[float]) -> Optional[float]:
    """Return the absolute Y coordinate (surface Y plus NS offset); None without a surface Y."""
    if ysrf is None:
        return None
    return (ns or 0) + ysrf
def calc_vs(ns: float, ew: float, azimuth: float) -> float:
    """Return the vertical-section distance from NS/EW offsets and the VS azimuth."""
    closure_distance = calc_hypotenuse_length(ns, ew)
    closure_direction = calc_atan2(ew, ns)
    return base_calc_vs(azimuth, closure_distance, closure_direction)
def calc_tvdss(kb: Optional[float], tvd: float) -> Optional[float]:
    """Return TVD subsea (KB elevation minus TVD); None without a KB elevation."""
    if kb is not None:
        return kb - tvd
# Course-length normalization per measure unit for dog-leg severity:
# 30 (meters) for metric projects, 100 ft converted to meters otherwise.
DLS_RADIANS_MAP = {
    EMeasureUnits.METER: 30,
    EMeasureUnits.FOOT: 100 * FEET_TO_METERS,
    EMeasureUnits.METER_FOOT: 100 * FEET_TO_METERS
}
def get_dls_unit_coefficient(measure_units: EMeasureUnits) -> float:
    """Return the DLS course-length normalization constant for *measure_units*."""
    return DLS_RADIANS_MAP[measure_units]
def calc_dls(dog_leg: float, md_delta: float, measure_units: EMeasureUnits) -> float:
    """Return dog-leg severity: dog-leg in degrees normalized per unit course length."""
    coefficient = get_dls_unit_coefficient(measure_units)
    return degrees(dog_leg) * (coefficient / md_delta)
def calc_shape(dog_leg: float, course_length: float) -> float:
    """Return half the course length scaled by the minimum-curvature shape factor."""
    return 0.5 * calc_shape_factor(dog_leg) * course_length
def prepare_trajectory_point(point: Dict[str, Any], convergence: float):
    """Return a deep copy of *point* with grid convergence subtracted from its azimuth."""
    adjusted = copy.deepcopy(point)
    adjusted['azim'] -= convergence
    return adjusted
from os import environ
from typing import Any, Dict, List, Optional, Tuple
from numpy import arange, ndarray
from rogii_solo import SoloClient
from rogii_solo.calculations.base import calc_hypotenuse_length, get_nearest_values
from rogii_solo.calculations.converters import feet_to_meters, radians_to_degrees
from rogii_solo.calculations.enums import EMeasureUnits
from rogii_solo.calculations.interpretation import get_segments
from rogii_solo.calculations.trajectory import calculate_trajectory, interpolate_trajectory_point
from rogii_solo.calculations.types import Trajectory, TrajectoryPoint
from rogii_solo.horizon import Horizon
from rogii_solo.interpretation import Interpretation
from rogii_solo.well import Well
# Interpolation step for trajectories, converted from feet for metric projects
STEP_LENGTH = 1
# Cap on the number of interpolation steps per trajectory segment
MAX_STEP_NUMBER = 50
# Inclination for start MD range, degrees
LANDING_INCLINATION = 75
# Tolerance used for float comparisons in this script
DELTA = 0.000001
def restrict_trajectory(trajectory: Trajectory, start_md: float) -> Trajectory:
    """Return the trajectory tail starting at the first point with md > start_md.

    When no point exceeds *start_md*, a copy of the whole trajectory is
    returned (the original slices with a None start index in that case).
    """
    for index, point in enumerate(trajectory):
        if point['md'] > start_md:
            return trajectory[index:]
    return trajectory[:]
def get_segment_range(start_md: float, end_md: float, default_segment_step: float) -> ndarray:
    """Return interpolation MDs in (start_md, end_md], capped at MAX_STEP_NUMBER steps.

    Bug fix: for a segment shorter than *default_segment_step* the floor
    division yielded 0 steps and the md_step computation raised
    ZeroDivisionError; at least one step is now always used.
    """
    segment_length = end_md - start_md
    step_number = min(segment_length // default_segment_step, MAX_STEP_NUMBER)
    step_number = max(step_number, 1)
    md_step = segment_length / step_number
    # Function doesn't return the start_md as a part of the array.
    # end_md must be in the returned range, so use: end_md + md_step
    return arange(start_md + md_step, end_md + md_step, md_step, dtype=float)
def interpolate_trajectory(well_data: Dict[str, Any],
                           trajectory: Trajectory,
                           measure_units: EMeasureUnits
                           ) -> Trajectory:
    """Densify *trajectory* by interpolating points inside every segment.

    The step is ~1 ft, converted to meters for metric projects.
    """
    interpolated_trajectory = [trajectory[0]]
    step = feet_to_meters(STEP_LENGTH) if measure_units != EMeasureUnits.METER else STEP_LENGTH
    for i in range(len(trajectory) - 1):
        segment_range = get_segment_range(trajectory[i]['md'], trajectory[i + 1]['md'], step)
        for md in segment_range:
            # NOTE(review): measure_units is hard-coded to METER here while the
            # caller's *measure_units* is used for the step above; this only
            # affects the interpolated points' DLS — confirm intentional.
            interpolated_point = interpolate_trajectory_point(
                left_point=trajectory[i],
                right_point=trajectory[i + 1],
                md=md,
                well=well_data,
                measure_units=EMeasureUnits.METER
            )
            interpolated_trajectory.append(interpolated_point)
    return interpolated_trajectory
def insert_points_in_trajectory(well_data: Dict[str, Any],
                                trajectory: Trajectory,
                                point_mds: List[float],
                                measure_units: EMeasureUnits
                                ):
    """Interpolate each MD in *point_mds* into *trajectory* in place.

    Assumes every requested MD falls strictly between two existing trajectory
    MDs — get_nearest_values must return a two-element pair here (TODO:
    confirm callers never pass MDs outside the trajectory range).
    """
    for point_md in point_mds:
        # The MD index must be rebuilt each iteration: the previous insert
        # shifted subsequent indices.
        mds, mds_map = [], {}
        for i, point in enumerate(trajectory):
            mds.append(point['md'])
            mds_map[point['md']] = i
        nearest_mds = get_nearest_values(
            value=point_md,
            input_list=mds
        )
        left_point_md, right_point_md = nearest_mds
        left_point = trajectory[mds_map[left_point_md]]
        right_point = trajectory[mds_map[right_point_md]]
        interpolated_point = interpolate_trajectory_point(
            left_point=left_point,
            right_point=right_point,
            md=point_md,
            well=well_data,
            measure_units=measure_units,
        )
        # Insert just before the right neighbour so MD ordering is preserved.
        trajectory.insert(mds_map[right_point_md], interpolated_point)
def calculate_segment_vs_tvds(segments: List[Dict[str, Any]],
                              assembled_segments_data: Dict[str, Any]
                              ) -> List[Dict[str, Any]]:
    """Annotate each segment's horizon shifts with start/end VS and TVD.

    Mutates *segments* in place; the final pseudo-segment (added from the
    last trajectory point) is removed before returning.
    """
    for i, segment in enumerate(segments):
        if i < len(segments) - 1:
            segment['end_md'] = segments[i + 1]['md']
            for horizon_shift in segment['horizon_shifts'].values():
                # Shifted horizon TVD = assembled horizon TVD + segment shift.
                horizon_tvd = assembled_segments_data['horizons'][horizon_shift['uuid']]['tvd']
                horizon_shift['start_vs'] = segment['vs']
                horizon_shift['start_tvd'] = horizon_tvd + horizon_shift['start']
                horizon_shift['end_vs'] = segments[i + 1]['vs']
                horizon_shift['end_tvd'] = horizon_tvd + horizon_shift['end']
    # Remove pseudo-segment with last trajectory point
    del segments[-1]
    return segments
def get_horizons_and_landing_md(well: Well,
                                calculated_trajectory: Any,
                                top_horizon_name: str,
                                base_horizon_name: str,
                                landing_point_topset_name: str,
                                landing_point_top_name: str
                                ) -> Tuple[Interpretation, Horizon, Horizon, float]:
    """Resolve the zone's top/base horizons and the landing MD for *well*.

    Falls back to the starred horizons when no names are supplied, and to the
    first trajectory point steeper than LANDING_INCLINATION when no landing
    top is supplied. The landing MD is clamped to the interpretation start.

    Raises:
        Exception: when the starred interpretation, a horizon, or the
            landing point cannot be resolved.
    """
    interpretation = well.starred_interpretation
    if not interpretation:
        raise Exception(f'Starred interpretation in the well "{well.name}" not found.')
    # Bug fix: fall back to the starred Horizon object itself, not its .name —
    # the declared return type is Horizon and callers use the .uuid attribute.
    top_horizon = (
        interpretation.horizons.find_by_name(top_horizon_name) if top_horizon_name
        else interpretation.starred_horizon_top
    )
    if not top_horizon:
        raise Exception(
            f'Top of zone in the interpretation "{interpretation.name}" in the well "{well.name}" not found.'
        )
    base_horizon = (
        interpretation.horizons.find_by_name(base_horizon_name) if base_horizon_name
        else interpretation.starred_horizon_bottom
    )
    if not base_horizon:
        raise Exception(
            f'Bottom of zone in the interpretation "{interpretation.name}" in the well "{well.name}" not found.'
        )
    landing_md: float = 0.0
    if landing_point_topset_name and landing_point_top_name:
        topset = well.topsets.find_by_name(landing_point_topset_name)
        top = topset.tops.find_by_name(landing_point_top_name)
        landing_md = top.md
    else:
        # Fall back to the first point whose inclination reaches the landing threshold.
        for point in calculated_trajectory:
            if radians_to_degrees(point['incl']) >= LANDING_INCLINATION:
                landing_md = point['md']
                break
    if not landing_md:
        raise Exception(f'Landing point not found for the well "{well.name}".')
    assembled_segments = interpretation.get_assembled_segments_data()['segments']
    interpretation_start_md = assembled_segments[0]['md']
    # The zone cannot start before the interpretation itself.
    landing_md = max(landing_md, interpretation_start_md)
    return interpretation, top_horizon, base_horizon, landing_md
def in_polygon(point_x: float,
               point_y: float,
               top_horizon_start_x: float,
               top_horizon_start_y: float,
               top_horizon_end_x: float,
               top_horizon_end_y: float,
               base_horizon_start_x: float,
               base_horizon_start_y: float,
               base_horizon_end_x: float,
               base_horizon_end_y: float,
               ) -> bool:
    """Even-odd ray-casting test for the quadrilateral spanned by two horizons.

    The polygon vertices are (top start, top end, base end, base start).
    """
    xs = [top_horizon_start_x, top_horizon_end_x, base_horizon_end_x, base_horizon_start_x]
    ys = [top_horizon_start_y, top_horizon_end_y, base_horizon_end_y, base_horizon_start_y]
    inside = False
    for i in range(len(xs)):
        # Does the edge (i-1, i) straddle the horizontal line at point_y?
        straddles = (ys[i] <= point_y < ys[i - 1]) or (ys[i - 1] <= point_y < ys[i])
        if straddles:
            # Edge x at point_y; toggle parity when the point lies to its right.
            edge_x = (xs[i - 1] - xs[i]) * (point_y - ys[i]) / (ys[i - 1] - ys[i]) + xs[i]
            if point_x > edge_x:
                inside = not inside
    return inside
def is_point_inside_horizons_shifts(point: TrajectoryPoint,
                                    top_horizon_shift: Dict[str, Any],
                                    base_horizon_shift: Dict[str, Any]
                                    ) -> bool:
    """True when the point's (vs, tvd) lies inside the quad between the shifted horizons."""
    top, base = top_horizon_shift, base_horizon_shift
    return in_polygon(
        point_x=point['vs'],
        point_y=point['tvd'],
        top_horizon_start_x=top['start_vs'],
        top_horizon_start_y=top['start_tvd'],
        top_horizon_end_x=top['end_vs'],
        top_horizon_end_y=top['end_tvd'],
        base_horizon_start_x=base['start_vs'],
        base_horizon_start_y=base['start_tvd'],
        base_horizon_end_x=base['end_vs'],
        base_horizon_end_y=base['end_tvd'],
    )
def calculate_lines_intersection(x1: float,
                                 y1: float,
                                 x2: float,
                                 y2: float,
                                 x3: float,
                                 y3: float,
                                 x4: float,
                                 y4: float,
                                 find_outside_segment: bool = False,
                                 ) -> Tuple[Optional[float], Optional[float]]:
    """Intersect segment (x1,y1)-(x2,y2) with segment (x3,y3)-(x4,y4).

    Returns (None, None) for degenerate (zero-length) or parallel inputs, or
    when the intersection falls outside both segments and
    *find_outside_segment* is False.
    """
    first_is_point = x1 == x2 and y1 == y2
    second_is_point = x3 == x4 and y3 == y4
    if first_is_point or second_is_point:
        return None, None
    denominator = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if denominator == 0:  # parallel lines never intersect
        return None, None
    # Parametric positions of the intersection along each segment.
    ua = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denominator
    ub = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denominator
    within_segments = 0 <= ua <= 1 and 0 <= ub <= 1
    if not within_segments and not find_outside_segment:
        return None, None
    return x1 + ua * (x2 - x1), y1 + ua * (y2 - y1)
def calculate_intersection_points_with_horizon(point: TrajectoryPoint,
                                               right_point: TrajectoryPoint,
                                               top_horizon: Dict[str, Any]
                                               ):
    """Intersect the trajectory piece [point, right_point] with a horizon shift segment.

    Returns {'vs': ..., 'tvd': ...}; both values are None when no
    intersection exists within the segments.
    """
    intersection_vs, intersection_tvd = calculate_lines_intersection(
        point['vs'],
        point['tvd'],
        right_point['vs'],
        right_point['tvd'],
        top_horizon['start_vs'],
        top_horizon['start_tvd'],
        top_horizon['end_vs'],
        top_horizon['end_tvd'],
    )
    return {'vs': intersection_vs, 'tvd': intersection_tvd}
def is_in_segment(segment: Dict[str, Any], left_point: TrajectoryPoint, right_point: TrajectoryPoint) -> bool:
    """True when both points' MDs fall inside [segment['md'], segment['end_md']]."""
    start = segment['md']
    end = segment['end_md']
    return start <= left_point['md'] <= end and start <= right_point['md'] <= end
def get_length_in_piece(
    left_point: TrajectoryPoint,
    right_point: TrajectoryPoint,
    segment: Dict[str, Any],
    top_horizon_uuid: str,
    base_horizon_uuid: str
):
    """Return the length of the trajectory piece that lies inside the zone.

    The zone is the quad between the segment's shifted top and base horizons.
    Fully-outside pieces contribute 0; fully-inside pieces contribute their MD
    span; partially-inside pieces contribute the VS/TVD chord between the
    horizon intersection and the inside endpoint.
    """
    top_horizon = segment['horizon_shifts'][top_horizon_uuid]
    base_horizon = segment['horizon_shifts'][base_horizon_uuid]
    left_point_inside = is_point_inside_horizons_shifts(
        point=left_point,
        top_horizon_shift=top_horizon,
        base_horizon_shift=base_horizon
    )
    right_point_inside = is_point_inside_horizons_shifts(
        point=right_point,
        top_horizon_shift=top_horizon,
        base_horizon_shift=base_horizon
    )
    if not left_point_inside and not right_point_inside:
        return 0
    if left_point_inside and right_point_inside:
        return right_point['md'] - left_point['md']
    # Calculate intersected part of piece
    top_point = calculate_intersection_points_with_horizon(left_point, right_point, top_horizon)
    base_point = calculate_intersection_points_with_horizon(left_point, right_point, base_horizon)
    # NOTE(review): both one-point-inside branches pick the same intersection
    # expression (top first, base as fallback) and differ only in which
    # endpoint closes the chord — confirm this is intended for the
    # right-inside case.
    if left_point_inside and not right_point_inside:
        start_point = (
            top_point if top_point['vs'] is not None
            else base_point
        )
        end_point = {
            'vs': left_point['vs'],
            'tvd': left_point['tvd']
        }
    elif not left_point_inside and right_point_inside:
        start_point = (
            top_point if top_point['vs'] is not None
            else base_point
        )
        end_point = {
            'vs': right_point['vs'],
            'tvd': right_point['tvd']
        }
    else:
        # Piece crosses the whole zone: chord between both horizon intersections.
        start_point = top_point
        end_point = base_point
    return calc_hypotenuse_length(end_point['vs'] - start_point['vs'], end_point['tvd'] - start_point['tvd'])
def calc_zone_statistics(well: Well,
                         calculated_trajectory: Any,
                         top_horizon_name: str,
                         base_horizon_name: str,
                         landing_point_topset: str,
                         landing_point_top: str,
                         measure_units: EMeasureUnits
                         ) -> Dict[str, float]:
    """Compute the in-zone length and percentage for a well's lateral.

    Resolves the zone horizons and the landing MD, builds the interpretation
    segments with start/end VS-TVDs, then walks the trajectory from the zone
    start accumulating the length of each piece inside the zone.

    Returns:
        {'in_zone': length, 'in_zone_percent': percentage of total length}.
    """
    interpretation, top_horizon, base_horizon, landing_md = get_horizons_and_landing_md(
        well=well,
        calculated_trajectory=calculated_trajectory,
        top_horizon_name=top_horizon_name,
        base_horizon_name=base_horizon_name,
        landing_point_topset_name=landing_point_topset,
        landing_point_top_name=landing_point_top
    )
    well_data = well.to_dict(get_converted=False)
    assembled_segments_data = interpretation.get_assembled_segments_data()
    segments = get_segments(
        well=well_data,
        assembled_segments=assembled_segments_data['segments'],
        calculated_trajectory=calculated_trajectory,
        measure_units=measure_units
    )
    tvds_segments = calculate_segment_vs_tvds(segments, assembled_segments_data)
    # The zone starts at the later of the landing point and the first segment.
    zone_start_md = max(landing_md, tvds_segments[0]['md'])
    interpolated_trajectory = interpolate_trajectory(
        well_data=well_data,
        trajectory=calculated_trajectory,
        measure_units=measure_units
    )
    # Make sure every segment boundary (and the zone start) has an exact
    # trajectory point so pieces never straddle segment edges.
    point_mds = [segment['md'] for segment in tvds_segments]
    point_mds.append(zone_start_md)
    insert_points_in_trajectory(
        well_data=well_data,
        trajectory=interpolated_trajectory,
        point_mds=point_mds,
        measure_units=measure_units
    )
    restricted_trajectory = restrict_trajectory(trajectory=calculated_trajectory, start_md=zone_start_md - DELTA)
    total_length = restricted_trajectory[-1]['md'] - restricted_trajectory[0]['md']
    in_zone_length = 0
    for point_index, point in enumerate(restricted_trajectory):
        if point_index < len(restricted_trajectory) - 1:
            # Find the interpretation segment containing this trajectory piece.
            segment_index = next(
                (index for index, segment in enumerate(tvds_segments) if is_in_segment(
                    segment,
                    point,
                    restricted_trajectory[point_index + 1])),
                None
            )
            if segment_index is None:
                continue
            in_zone_length += get_length_in_piece(
                left_point=point,
                right_point=restricted_trajectory[point_index + 1],
                segment=tvds_segments[segment_index],
                top_horizon_uuid=top_horizon.uuid,
                base_horizon_uuid=base_horizon.uuid
            )
    return {
        'in_zone': in_zone_length,
        'in_zone_percent': in_zone_length / total_length * 100
    }
def bulk_calc_zone_statistics(project_name: str,
                              well_names: List[str],
                              top_horizon: str,
                              base_horizon: str,
                              landing_point_topset: str,
                              landing_point_top: str
                              ) -> Dict[str, Dict[str, Any]]:
    """Compute zone statistics for every well in *well_names*.

    Connects to Solo with credentials from the ROGII_SOLO_* environment
    variables. Wells that cannot be found or whose statistics fail are
    reported to stdout and skipped.
    """
    solo_client = SoloClient(
        client_id=environ.get('ROGII_SOLO_CLIENT_ID'),
        client_secret=environ.get('ROGII_SOLO_CLIENT_SECRET'),
        papi_domain_name=environ.get('ROGII_SOLO_PAPI_DOMAIN_NAME')
    )
    solo_client.set_project_by_name(project_name)
    statistics = {}
    for well_name in well_names:
        well = solo_client.project.wells.find_by_name(well_name)
        if well is None:
            print(f'Well "{well_name}" not found.')
            continue
        well_data = well.to_dict(get_converted=False)
        calculated_trajectory = calculate_trajectory(
            raw_trajectory=well.trajectory.to_dict(get_converted=False),
            well=well_data,
            measure_units=solo_client.project.measure_unit
        )
        try:
            statistics[well.name] = calc_zone_statistics(
                well=well,
                calculated_trajectory=calculated_trajectory,
                top_horizon_name=top_horizon,
                base_horizon_name=base_horizon,
                landing_point_topset=landing_point_topset,
                landing_point_top=landing_point_top,
                measure_units=solo_client.project.measure_unit
            )
        except Exception as exception:
            # Best-effort bulk run: report the failure and move on.
            print(f'Warning! Statistics for well "{well.name}" is not calculated.', exception)
    return statistics
if __name__ == '__main__':
    # Put horizon names for top and base if it's not starred
    script_settings = {
        'project_name': '',
        'well_names': [],
        'top_horizon': '',
        'base_horizon': '',
        'landing_point_topset': '',
        'landing_point_top': ''
    }
    # Topset and top must be provided together (or both omitted).
    if (
        (not script_settings['landing_point_topset'] and script_settings['landing_point_top']) or
        (script_settings['landing_point_topset'] and not script_settings['landing_point_top'])
    ):
        raise Exception('Set correct data for both topset and top, please.')
    zone_statistics = bulk_calc_zone_statistics(**script_settings)
    for well_name in zone_statistics:
        print(f'Well "{well_name}" is {zone_statistics[well_name]}')
import math
from bisect import bisect_left
from os import environ
from typing import Any, Dict, List, Tuple
import numpy as np
import plotly
import plotly.graph_objs as go
from scipy import stats
import rogii_solo.well
from rogii_solo import SoloClient
from rogii_solo.calculations.base import get_nearest_values
from rogii_solo.calculations.interpretation import interpolate_trajectory_point
from rogii_solo.calculations.trajectory import calculate_trajectory
from rogii_solo.interpretation import Interpretation
def get_interpolated_trajectory(solo_client: SoloClient, well: 'rogii_solo.well.Well') -> List[Dict[str, float]]:
    """Calculate the well trajectory and densify it to a 1-MD-unit grid.

    For each integer MD between the first and last station, the point is
    interpolated between its two nearest calculated stations.
    """
    well_data = well.to_dict()
    calculated_trajectory = calculate_trajectory(
        raw_trajectory=well.trajectory.to_dict(),
        well=well_data,
        measure_units=solo_client.project.measure_unit
    )
    # get md range for interpolation
    md_range = range(int(calculated_trajectory[0]['md']), int(calculated_trajectory[-1]['md']) + 1)
    mds, mds_map = [], {}
    interpolated_trajectory = []
    # Index the calculated stations once so neighbours are found by MD.
    for i, point in enumerate(calculated_trajectory):
        mds.append(point['md'])
        mds_map[point['md']] = i
    for md in md_range:
        nearest_mds = get_nearest_values(value=md, input_list=mds)
        if len(nearest_mds) < 2:
            # MD is before the first (or after the last) station: clamp to it.
            interpolated_point = calculated_trajectory[0]
        else:
            left_point_md, right_point_md = nearest_mds
            left_point = calculated_trajectory[mds_map[left_point_md]]
            right_point = calculated_trajectory[mds_map[right_point_md]]
            interpolated_point = interpolate_trajectory_point(
                left_point=left_point,
                right_point=right_point,
                md=md,
                well=well_data,
                measure_units=solo_client.project.measure_unit,
            )
        interpolated_trajectory.append(interpolated_point)
    return interpolated_trajectory
def get_horizons(interpretation: Interpretation, md_step: int) -> List[Dict[str, Any]]:
    """Return TVT data sampled every *md_step*; raise when none is available."""
    horizons = interpretation.get_tvt_data(md_step)
    if horizons:
        return horizons
    raise Exception('Horizons\' data not found.')
def get_horizon_tvts(interpretation: Interpretation) -> List[Dict[str, Any]]:
    """Return name/uuid/tvt records for every assembled horizon of *interpretation*."""
    horizons_data = interpretation.assembled_segments['horizons'].values()
    return [
        {
            'name': horizon['name'],
            'uuid': horizon['uuid'],
            'tvt': horizon['tvd'],
        }
        for horizon in horizons_data
    ]
def get_horizons_data_by_md(horizons: List[Dict[str, Any]], md: float) -> Dict[str, Any]:
    """Return the horizons record at the insertion position of *md* (last one past the end)."""
    mds = [entry['md'] for entry in horizons]
    position = bisect_left(mds, md)
    if position >= len(mds):
        position = -1
    return horizons[position]
def add_tvt_to_trajectory(trajectory: List[Dict[str, float]],
                          horizons: List[Dict[str, Any]]
                          ) -> List[Dict[str, float]]:
    """Attach a 'tvt' value to every trajectory point (NaN outside the horizons' MD range).

    Mutates and returns *trajectory*.
    """
    first_md = horizons[0]['md']
    last_md = horizons[-1]['md']
    for point in trajectory:
        if first_md <= point['md'] <= last_md:
            point['tvt'] = get_horizons_data_by_md(horizons=horizons, md=point['md'])['tvt']
        else:
            point['tvt'] = math.nan
    return trajectory
def get_trajectory_tvt_by_md(trajectory: List[Dict[str, float]], md: float) -> float:
    """Return the 'tvt' of the point at the insertion position of *md* (last one past the end)."""
    mds = [point['md'] for point in trajectory]
    position = bisect_left(mds, md)
    if position < len(mds):
        return trajectory[position]['tvt']
    return trajectory[-1]['tvt']
def filter_log(log: List[Dict[str, Any]], filter_window: int, idx: int) -> float:
    """Return the centred moving average of log[...]['data'] around *idx*.

    Averages the 2*filter_window + 1 samples from idx - filter_window to
    idx + filter_window inclusive. Fixes an off-by-one in the original,
    which counted the centre sample twice and skipped idx + filter_window
    while still dividing by 2*filter_window + 1.
    """
    window = range(idx - filter_window, idx + filter_window + 1)
    total = sum(log[j]['data'] for j in window)
    return total / (filter_window * 2 + 1)
def get_heatmap_data(trajectory: List[Dict[str, float]],
                     filter_window: int,
                     x_log: List[Dict[str, Any]],
                     tvt_min: int,
                     tvt_max: int,
                     bins: int
                     ) -> Tuple[Any, Any, Any, float, float]:
    """Build a row-normalized 2D histogram of filtered log value vs TVT.

    Returns (histogram, x bin edges, y bin edges, last x sample, last y
    sample). Each histogram row is normalized by its own maximum.
    """
    x, y = [], []
    # Leave filter_window samples at each end so the moving-average filter
    # never indexes outside the log.
    for i in range(filter_window, len(x_log) - 1 - filter_window):
        md = x_log[i]['md']
        tvt = get_trajectory_tvt_by_md(trajectory=trajectory, md=md)
        value = filter_log(log=x_log, filter_window=filter_window, idx=i)
        if md < trajectory[-1]['md'] and tvt_min < tvt < tvt_max and not math.isnan(value):
            x.append(value)
            y.append(tvt)
    if not x or not y:
        raise Exception(
            'Warning! Data arrays are empty. '
            'Try to extend the TVT range (tvt_min, tvt_max) or check logs for values and MD ranges.'
        )
    histogram2d, xedges2, yedges2, _ = stats.binned_statistic_2d(
        x=x,
        y=y,
        values=y,
        statistic='count',
        bins=bins
    )
    # Transpose so rows correspond to TVT bins, columns to value bins.
    histogram2d = histogram2d.T
    # Normalize each TVT row by its own maximum count.
    for i in range(bins):
        max_val = max(histogram2d[i])
        if max_val > 0:
            for j in range(bins):
                histogram2d[i][j] = histogram2d[i][j] / max_val
    return histogram2d, xedges2, yedges2, x[-1], y[-1]
def get_horizon_scatters(xedges2: Any,
                         yedges2: Any,
                         horizons: List[Dict[str, Any]],
                         zero_horizon_uuid: str
                         ) -> List[go.Scatter]:
    """Build dotted horizontal scatter lines for horizons visible in the plot range.

    Horizon TVTs are shifted so the zero horizon sits at 0; only horizons
    within the y-range (plus a small margin) are drawn.
    """
    tvt_margin = 0.1
    y_min, y_max = np.nanmin(yedges2), np.nanmax(yedges2)
    x_min, x_max = np.nanmin(xedges2), np.nanmax(xedges2)
    data = []
    zero_tvt = 0
    # Find the baseline TVT of the zero horizon.
    for horizon in horizons:
        if horizon['uuid'] == zero_horizon_uuid:
            zero_tvt = horizon['tvt']
            break
    for horizon in horizons:
        tvt = horizon['tvt'] - zero_tvt
        if y_min - tvt_margin <= tvt <= y_max + tvt_margin:
            data.append(
                go.Scatter(
                    x=[x_min, x_max],
                    y=[tvt, tvt],
                    name=horizon['name'],
                    line={'dash': 'dot'},
                    mode='lines+text',
                    text=['', horizon['name']],
                    textposition='top left',
                    showlegend=False,
                    textfont={'size': 14, 'color': 'rgb(0, 175, 0)'}
                )
            )
    return data
def get_last_rop_point_scatter(last_x: float, last_y: float) -> go.Scatter:
    """Build a single highlighted marker for the most recent data point."""
    return go.Scatter(
        x=[last_x, ],
        y=[last_y, ],
        mode='markers',
        marker={
            'color': 'White',
            'size': 20,
            'line': {'width': 2, 'color': 'Red'},
        },
        showlegend=False
    )
def refine_log_points(log_points: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop log points whose 'data' value is None."""
    return list(filter(lambda point: point['data'] is not None, log_points))
def build_tvt_rop_heatmap(script_settings: Dict[str, Any]):
    """Build and open an offline Plotly heatmap of filtered log values vs TVT.

    Reads Solo credentials from the ROGII_SOLO_* environment variables,
    resolves the well, its starred interpretation and the chosen log, then
    renders the heatmap with horizon lines and a marker on the last sample.

    Raises:
        Exception: when the well, the starred interpretation or the log
            cannot be found.
    """
    project_name, well_name = script_settings['project_name'], script_settings['well_name']
    x_log_name, filter_window = script_settings['x_log'], script_settings['filter_window']
    tvt_min, tvt_max = script_settings['tvt_min'], script_settings['tvt_max']
    bins = script_settings['bins']
    solo_client = SoloClient(
        client_id=environ.get('ROGII_SOLO_CLIENT_ID'),
        client_secret=environ.get('ROGII_SOLO_CLIENT_SECRET'),
        papi_domain_name=environ.get('ROGII_SOLO_PAPI_DOMAIN_NAME')
    )
    solo_client.set_project_by_name(project_name)
    well = solo_client.project.wells.find_by_name(well_name)
    if well is None:
        raise Exception(f'Well "{well_name}" not found.')
    interpretation = well.starred_interpretation
    if not interpretation:
        raise Exception('Starred interpretation not found.')
    interpretation_data = interpretation.to_dict()
    # get trajectory and change its representation for convenience of calculations
    trajectory = get_interpolated_trajectory(solo_client=solo_client, well=well)
    # get horizons data with provided step by md
    horizons = get_horizons(interpretation=interpretation, md_step=1)
    # add tvts from horizon data
    trajectory = add_tvt_to_trajectory(trajectory=trajectory, horizons=horizons)
    x_log = well.logs.find_by_name(x_log_name)
    if not x_log:
        raise Exception(f'Log "{x_log_name}" not found.')
    # start plotting
    data = []
    histogram2d, xedges2, yedges2, last_x, last_y = get_heatmap_data(
        trajectory=trajectory,
        filter_window=filter_window,
        x_log=refine_log_points(x_log.to_dict()['points']),
        tvt_min=tvt_min,
        tvt_max=tvt_max,
        bins=bins
    )
    data.append(go.Heatmap(x=xedges2, y=yedges2, z=histogram2d, showscale=False))
    # Overlay dotted horizon lines, shifted to the interpretation's zero horizon.
    horizon_tvts = get_horizon_tvts(interpretation)
    horizon_scatters = get_horizon_scatters(
        xedges2=xedges2,
        yedges2=yedges2,
        horizons=horizon_tvts,
        zero_horizon_uuid=interpretation_data['meta']['properties']['zero_horizon_uuid']
    )
    data.extend(horizon_scatters)
    # Highlight the most recent sample.
    last_rop_point_scatter = get_last_rop_point_scatter(last_x, last_y)
    data.append(last_rop_point_scatter)
    layout = go.Layout(
        font={'size': 16},
        yaxis={
            'zeroline': False,
            'title': 'TVT',
            # Reversed range so depth increases downward.
            'range': [yedges2[-1], yedges2[0]],
            # set yaxes tick value format to xxxxx, not to xx.xxk
            'tickformatstops': [{'dtickrange': [-1000000, 1000000], 'value': ':d'}, ],
            'showticklabels': True,
            'tickcolor': 'rgb(127, 127, 127)',
            'ticks': 'outside'
        },
        xaxis={
            'zeroline': False,
            'title': x_log_name,
            'dtick': 25,
            'showticklabels': True,
            'tickcolor': 'rgb(127, 127, 127)',
            'ticks': 'outside',
            'range': [xedges2[0], xedges2[-1]]
        }
    )
    figure = go.Figure(data=data, layout=layout)
    config = {'showLink': True, 'linkText': "Edit Plot", 'scrollZoom': True}
    plotly.offline.plot(figure, filename='./tmpplot.html', config=config)
if __name__ == '__main__':
    # Fill in project/well/log names before running; tvt range and filter
    # window control what is plotted.
    script_settings = {
        'project_name': '',
        'well_name': '',
        'x_log': '',
        'tvt_min': -1,
        'tvt_max': 1,
        'filter_window': 5,
        'bins': 60
    }
    build_tvt_rop_heatmap(script_settings)
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and visualizing a
    Gaussian distribution.

    Attributes:
        mean (float): mean value of the distribution
        stdev (float): standard deviation of the distribution
        data (list of floats): data extracted from a data file
            (populated by the ``Distribution`` base class)
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Calculate the mean of the data set and store it in ``self.mean``.

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Calculate the standard deviation of the data set and store it
        in ``self.stdev``.

        Args:
            sample (bool): whether the data represents a sample (n - 1
                denominator) or a population (n denominator)

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)

        mean = self.calculate_mean()

        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)

        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Output a histogram of the instance variable data using the
        matplotlib pyplot library.

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian
        distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points for the pdf curve

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUGFIX: this previously re-labeled axes[0]; the pdf subplot is axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two independent Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution with summed mean and variance
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances of independent Gaussians add; stdevs combine in quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Output the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from gym import spaces
import json
from typing import Dict, Iterable, List, Tuple, Union
from rogue_gym_python._rogue_gym import ParallelGameState, PlayerState
from .rogue_env import ImageSetting, RogueEnv
class ParallelRogueEnv:
    """Special executor to exec rogue-gym parallely.

    Runs one game instance per config in lockstep: each call to ``step``
    takes one action per worker and returns per-worker results.
    """
    metadata = RogueEnv.metadata
    SYMBOLS = RogueEnv.SYMBOLS
    ACTION_MEANINGS = RogueEnv.ACTION_MEANINGS
    ACTIONS = RogueEnv.ACTIONS
    ACTION_LEN = len(ACTIONS)

    def __init__(
        self,
        config_dicts: Iterable[dict],
        max_steps: int = 1000,
        image_setting: ImageSetting = ImageSetting(),
    ) -> None:
        """
        :param config_dicts: one game configuration dict per worker.
        :param max_steps: maximum number of steps per episode.
        :param image_setting: observation image settings shared by all workers.
        """
        self.game = ParallelGameState(max_steps, [json.dumps(d) for d in config_dicts])
        self.result = None
        self.max_steps = max_steps
        self.steps = 0
        self.action_space = spaces.discrete.Discrete(self.ACTION_LEN)
        self.observation_space = \
            image_setting.detect_space(*self.game.screen_size(), self.game.symbols())
        self.image_setting = image_setting
        # Cached most recent PlayerState per worker, used to compute rewards.
        self.states = self.game.states()
        self.num_workers = len(config_dicts)

    def get_key_to_action(self) -> Dict[str, str]:
        """Return the mapping from keyboard key to action name."""
        # BUGFIX: previously returned self.ACION_MEANINGS (typo), which
        # raised AttributeError.
        return self.ACTION_MEANINGS

    def get_configs(self) -> dict:
        """Return the game configuration as a dict."""
        config = self.game.dump_config()
        return json.loads(config)

    def step(
        self,
        action: Union[Iterable[int], str]
    ) -> Tuple[List[PlayerState], List[float], List[bool], List[dict]]:
        """
        Do one action per worker.

        :param action: either a string with one key per worker
            (e.g. "hjk" for 3 workers) or an iterable of indices
            into ``ACTIONS``.
        :return: (states, rewards, done flags, info dicts), one entry
            per worker. The reward is the non-negative gold gained.
        """
        if isinstance(action, str) and len(action) == self.num_workers:
            action = [ord(c) for c in action]
        else:
            try:
                action = [ord(self.ACTIONS[x]) for x in action]
            except Exception:
                raise ValueError("Invalid action: {}".format(action))
        states = self.game.step(action)
        # Reward: gold gained by each worker this step, clamped at zero.
        rewards = [max(0, after.gold - before.gold) for before, after in zip(self.states, states)]
        done = [s.is_terminal for s in states]
        self.states = states
        return self.states, rewards, done, [{}] * self.num_workers

    def reset(self) -> List[PlayerState]:
        """Reset all workers and return their initial states."""
        self.states = self.game.reset()
        return self.states

    def close(self) -> None:
        """Shut down the underlying game instances."""
        self.game.close()

    def seed(self, seeds: List[int]) -> None:
        """Set one RNG seed per worker (used from the next reset)."""
        self.game.seed(seeds)
from enum import Enum, Flag
import gym
from gym import spaces
import json
import numpy as np
from numpy import ndarray
from typing import Dict, List, NamedTuple, Optional, Tuple, Union
from rogue_gym_python import _rogue_gym as rogue_gym_inner
from rogue_gym_python._rogue_gym import GameState, PlayerState
class StatusFlag(Flag):
    """Bit flags selecting which player status values are included in
    observation images and status vectors. ``FULL`` selects all nine.
    """
    EMPTY = 0b000_000_000
    DUNGEON_LEVEL = 0b000_000_001
    HP_CURRENT = 0b000_000_010
    HP_MAX = 0b000_000_100
    STR_CURRENT = 0b000_001_000
    STR_MAX = 0b000_010_000
    DEFENSE = 0b000_100_000
    PLAYER_LEVEL = 0b001_000_000
    EXP = 0b010_000_000
    HUNGER = 0b100_000_000
    FULL = 0b111_111_111

    def count_one(self) -> int:
        """Return the number of set bits, i.e. how many status values
        this flag selects.
        """
        # Idiomatic popcount instead of the previous manual shift loop.
        return bin(self.value).count("1")

    def symbol_image(self, state: PlayerState) -> ndarray:
        """Symbol-channel image of ``state`` with the selected status planes."""
        self.__check_input(state)
        return state.symbol_image(flag=self.value)

    def symbol_image_with_hist(self, state: PlayerState) -> ndarray:
        """Like ``symbol_image`` but with an extra visit-history plane."""
        self.__check_input(state)
        return state.symbol_image_with_hist(flag=self.value)

    def gray_image(self, state: PlayerState) -> ndarray:
        """Gray-scale image of ``state`` with the selected status planes."""
        self.__check_input(state)
        return state.gray_image(flag=self.value)

    def gray_image_with_hist(self, state: PlayerState) -> ndarray:
        """Like ``gray_image`` but with an extra visit-history plane."""
        self.__check_input(state)
        return state.gray_image_with_hist(flag=self.value)

    def status_vec(self, state: PlayerState) -> List[int]:
        """Vector of the selected status values of ``state``."""
        self.__check_input(state)
        return state.status_vec(flag=self.value)

    def __check_input(self, state: PlayerState) -> None:
        # All public methods delegate to PlayerState; fail fast on wrong types.
        if not isinstance(state, PlayerState):
            raise TypeError("Needs PlayerState, but {} was given".format(type(state)))
class DungeonType(Enum):
    """How the dungeon map is rendered: one gray-scale channel, or one
    channel per dungeon symbol.
    """

    GRAY = auto()
    SYMBOL = auto()
class ImageSetting(NamedTuple):
    """Configuration describing how a ``PlayerState`` is converted into an
    image tensor: dungeon rendering mode, which status planes to include,
    and whether a visit-history plane is appended.
    """

    dungeon: DungeonType = DungeonType.SYMBOL
    status: StatusFlag = StatusFlag.FULL
    includes_hist: bool = False

    def dim(self, channels: int) -> int:
        """Number of channels in the produced image for the given number
        of dungeon symbol channels.
        """
        total = channels if self.dungeon == DungeonType.SYMBOL else 1
        total += self.status.count_one()
        if self.includes_hist:
            total += 1
        return total

    def detect_space(self, h: int, w: int, symbols: int) -> gym.Space:
        """Observation space matching the images produced by ``expand``."""
        shape = (self.dim(symbols), h, w)
        return spaces.box.Box(low=0, high=1, shape=shape, dtype=np.float32)

    def expand(self, state: PlayerState) -> ndarray:
        """Convert ``state`` into an image array according to this setting."""
        if not isinstance(state, PlayerState):
            raise TypeError("Needs PlayerState, but {} was given".format(type(state)))
        if self.dungeon == DungeonType.SYMBOL:
            render = (
                self.status.symbol_image_with_hist
                if self.includes_hist
                else self.status.symbol_image
            )
        else:
            render = (
                self.status.gray_image_with_hist
                if self.includes_hist
                else self.status.gray_image
            )
        return render(state)
class RogueEnv(gym.Env):
    """Gym-compatible environment backed by the native rogue-gym GameState.

    Observations are ``PlayerState`` objects (convert them to arrays with
    ``state_to_image``); the per-step reward is the gold gained.
    """

    metadata = {'render.modes': ['human', 'ascii']}
    # defined in core/src/tile.rs
    SYMBOLS = [
        ' ', '@', '#', '.', '-',
        '%', '+', '^', '!', '?',
        ']', ')', '/', '*', ':',
        '=', ',', 'A', 'B', 'C',
        'D', 'E', 'F', 'G', 'H',
        'I', 'J', 'K', 'L', 'M',
        'N', 'O', 'P', 'Q', 'R',
        'S', 'T', 'U', 'V', 'W',
        'X', 'Y', 'Z',
    ]
    # Same as data/keymaps/ai.json
    # NOTE(review): 'j' -> MOVE_UP / 'k' -> MOVE_DOWN is inverted relative to
    # the usual vi/rogue bindings -- confirm against data/keymaps/ai.json.
    ACTION_MEANINGS = {
        '.': 'NO_OPERATION',
        'h': 'MOVE_LEFT',
        'j': 'MOVE_UP',
        'k': 'MOVE_DOWN',
        'l': 'MOVE_RIGHT',
        'n': 'MOVE_RIGHTDOWN',
        'b': 'MOVE_LEFTDOWN',
        'u': 'MOVE_RIGHTUP',
        'y': 'MOVE_LEFTUP',
        '>': 'DOWNSTAIR',
        's': 'SEARCH',
    }
    ACTIONS = [
        '.', 'h', 'j', 'k', 'l', 'n',
        'b', 'u', 'y', '>', 's',
    ]
    ACTION_LEN = len(ACTIONS)

    def __init__(
        self,
        config_path: Optional[str] = None,
        config_dict: dict = {},
        max_steps: int = 1000,
        image_setting: ImageSetting = ImageSetting(),
        **kwargs,
    ) -> None:
        """
        :param config_path: path to a JSON config file; takes precedence
            over ``config_dict``/``kwargs`` when given.
        :param config_dict: game configuration as a dict.
        :param max_steps: maximum number of steps per episode.
        :param image_setting: default setting used by ``state_to_image``.
        :param kwargs: extra configuration keys merged into ``config_dict``.
        """
        super().__init__()
        if config_path:
            with open(config_path, 'r') as f:
                config = f.read()
        else:
            # BUGFIX: merge into a new dict instead of mutating config_dict
            # in place -- the old code permanently polluted the shared
            # mutable default argument with kwargs from earlier instances.
            config = json.dumps({**config_dict, **kwargs})
        self.game = GameState(max_steps, config)
        self.result = None
        self.action_space = spaces.discrete.Discrete(self.ACTION_LEN)
        self.observation_space = \
            image_setting.detect_space(*self.game.screen_size(), self.game.symbols())
        self.image_setting = image_setting
        self.__cache()

    def __cache(self) -> None:
        # Keep the latest PlayerState around for reward/render/repr.
        self.result = self.game.prev()

    def screen_size(self) -> Tuple[int, int]:
        """
        returns (height, width)
        """
        return self.game.screen_size()

    def get_key_to_action(self) -> Dict[str, str]:
        """Return the mapping from keyboard key to action name."""
        # BUGFIX: previously returned self.ACION_MEANINGS (typo), which
        # raised AttributeError.
        return self.ACTION_MEANINGS

    def get_dungeon(self) -> List[str]:
        """Return the current dungeon screen as a list of strings."""
        return self.result.dungeon

    def get_config(self) -> dict:
        """Return the game configuration as a dict."""
        config = self.game.dump_config()
        return json.loads(config)

    def save_config(self, fname: str) -> None:
        """Write the game configuration (JSON) to ``fname``."""
        with open(fname, 'w') as f:
            f.write(self.game.dump_config())

    def save_actions(self, fname: str) -> None:
        """Write the action history of this game to ``fname``."""
        with open(fname, 'w') as f:
            f.write(self.game.dump_history())

    def replay(self, interval_ms: int = 100) -> None:
        """Replay the recorded game in the terminal (UNIX only)."""
        if not hasattr(rogue_gym_inner, 'replay'):
            raise RuntimeError('Currently replay is only supported on UNIX')
        rogue_gym_inner.replay(self.game, interval_ms)
        print()

    def play_cli(self) -> None:
        """Play the game interactively in the terminal (UNIX only)."""
        if not hasattr(rogue_gym_inner, 'play_cli'):
            raise RuntimeError('CLI playing is only supported on UNIX')
        rogue_gym_inner.play_cli(self.game)
        print()

    def state_to_image(
        self,
        state: PlayerState,
        setting: Optional[ImageSetting] = None
    ) -> ndarray:
        """Convert PlayerState to 3d array, according to setting or
        self.image_setting.
        """
        if setting is None:
            setting = self.image_setting
        return setting.expand(state)

    def __step_str(self, actions: str) -> int:
        # Feed each key to the game as its ASCII code.
        for act in actions:
            self.game.react(ord(act))
        return len(actions)

    def step(self, action: Union[int, str]) -> Tuple[PlayerState, float, bool, dict]:
        """
        Do action.

        :param action: key board inputs to rogue (e.g. "hjk" or "hh>"),
            or an index into ``ACTIONS``.
        :return: (state, reward, done, info); the reward is the gold
            gained since the previous step.
        """
        gold_before = self.result.gold
        if isinstance(action, str):
            self.__step_str(action)
        else:
            try:
                s = self.ACTIONS[action]
                self.__step_str(s)
            except Exception as e:
                raise ValueError("Invalid action: {} causes {}".format(action, e))
        self.__cache()
        reward = self.result.gold - gold_before
        return self.result, reward, self.result.is_terminal, {}

    def seed(self, seed: int) -> None:
        """
        Set seed.
        This seed is not used till the game is reseted.

        :param seed: seed value for RNG
        """
        self.game.set_seed(seed)

    def render(self, mode: str = 'human', close: bool = False) -> None:
        """
        STUB: prints the current state regardless of ``mode``.
        """
        print(self.result)

    def reset(self) -> PlayerState:
        """reset game state"""
        self.game.reset()
        self.__cache()
        return self.result

    def __repr__(self):
        return self.result.__repr__()

    @property
    def unwrapped(self):
        return self
from .rogue_env import PlayerState, RogueEnv
from .parallel import ParallelRogueEnv
from gym import Env, Wrapper
from typing import Iterable, List, Tuple, Union
def check_rogue_env(env: Env) -> None:
    """Raise ValueError unless ``env`` ultimately wraps a ``RogueEnv``."""
    if not isinstance(env.unwrapped, RogueEnv):
        # BUGFIX: the message previously named a nonexistent class 'RoguEnv'.
        raise ValueError('env have to be a wrapper of RogueEnv')
class StairRewardEnv(Wrapper):
    """Wrapper that adds a bonus reward each time the agent reaches a
    new (deeper) dungeon level.
    """

    def __init__(self, env: Env, stair_reward: float = 50.0) -> None:
        check_rogue_env(env)
        # Bonus granted once per newly reached dungeon level.
        self.stair_reward = stair_reward
        # Deepest dungeon level seen so far in this episode.
        self.current_level = 1
        super().__init__(env)

    def step(self, action: Union[int, str]) -> Tuple[PlayerState, float, bool, dict]:
        state, reward, end, info = self.env.step(action)
        # 'dungeon_level' presumably increments when the agent takes the
        # '>' downstair action -- confirm against RogueEnv/PlayerState.
        current = self.unwrapped.result.status['dungeon_level']
        if self.current_level < current:
            self.current_level = current
            reward += self.stair_reward
        return state, reward, end, info

    def reset(self) -> PlayerState:
        # Start each episode back at level 1.
        self.current_level = 1
        return super().reset()

    def __repr__(self):
        return super().__repr__()
class FirstFloorEnv(StairRewardEnv):
    """StairRewardEnv variant that ends the episode as soon as the agent
    descends to dungeon level 2 (i.e. leaves the first floor).
    """

    def step(self, action: Union[int, str]) -> Tuple[PlayerState, float, bool, dict]:
        state, reward, end, info = super().step(action)
        # current_level is maintained by StairRewardEnv.step.
        if self.current_level == 2:
            end = True
        return state, reward, end, info

    def __repr__(self):
        return super().__repr__()
class StairRewardParallel(ParallelRogueEnv):
    """Parallel counterpart of ``StairRewardEnv``: each worker gets a
    bonus reward the first time it reaches a new dungeon level.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Accept an optional 'stair_reward' kwarg without forwarding it to
        # ParallelRogueEnv (idiomatic pop-with-default instead of in/del).
        self.stair_reward = kwargs.pop('stair_reward', 50.0)
        super().__init__(*args, **kwargs)
        # Deepest dungeon level seen so far, per worker.
        self.current_levels = [1] * self.num_workers

    def step(
        self,
        action: Union[Iterable[int], str]
    ) -> Tuple[List[PlayerState], List[float], List[bool], List[dict]]:
        state, reward, end, info = super().step(action)
        for i in range(self.num_workers):
            level = state[i].status['dungeon_level']
            if self.current_levels[i] < level:
                # Grant the stair bonus once per newly reached level.
                reward[i] += self.stair_reward
                self.current_levels[i] = level
        return state, reward, end, info

    def reset(self) -> List[PlayerState]:
        """Reset all workers and the per-worker level trackers.

        BUGFIX: previously the level trackers were never reset, so no
        stair bonus was granted after the first episode (inconsistent
        with StairRewardEnv.reset).
        """
        self.current_levels = [1] * self.num_workers
        return super().reset()
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Optional
import numpy as np
import ragged_buffer
from entity_gym.env import ObsSpace
from ragged_buffer import RaggedBufferF32
@dataclass
class TranslationConfig:
    """Settings for translating position features.

    Attributes:
        reference_entity: Entity type of the entity which will be placed at the origin.
        position_features: Names of position features used for translation.
        rotation_vec_features: Names of features giving the direction of the reference entity as a vector. All entities are rotated by this value.
        rotation_angle_feature: Name of feature that gives the direction of the reference entity in radians. All entities are rotated by this value.
        add_dist_feature: Adds a feature that is the distance to the reference entity.
    """

    reference_entity: str
    position_features: List[str]
    rotation_vec_features: Optional[List[str]] = None
    rotation_angle_feature: Optional[str] = None
    add_dist_feature: bool = False

    def __post_init__(self) -> None:
        # The two rotation specifications are mutually exclusive.
        both_given = (
            self.rotation_vec_features is not None
            and self.rotation_angle_feature is not None
        )
        assert (
            not both_given
        ), "Only one of rotation_vec_features and rotation_angle_feature can be specified"
class TranslatePositions(TranslationConfig):
    """Applies the feature transforms described by a ``TranslationConfig``:
    position features of all entities are translated (and optionally
    rotated) into the reference entity's frame; optionally the distance
    to the reference entity is appended as an extra feature.
    """

    def __init__(
        self,
        cfg: TranslationConfig,
        obs_space: ObsSpace,
    ):
        super().__init__(**cfg.__dict__)
        # For every non-reference entity type that has *all* position
        # features, record the feature column indices to transform.
        self.feature_indices = {
            entity_name: [
                entity.features.index(feature_name)
                for feature_name in cfg.position_features
            ]
            for entity_name, entity in obs_space.entities.items()
            if entity_name != cfg.reference_entity
            and all(
                [
                    feature_name in entity.features
                    for feature_name in cfg.position_features
                ]
            )
        }
        # Column indices of the position features on the reference entity.
        self.reference_indices = [
            obs_space.entities[cfg.reference_entity].features.index(feature_name)
            for feature_name in cfg.position_features
        ]
        # At most one of the two rotation specs is set (enforced by cfg).
        self.orientation_vec_indices = (
            [
                obs_space.entities[cfg.reference_entity].features.index(feature_name)
                for feature_name in cfg.rotation_vec_features
            ]
            if cfg.rotation_vec_features is not None
            else None
        )
        self.orientation_angle_index = (
            obs_space.entities[cfg.reference_entity].features.index(
                cfg.rotation_angle_feature
            )
            if cfg.rotation_angle_feature is not None
            else None
        )
        self.reference_entity = cfg.reference_entity
        self.add_dist_feature = cfg.add_dist_feature

    def apply(self, entities: Dict[str, RaggedBufferF32]) -> None:
        """Transform ``entities`` in place. No-op when the reference entity
        is absent from this batch.
        """
        if self.reference_entity not in entities:
            return
        reference_entity = entities[self.reference_entity]
        origin = reference_entity[:, :, self.reference_indices]
        if self.orientation_vec_indices is not None:
            orientation: Optional[RaggedBufferF32] = reference_entity[
                :, :, self.orientation_vec_indices
            ]
        elif self.orientation_angle_index is not None:
            # Convert the angle feature into a (cos, sin) direction vector.
            angle = reference_entity[:, :, self.orientation_angle_index].as_array()
            orientation = RaggedBufferF32.from_array(
                np.hstack([np.cos(angle), np.sin(angle)]).reshape(-1, 1, 2)
            )
            # TODO: ragged_buffer.translate_rotate assumes that all input arguments are views, so apply identity view
            orientation = orientation[:, :, :]
        else:
            orientation = None
        for entity_name, indices in self.feature_indices.items():
            if entity_name in entities:
                if orientation is not None:
                    ragged_buffer.translate_rotate(
                        entities[entity_name][:, :, indices],
                        origin,
                        orientation,
                    )
                else:
                    # Pure translation: subtract the reference position.
                    feats = entities[entity_name][:, :, indices]
                    feats -= origin
                if self.add_dist_feature:
                    # Append the Euclidean distance to the reference entity
                    # as an extra feature column. (TODO: efficiency)
                    # BUGFIX: the norm was previously computed twice, with
                    # the first (dead) result discarded.
                    ea = entities[entity_name].as_array()
                    dist = np.linalg.norm(ea[:, indices], axis=1).reshape(-1, 1)
                    entities[entity_name] = RaggedBufferF32.from_flattened(
                        np.concatenate([ea, dist], axis=1),
                        entities[entity_name].size1(),
                    )

    def transform_obs_space(self, obs_space: ObsSpace) -> ObsSpace:
        """Return ``obs_space`` with the extra distance feature appended to
        every transformed entity type (when ``add_dist_feature`` is set).
        """
        if self.add_dist_feature:
            obs_space = deepcopy(obs_space)
            for entity_name in self.feature_indices.keys():
                features = list(obs_space.entities[entity_name].features)
                features.append("TranslatePositions.distance")
                obs_space.entities[entity_name].features = features
        return obs_space
import logging
import math
from dataclasses import dataclass
from typing import Literal, Mapping, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch_scatter
from entity_gym.env.environment import ObsSpace
from ragged_buffer import RaggedBufferI64
from torch.nn import functional as F
from rogue_net.relpos_encoding import RelposEncoding, RelposEncodingConfig
logger = logging.getLogger(__name__)
@dataclass
class TransformerConfig:
    """Transformer network hyperparameters.

    :param embd_pdrop: Dropout probability for embedding layer.
    :param resid_pdrop: Dropout probability for residual branches.
    :param attn_pdrop: Dropout probability for attention.
    :param n_layer: Number of transformer layers.
    :param n_head: Number of attention heads (must divide ``d_model``).
    :param d_model: Dimension of embedding.
    :param pooling: Replace attention layer with ``"mean"``, ``"max"``, or ``"meanmax"`` pooling
        (``None`` keeps regular self-attention).
    :param relpos_encoding: Relative positional encoding settings (``None`` disables it).
    """

    embd_pdrop: float = 0.0
    resid_pdrop: float = 0.0
    attn_pdrop: float = 0.0
    n_layer: int = 2
    n_head: int = 2
    d_model: int = 32
    pooling: Optional[Literal["mean", "max", "meanmax"]] = None
    relpos_encoding: Optional[RelposEncodingConfig] = None
class Pool(nn.Module):
    """Entity pooling layer used in place of self-attention.

    Projects each entity, pools the entities of each sequence with a
    mean/max/mean+max reduction, projects the pooled vector back to
    ``d_model``, and broadcasts the result to every entity of the sequence.

    (The previous docstring was copy-pasted from the attention layer and
    described multi-head self-attention, which this class does not do.)
    """

    def __init__(self, config: TransformerConfig) -> None:
        super().__init__()
        assert config.pooling is not None
        # projections
        self.prepool = nn.Linear(config.d_model, config.d_model)
        if config.pooling == "meanmax":
            # mean and max vectors are concatenated before the projection.
            self.proj = nn.Linear(2 * config.d_model, config.d_model)
        else:
            self.proj = nn.Linear(config.d_model, config.d_model)
        # regularization
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        self.reduction_op = config.pooling

    def forward(
        self, x: torch.Tensor, batch_index: torch.Tensor, shape: RaggedBufferI64
    ) -> torch.Tensor:
        x = self.prepool(x)
        # Explicit dispatch on the configured op (the old substring checks
        # like `"mean" in self.reduction_op` were fragile and recomputed
        # the reductions on the "meanmax" path).
        if self.reduction_op == "mean":
            xpool = torch_scatter.scatter(
                src=x, dim=0, index=batch_index, reduce="mean"
            )
        elif self.reduction_op == "max":
            xpool = torch_scatter.scatter(src=x, dim=0, index=batch_index, reduce="max")
        elif self.reduction_op == "meanmax":
            xmean = torch_scatter.scatter(
                src=x, dim=0, index=batch_index, reduce="mean"
            )
            xmax = torch_scatter.scatter(src=x, dim=0, index=batch_index, reduce="max")
            xpool = torch.cat([xmean, xmax], dim=1)
        else:
            raise ValueError("Unknown pooling op: {!r}".format(self.reduction_op))
        x = self.proj(xpool)
        # Broadcast the pooled representation back to every entity.
        return self.resid_drop(x[batch_index])
class RaggedAttention(nn.Module):
    """
    A ragged multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.

    Input sequences have variable length (described by ``shape``); they are
    either reshaped directly (all sequences equal length) or pad-packed into
    a dense batch, attended over with masking, and scattered back.
    """

    def __init__(
        self, config: TransformerConfig, relpos_encoding: Optional[RelposEncoding]
    ) -> None:
        super().__init__()
        assert config.d_model % config.n_head == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(config.d_model, config.d_model)
        self.query = nn.Linear(config.d_model, config.d_model)
        self.value = nn.Linear(config.d_model, config.d_model)
        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        # output projection
        self.proj = nn.Linear(config.d_model, config.d_model)
        self.n_head = config.n_head
        # Shared with the enclosing Transformer; may be None.
        self.relpos_encoding = relpos_encoding

    def forward(
        self,
        x: torch.Tensor,
        batch_index: torch.Tensor,
        shape: RaggedBufferI64,
        visible: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # For more details on the implementation, see: https://github.com/entity-neural-network/incubator/pull/119
        device = x.device
        padpack = shape.padpack()
        # TODO: only compute indices once
        if padpack is None:
            # Fast path: every sequence has the same length, so a plain
            # reshape into (batch, seq, features) suffices.
            nbatch = shape.size0()
            nseq = shape.size1(0) if shape.items() > 0 else 0
            x = x.reshape(nbatch, nseq, x.size(-1))
            if visible is not None:
                # Entities with higher "visible" rank cannot attend to lower
                # ones -- TODO confirm intended semantics of `visible`.
                attn_mask: Optional[torch.Tensor] = (
                    visible.reshape(nbatch, nseq, 1) > visible.reshape(nbatch, 1, nseq)
                ).unsqueeze(1)
            else:
                attn_mask = None
        else:
            # Ragged path: pad/pack sequences into a dense batch and mask
            # out attention between entities from different sequences.
            (
                padpack_index,
                padpack_batch,
                padpack_inverse_index,
            ) = padpack
            tpadpack_index = torch.tensor(
                padpack_index, dtype=torch.long, device=device
            )
            x = x[tpadpack_index]
            tpadpack_batch = torch.tensor(padpack_batch, device=device)
            attn_mask = (
                tpadpack_batch.unsqueeze(2) != tpadpack_batch.unsqueeze(1)
            ).unsqueeze(1)
            if visible is not None:
                visible = visible[tpadpack_index]
                attn_mask.logical_or_(
                    (visible.unsqueeze(2) > visible.unsqueeze(1)).unsqueeze(1)
                )
        B, T, C = x.size()
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = (
            self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        )  # (B, nh, T, hs)
        q = (
            self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        )  # (B, nh, T, hs)
        v = (
            self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        )  # (B, nh, T, hs)

        # full self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        # Relative positional encoding (keys)
        if self.relpos_encoding is not None:
            att += self.relpos_encoding.relattn_logits(q)
        if attn_mask is not None:
            # Large negative logit so masked positions get ~zero weight.
            att = att.masked_fill(attn_mask, -1e9)
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        # Relative positional encoding (values)
        if self.relpos_encoding is not None:
            y += self.relpos_encoding.relpos_values(att, x)
        y = (
            y.transpose(1, 2).contiguous().view(B, T, C)
        )  # re-assemble all head outputs side by side

        # output projection
        y = self.resid_drop(self.proj(y))
        if padpack is None:
            # Undo the reshape back to the flat (entities, features) layout.
            return y.reshape(batch_index.size(0), y.size(-1))  # type: ignore
        else:
            # Undo the pad/pack via the inverse index.
            return y.reshape(y.size(0) * y.size(1), y.size(2))[torch.tensor(padpack_inverse_index, dtype=torch.long, device=device)]  # type: ignore
class Block(nn.Module):
    """Pre-norm transformer block: attention (or pooling) plus a GELU
    feed-forward network, each wrapped in a residual connection.
    """

    def __init__(
        self, config: TransformerConfig, relpos_encoding: Optional[RelposEncoding]
    ) -> None:
        super().__init__()
        self.ln1 = nn.LayerNorm(config.d_model)
        self.ln2 = nn.LayerNorm(config.d_model)
        if config.pooling is None:
            self.attn: Union[Pool, RaggedAttention] = RaggedAttention(
                config, relpos_encoding
            )
        else:
            self.attn = Pool(config)
        hidden = 4 * config.d_model
        self.mlp = nn.Sequential(
            nn.Linear(config.d_model, hidden),
            nn.GELU(),
            nn.Linear(hidden, config.d_model),
            nn.Dropout(config.resid_pdrop),
        )

    def forward(
        self,
        x: torch.Tensor,
        batch_index: torch.Tensor,
        shape: RaggedBufferI64,
        visible: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        attn_out = self.attn(self.ln1(x), batch_index, shape, visible)
        x = x + attn_out
        mlp_out = self.mlp(self.ln2(x))
        return x + mlp_out
class Transformer(nn.Module):
    """Stack of ragged transformer blocks with optional relative positional
    encoding, operating on flat per-entity feature rows described by a
    ragged ``shape``.
    """

    def __init__(self, config: TransformerConfig, obs_space: ObsSpace) -> None:
        super().__init__()
        if config.relpos_encoding is not None:
            # One RelposEncoding instance shared by all blocks.
            self.relpos_encoding: Optional[RelposEncoding] = RelposEncoding(
                config.relpos_encoding,
                obs_space,
                dmodel=config.d_model,
                dhead=config.d_model // config.n_head,
            )
        else:
            self.relpos_encoding = None
        self.drop = nn.Dropout(config.embd_pdrop)
        # nn.Sequential is used only as a module container; forward()
        # iterates the blocks manually to pass the extra arguments.
        self.blocks = nn.Sequential(
            *[Block(config, self.relpos_encoding) for _ in range(config.n_layer)]
        )
        self.apply(self._init_weights)
        logger.info(
            "number of parameters: %e", sum(p.numel() for p in self.parameters())
        )

    def _init_weights(self, module: nn.Module) -> None:
        # GPT-style initialization: small normal weights, zero biases,
        # identity LayerNorm.
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def forward(
        self,
        x: torch.Tensor,
        batch_index: torch.Tensor,
        shape: RaggedBufferI64,
        input_feats: Mapping[str, torch.Tensor],
        index_map: torch.Tensor,
        entity_type: torch.Tensor,
        visible: Optional[torch.Tensor],
    ) -> torch.Tensor:
        x = self.drop(x)
        if self.relpos_encoding is not None:
            device = x.device
            padpack = shape.padpack()
            if padpack is None:
                tpadpack_index = None
            else:
                tpadpack_index = torch.tensor(
                    padpack[0], dtype=torch.long, device=device
                )
            # Precompute relative keys/values once for the whole stack.
            relkeysvals: Optional[
                Tuple[torch.Tensor, torch.Tensor]
            ] = self.relpos_encoding.keys_values(
                input_feats,
                index_map,
                tpadpack_index,
                shape,
                entity_type,
            )
            # Side effect: cached on the shared encoding object, where the
            # attention layers presumably read it -- TODO confirm against
            # RelposEncoding.relattn_logits/relpos_values.
            self.relpos_encoding.cached_rkvs = relkeysvals
        else:
            relkeysvals = None
        for block in self.blocks:
            x = block(x, batch_index, shape, visible)
        return x
from typing import Dict, Mapping, Optional, Tuple
import numpy as np
import ragged_buffer
import torch
from entity_gym.env import ObsSpace
from entity_gym.simple_trace import Tracer
from ragged_buffer import RaggedBufferF32, RaggedBufferI64
from torch import nn
from rogue_net.input_norm import InputNorm
from rogue_net.translate_positions import TranslatePositions, TranslationConfig
class EntityEmbedding(nn.Module):
    """Embeds every entity's feature vector into a shared ``d_model`` space.

    Each entity type gets its own normalization + linear embedding (or a
    learned constant vector when the type has no features). The forward
    pass flattens all entities of a batch into a single (entities, d_model)
    tensor plus the ragged bookkeeping tensors the transformer needs.
    """

    def __init__(
        self,
        obs_space: ObsSpace,
        feature_transforms: Optional[TranslationConfig],
        d_model: int,
    ) -> None:
        super().__init__()

        if feature_transforms is not None:
            self.feature_transforms: Optional[TranslatePositions] = TranslatePositions(
                feature_transforms, obs_space
            )
            # The transform may append features (e.g. distance), so embed
            # against the transformed space.
            obs_space = self.feature_transforms.transform_obs_space(obs_space)
        else:
            self.feature_transforms = None

        embeddings: Dict[str, nn.Module] = {}
        for name, entity in obs_space.entities.items():
            if entity.features:
                embeddings[name] = nn.Sequential(
                    InputNorm(len(entity.features)),
                    nn.Linear(len(entity.features), d_model),
                    nn.ReLU(),
                    nn.LayerNorm(d_model),
                )
            else:
                # No features: a single learned vector per entity type.
                embeddings[name] = FeaturelessEmbedding(d_model)
        self.embeddings = nn.ModuleDict(embeddings)

    def forward(
        self,
        entities: Mapping[str, RaggedBufferF32],
        tracer: Tracer,
        device: torch.device,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        RaggedBufferI64,
        Mapping[str, torch.Tensor],
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
    ]:
        """Returns (embeddings, batch index, index map, raw feature tensors,
        flat index map, entity types, per-sequence lengths).
        """
        entity_embeds = []
        index_offsets = {}
        index_offset = 0
        entity_type = []
        if "__global__" in entities:
            # Broadcast the global feature row onto every other entity type.
            globals = entities["__global__"]
            entities = {
                label: ragged_buffer.cat([feats, globals], dim=2)
                if label != "__global__"
                else feats
                for label, feats in entities.items()
            }
        if self.feature_transforms:
            # apply() mutates in place, so work on copies of the buffers.
            entities = {name: feats.clone() for name, feats in entities.items()}
            self.feature_transforms.apply(entities)
        tentities = {
            entity: torch.tensor(features.as_array(), device=device)
            for entity, features in entities.items()
        }
        for i, (entity, embedding) in enumerate(self.embeddings.items()):
            # We may have environment states that do not contain every possible entity
            if entity in entities:
                batch = tentities[entity]
                emb = embedding(batch)
                entity_embeds.append(emb)
                # Entity-type id (the embedding's position i) per entity row.
                entity_type.append(
                    torch.full((emb.size(0), 1), float(i), device=device)
                )
                # Row offset of this entity type within the concatenated tensor.
                index_offsets[entity] = index_offset
                index_offset += batch.size(0)
        x = torch.cat(entity_embeds)

        with tracer.span("ragged_metadata"):
            # NOTE(review): this indexes entities[name] for every embedding
            # key, while the loop above tolerated missing entity types --
            # confirm a missing type cannot reach this point.
            real_entities = {name: entities[name] for name in self.embeddings.keys()}
            lengths = sum(entity.size1() for entity in real_entities.values())
            # Which batch/sequence each entity row belongs to.
            batch_index = np.concatenate(
                [
                    entity.indices(0).as_array().flatten()
                    for entity in real_entities.values()
                ]
            )
            # Maps the per-type concatenation order back to per-sequence order.
            index_map = ragged_buffer.cat(
                [
                    entity.flat_indices() + index_offsets[name]
                    for name, entity in real_entities.items()
                    if name in self.embeddings
                ],
                dim=1,
            )
            tindex_map = torch.tensor(index_map.as_array().flatten(), device=device)
            tbatch_index = torch.tensor(batch_index, device=device)
            tlengths = torch.tensor(lengths, device=device)

        # Reorder all per-entity tensors into per-sequence order.
        x = x[tindex_map]
        entity_types = torch.cat(entity_type)[tindex_map]
        tbatch_index = tbatch_index[tindex_map]

        return (
            x,
            tbatch_index,
            index_map,
            tentities,
            tindex_map,
            entity_types,
            tlengths,
        )
class FeaturelessEmbedding(nn.Module):
    """Learned constant embedding for entity types that carry no features.

    The input's feature values are ignored; the module returns one copy of
    a single learned ``d_model``-dimensional vector per input row.
    """

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        self.embedding = nn.Parameter(torch.randn(1, d_model))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        rows = x.size(0)
        return self.embedding.repeat(rows, 1)
import math
from typing import Optional, Tuple
import numpy as np
import numpy.typing as npt
import torch
from entity_gym.env import VecActionMask, VecSelectEntityActionMask
from ragged_buffer import RaggedBufferI64
from torch import nn
from torch.distributions.categorical import Categorical
from rogue_net.ragged_tensor import RaggedTensor
class PaddedSelectEntityActionHead(nn.Module):
    """
    Action head for selecting entities.

    Each "actor" entity attends over the "actee" entities of its sequence:
    actors are projected to queries, actees to keys, and the scaled dot
    products form a categorical distribution over actees. Ragged batches
    are handled by padding to the max actor/actee count and masking.

    See https://github.com/entity-neural-network/incubator/pull/109 for more details.
    """

    def __init__(self, d_model: int, d_qk: int) -> None:
        super().__init__()
        self.d_model = d_model
        self.d_qk = d_qk
        self.query_proj = nn.Linear(d_model, d_qk)
        self.key_proj = nn.Linear(d_model, d_qk)

    def forward(
        self,
        x: RaggedTensor,
        index_offsets: RaggedBufferI64,
        mask: VecActionMask,
        prev_actions: Optional[RaggedBufferI64],
    ) -> Tuple[
        torch.Tensor, npt.NDArray[np.int64], torch.Tensor, torch.Tensor, torch.Tensor
    ]:
        """Returns (actions, actor counts, log-probs, entropies, logits).
        When ``prev_actions`` is given, those actions are evaluated instead
        of sampling new ones.
        """
        assert isinstance(
            mask, VecSelectEntityActionMask
        ), f"Expected SelectEntityActionMaskBatch, got {type(mask)}"
        device = x.data.device

        actor_lengths = mask.actors.size1()
        if len(mask.actors) == 0:
            # No actors anywhere in the batch: return empty tensors.
            return (
                torch.zeros((0), dtype=torch.int64, device=device),
                actor_lengths,
                torch.zeros((0), dtype=torch.float32, device=device),
                torch.zeros((0), dtype=torch.float32, device=device),
                torch.zeros((0), dtype=torch.float32, device=device),
            )

        # index_offsets converts per-sequence entity indices into row
        # indices of the flat embedding tensor.
        actors = torch.tensor(
            (mask.actors + index_offsets).as_array(), device=device
        ).squeeze(-1)
        actor_embeds = x.data[actors]
        queries = self.query_proj(actor_embeds).squeeze(1)

        # Pad queries to (batch, max_actors, d_qk); qindices marks the
        # positions of the real (non-padding) actors.
        max_actors = actor_lengths.max()
        # TODO: can omit rows that are only padding
        padded_queries = torch.zeros(
            len(actor_lengths) * max_actors, self.d_qk, device=device
        )
        qindices = torch.tensor(
            (mask.actors.indices(1) + mask.actors.indices(0) * max_actors)
            .as_array()
            .flatten(),
            device=device,
        )
        padded_queries[qindices] = queries
        padded_queries = padded_queries.view(len(actor_lengths), max_actors, self.d_qk)
        query_mask = torch.zeros(len(actor_lengths) * max_actors, device=device)
        query_mask[qindices] = 1
        query_mask = query_mask.view(len(actor_lengths), max_actors)

        actee_lengths = mask.actees.size1()
        actees = torch.tensor(
            (mask.actees + index_offsets).as_array(), device=device
        ).squeeze(-1)
        actee_embeds = x.data[actees]
        keys = self.key_proj(actee_embeds).squeeze(1)

        # Same padding scheme for the keys/actees. NOTE(review): padding
        # keys are initialized with ones rather than zeros -- presumably
        # harmless since padded positions are masked below; confirm.
        max_actees = actee_lengths.max()
        padded_keys = torch.ones(
            len(actee_lengths) * max_actees, self.d_qk, device=device
        )
        kindices = torch.tensor(
            (mask.actees.indices(1) + mask.actees.indices(0) * max_actees)
            .as_array()
            .flatten(),
            device=device,
        )
        padded_keys[kindices] = keys
        padded_keys = padded_keys.view(len(actee_lengths), max_actees, self.d_qk)
        key_mask = torch.zeros(len(actee_lengths) * max_actees, device=device)
        key_mask[kindices] = 1
        key_mask = key_mask.view(len(actee_lengths), max_actees)

        # Scaled dot-product scores: (batch, max_actors, max_actees).
        logits = torch.bmm(padded_queries, padded_keys.transpose(1, 2)) * (
            1.0 / math.sqrt(self.d_qk)
        )
        # A (actor, actee) pair is valid only if both sides are real.
        logits_mask = torch.bmm(query_mask.unsqueeze(2), key_mask.unsqueeze(1))
        # Firstly mask off the conditions that are not available. This is the typical masked transformer approach
        logits = logits.masked_fill(logits_mask == 0, -1e9)
        dist = Categorical(logits=logits)
        if prev_actions is None:
            action = dist.sample()
        else:
            # Evaluate previously taken actions: scatter them into the
            # padded layout so log_prob/entropy line up.
            action = torch.tensor(prev_actions.as_array(), device=device).flatten()
            padded_actions = torch.zeros(
                (logits.size(0) * logits.size(1)), dtype=torch.long, device=device
            )
            padded_actions[qindices] = action
            action = padded_actions.view(len(actor_lengths), max_actors)
        logprob = dist.log_prob(action)
        entropy = dist.entropy()
        # Strip the padding again before returning.
        return (
            action.flatten()[qindices],
            actor_lengths,
            logprob.flatten()[qindices],
            entropy.flatten()[qindices],
            dist.logits,
        )
import dataclasses
from dataclasses import dataclass
from typing import Dict, List, Mapping, Optional, Tuple, Type, TypeVar
import numpy as np
import numpy.typing as npt
import torch
import torch.nn as nn
import torch_scatter
from entity_gym.env import ActionSpace, ObsSpace, VecActionMask
from entity_gym.env.environment import (
CategoricalActionSpace,
Entity,
GlobalCategoricalActionSpace,
SelectEntityActionSpace,
)
from entity_gym.simple_trace import Tracer
from ragged_buffer import (
RaggedBuffer,
RaggedBufferBool,
RaggedBufferF32,
RaggedBufferI64,
)
from rogue_net.categorical_action_head import CategoricalActionHead
from rogue_net.embedding import EntityEmbedding
from rogue_net.ragged_tensor import RaggedTensor
from rogue_net.select_entity_action_head import PaddedSelectEntityActionHead
from rogue_net.transformer import Transformer, TransformerConfig
from rogue_net.translate_positions import TranslationConfig
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
def tensor_dict_to_ragged(
    rb_cls: Type[RaggedBuffer[ScalarType]],
    d: Dict[str, torch.Tensor],
    lengths: Dict[str, np.ndarray],
) -> Dict[str, RaggedBuffer[ScalarType]]:
    """Convert a dict of flat torch tensors into ragged buffers.

    :param rb_cls: ragged buffer class used to wrap each converted array
    :param d: map from key to a flat (or 2D) tensor of concatenated items
    :param lengths: map from key to the per-sequence lengths used to split
        the flattened array back into ragged sequences
    :return: map from key to the reconstructed ragged buffer
    """
    converted: Dict[str, RaggedBuffer[ScalarType]] = {}
    for key, tensor in d.items():
        array = tensor.cpu().numpy()
        # `from_flattened` expects a 2D (items, features) array.
        array = array.reshape(-1, 1) if array.ndim == 1 else array
        converted[key] = rb_cls.from_flattened(array, lengths[key])
    return converted
@dataclass
class RogueNetConfig(TransformerConfig):
    """RogueNet network parameters.

    :param embd_pdrop: Dropout probability for embedding layer.
    :param resid_pdrop: Dropout probability for residual branches.
    :param attn_pdrop: Dropout probability for attention.
    :param n_layer: Number of transformer layers.
    :param n_head: Number of attention heads.
    :param d_model: Dimension of embedding.
    :param pooling: Replace attention layer with ``"mean"``, ``"max"``, or ``"meanmax"`` pooling.
    :param relpos_encoding: Relative positional encoding settings.
    :param d_qk: Dimension of keys and queries in select-entity action heads.
    :param translation: Settings for transforming all position features to be centered on one entity.
    """

    # Key/query width used by PaddedSelectEntityActionHead.
    d_qk: int = 16
    # When set, position features are re-centered on a reference entity.
    translation: Optional[TranslationConfig] = None
class RogueNet(nn.Module):
    """Entity-based transformer policy network.

    Embeds ragged per-entity observations, runs them through a transformer
    backbone, and decodes one action distribution per action head, plus
    optional auxiliary regression outputs (e.g. a value function).
    """

    def __init__(
        self,
        cfg: RogueNetConfig,
        obs_space: ObsSpace,
        action_space: Dict[str, ActionSpace],
        regression_heads: Optional[Dict[str, int]] = None,
    ):
        """Build embedding, backbone, action heads, and auxiliary heads.

        :param cfg: network hyperparameters
        :param obs_space: observation space the network will receive
        :param action_space: map from action name to its action space
        :param regression_heads: optional map from auxiliary head name to
            its output dimension
        """
        super().__init__()
        global_features = obs_space.global_features
        # Internal copy of the obs space: global features are folded into
        # every entity's feature list instead of being a separate input.
        _obs_space = dataclasses.replace(obs_space, global_features=[])
        if len(global_features) > 0:
            _obs_space.entities = {
                label: Entity(entity.features + global_features)
                for label, entity in _obs_space.entities.items()
            }
        if any(
            isinstance(a, GlobalCategoricalActionSpace) for a in action_space.values()
        ):
            # Global categorical actions are decoded from a synthetic
            # "__global__" entity carrying only the global features.
            _obs_space.entities = {k: v for k, v in _obs_space.entities.items()}
            _obs_space.entities["__global__"] = Entity(features=global_features)
        self.d_model = cfg.d_model
        self.action_space = action_space
        self.obs_space = obs_space
        self._obs_space = _obs_space
        self.embedding = EntityEmbedding(_obs_space, cfg.translation, cfg.d_model)
        self.backbone = Transformer(cfg, _obs_space)
        self.action_heads = create_action_heads(action_space, cfg.d_model, cfg.d_qk)
        self.auxiliary_heads = (
            nn.ModuleDict(
                {
                    name: regression_head(cfg.d_model, d_out)
                    for name, d_out in regression_heads.items()
                }
            )
            if regression_heads is not None
            else None
        )
        # Per-entity column selectors applied to incoming features when the
        # received obs space has extra features (populated by set_obs_filter).
        self.obs_filter: Dict[str, npt.NDArray[np.int64]] = {}

    def device(self) -> torch.device:
        """Device on which the network's parameters live."""
        return next(self.parameters()).device

    def batch_and_embed(
        self,
        entities: Mapping[str, RaggedBufferF32],
        visible: Mapping[str, RaggedBufferBool],
        tracer: Tracer,
    ) -> RaggedTensor:
        """Embed raw entity features and run the transformer backbone.

        :param entities: per-entity-type ragged feature buffers
        :param visible: per-entity-type visibility flags (may be empty)
        :param tracer: profiling trace spans
        :return: ragged tensor of per-entity representations
        """
        with tracer.span("embedding"):
            # Ensure consistent dictionary ordering
            entities = {
                name: entities[name][:, :, self.obs_filter[name]]  # type: ignore
                if name in self.obs_filter
                else entities[name]
                for name in list(self.obs_space.entities.keys()) + ["__global__"]
                if name in entities
            }
            (
                x,
                tbatch_index,
                index_map,
                tentities,
                tindex_map,
                entity_types,
                tlengths,
            ) = self.embedding(entities, tracer, self.device())
        with tracer.span("visibility_mask"):
            if len(visible) > 0:
                # Entity types without an explicit visibility buffer are
                # treated as fully visible.
                visibilities = []
                for etype, feats in entities.items():
                    if etype in visible:
                        visibilities.append(
                            torch.tensor(
                                visible[etype].as_array(), device=self.device()
                            ).view(-1)
                        )
                    else:
                        visibilities.append(
                            torch.ones(
                                feats.items(),
                                dtype=torch.bool,
                                device=self.device(),
                            )
                        )
                tvisible: Optional[torch.Tensor] = torch.cat(visibilities, dim=0)[
                    tindex_map
                ]
            else:
                tvisible = None
        with tracer.span("backbone"):
            x = self.backbone(
                x,
                tbatch_index,
                index_map,
                tentities,
                tindex_map,
                entity_types,
                tvisible,
            )
        return RaggedTensor(
            x,
            tbatch_index,
            tlengths,
        )

    def get_auxiliary_head(
        self,
        entities: Mapping[str, RaggedBufferF32],
        visible: Mapping[str, RaggedBufferBool],
        head_name: str,
        tracer: Tracer,
    ) -> torch.Tensor:
        """Evaluate one auxiliary head on mean-pooled entity embeddings."""
        x = self.batch_and_embed(entities, visible, tracer)
        # Mean-pool entity embeddings per frame before the regression head.
        pooled = torch_scatter.scatter(
            src=x.data, dim=0, index=x.batch_index, reduce="mean"
        )
        return self.auxiliary_heads[head_name](pooled)  # type: ignore

    def get_action_and_auxiliary(
        self,
        entities: Mapping[str, RaggedBufferF32],
        visible: Mapping[str, RaggedBufferBool],
        action_masks: Mapping[str, VecActionMask],
        tracer: Tracer,
        prev_actions: Optional[Dict[str, RaggedBufferI64]] = None,
    ) -> Tuple[
        Dict[str, RaggedBufferI64],  # actions
        Dict[str, torch.Tensor],  # chosen action probabilities
        Dict[str, torch.Tensor],  # entropy
        Dict[str, npt.NDArray[np.int64]],  # number of actors in each frame
        Dict[str, torch.Tensor],  # auxiliary head values
        Dict[str, torch.Tensor],  # full logits
    ]:
        """Sample actions (or re-evaluate `prev_actions`) and auxiliary heads.

        When `prev_actions` is given, the same actions are scored instead of
        sampling new ones (used when computing policy-gradient losses).
        """
        actions = {}
        probs: Dict[str, torch.Tensor] = {}
        entropies: Dict[str, torch.Tensor] = {}
        logits: Dict[str, torch.Tensor] = {}
        with tracer.span("batch_and_embed"):
            x = self.batch_and_embed(entities, visible, tracer)
        tracer.start("action_heads")
        # Offset of each frame's first entity in the flattened entity tensor,
        # used by heads to translate per-frame actor indices to flat indices.
        index_offsets = RaggedBufferI64.from_array(
            torch.cat([torch.tensor([0]).to(self.device()), x.lengths[:-1]])
            .cumsum(0)
            .cpu()
            .numpy()
            .reshape(-1, 1, 1)
        )
        actor_counts: Dict[str, np.ndarray] = {}
        for action_name, action_head in self.action_heads.items():
            action, count, logprob, entropy, logit = action_head(
                x,
                index_offsets,
                action_masks[action_name],
                prev_actions[action_name] if prev_actions is not None else None,
            )
            actor_counts[action_name] = count
            actions[action_name] = action
            probs[action_name] = logprob
            entropies[action_name] = entropy
            if logit is not None:
                logits[action_name] = logit
        tracer.end("action_heads")
        tracer.start("auxiliary_heads")
        if self.auxiliary_heads:
            pooled = torch.zeros(
                x.lengths.size(0), x.data.size(1), device=x.data.device
            )
            torch_scatter.scatter(
                src=x.data,
                dim=0,
                index=x.batch_index,
                reduce="mean",
                out=pooled,
            )
            auxiliary_values = {
                name: module(pooled) for name, module in self.auxiliary_heads.items()
            }
        else:
            auxiliary_values = {}
        tracer.end("auxiliary_heads")
        return (
            # Pass stored actions through unchanged when re-evaluating.
            prev_actions
            or tensor_dict_to_ragged(RaggedBufferI64, actions, actor_counts),
            probs,
            entropies,
            actor_counts,
            auxiliary_values,
            logits,
        )

    def set_obs_filter(self, obs_space: ObsSpace) -> None:
        """Configure per-entity feature filters for a differing obs space.

        Computes index arrays that select this network's expected features
        out of the (superset) features supplied by `obs_space`.

        :raises ValueError: if an expected entity or feature is missing.
        """
        self.obs_filter = {}
        if obs_space == self.obs_space:
            return
        for key, entity in self.obs_space.entities.items():
            if key not in obs_space.entities:
                raise ValueError(f"Missing entity {key} in obs space")
            expected_feats = entity.features
            received_feats = obs_space.entities[key].features
            if expected_feats != received_feats:
                self.obs_filter[key] = construct_obs_filter(
                    expected_feats, received_feats
                )
        if self.obs_space.global_features != obs_space.global_features:
            self.obs_filter["__global__"] = construct_obs_filter(
                self.obs_space.global_features, obs_space.global_features
            )
def construct_obs_filter(
    expected_features: List[str], received_feats: List[str]
) -> npt.NDArray[np.int64]:
    """Build an index array selecting `expected_features` from `received_feats`.

    :param expected_features: feature names the network was built with
    :param received_feats: feature names actually supplied by the environment
    :return: int64 array `idx` such that `received[idx]` yields the expected
        features in their expected order
    :raises ValueError: if any expected feature is absent from `received_feats`
    """
    # Ensure that all expected features are present with efficient set intersection
    missing_features = set(expected_features) - set(received_feats)
    if missing_features:
        raise ValueError(f"Missing features: {missing_features}. ")
    # Precompute feature -> first position once so the lookup is O(n + m)
    # instead of a linear `list.index` scan per expected feature.
    feature_positions: Dict[str, int] = {}
    for i, feat in enumerate(received_feats):
        feature_positions.setdefault(feat, i)
    return np.array(
        [feature_positions[feat] for feat in expected_features], dtype=np.int64
    )
def regression_head(d_model: int, d_out: int) -> nn.Module:
    """Create a linear regression head whose weights and bias start at zero.

    Zero initialization makes the head output exactly 0 before training.

    :param d_model: input feature dimension
    :param d_out: output dimension
    :return: the zero-initialized linear layer
    """
    head = nn.Linear(d_model, d_out)
    nn.init.zeros_(head.weight)
    nn.init.zeros_(head.bias)
    return head
def create_action_heads(
    action_space: Dict[str, ActionSpace], d_model: int, d_qk: int
) -> nn.ModuleDict:
    """Instantiate one decoding head per action space.

    :param action_space: map from action name to its action space
    :param d_model: embedding dimension fed into each head
    :param d_qk: key/query dimension for select-entity heads
    :return: ModuleDict mapping action name to its head module
    :raises ValueError: for unsupported action space types
    """
    heads: Dict[str, nn.Module] = {}
    for name, space in action_space.items():
        if isinstance(space, (CategoricalActionSpace, GlobalCategoricalActionSpace)):
            heads[name] = CategoricalActionHead(d_model, len(space))
        elif isinstance(space, SelectEntityActionSpace):
            heads[name] = PaddedSelectEntityActionHead(d_model, d_qk)
        else:
            raise ValueError(f"Unknown action space {space}")
    return nn.ModuleDict(heads)
import math
from dataclasses import dataclass, field
from typing import List, Literal, Mapping, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from entity_gym.env import ObsSpace
from ragged_buffer import RaggedBufferI64
from rogue_net.input_norm import InputNorm
@dataclass(frozen=True, eq=False)
class RelposEncodingConfig:
    """Settings for relative position encoding.

    :param extent: Each integer relative position in the interval [-extent, extent] receives a positional embedding, with positions outside the interval snapped to the closest end.
    :param position_features: Names of position features used for relative position encoding.
    :param scale: Relative positions are divided by the scale before being assigned an embedding.
    :param per_entity_values: Whether to use per-entity embeddings for relative positional values.
    :param exclude_entities: List of entity types to exclude from relative position encoding.
    :param key_relpos_projection: Adds a learnable projection from the relative position/distance to the relative positional keys.
    :param value_relpos_projection: Adds a learnable projection from the relative position/distance to the relative positional values.
    :param per_entity_projections: Uses a different learned projection per entity type for the `key_relpos_projection` and `value_relpos_projection`.
    :param radial: Buckets all relative positions by their angle. The `extent` is interpreted as the number of buckets.
    :param distance: Buckets all relative positions by their distance. The `extent` is interpreted as the number of buckets.
    :param rotation_vec_features: Name of features that give a unit orientation vector for each entity by which to rotate relative positions.
    :param rotation_angle_feature: Name of feature that gives an angle in radians by which to rotate relative positions.
    :param interpolate: Whether to interpolate between the embeddings of neighboring positions.
    :param value_gate: Nonlinearity applied to a learned per-entity gate that modulates the relative positional values; ``None`` disables gating, ``"linear"`` gates without a nonlinearity.
    :param enable_negative_distance_weight_bug: Reproduces legacy behavior where interpolation weights for distances beyond the last bucket are not clamped and can go negative (presumably kept for backward compatibility with old checkpoints — confirm before relying on it).
    """

    extent: List[int]
    position_features: List[str]
    scale: float = 1.0
    per_entity_values: bool = False
    exclude_entities: List[str] = field(default_factory=list)
    value_relpos_projection: bool = False
    key_relpos_projection: bool = False
    per_entity_projections: bool = False
    radial: bool = False
    distance: bool = False
    rotation_vec_features: Optional[List[str]] = None
    rotation_angle_feature: Optional[str] = None
    interpolate: bool = False
    value_gate: Literal["linear", "relu", "gelu", "sigmoid", None] = "relu"
    enable_negative_distance_weight_bug: bool = False

    def __post_init__(self) -> None:
        """Validate that `extent` matches the chosen bucketing scheme."""
        if self.radial and self.distance:
            assert (
                len(self.extent) == 2
            ), "Polar relative position encoding expects two extent values (number of angle buckets and number of distance buckets)"
        elif self.radial:
            assert (
                len(self.extent) == 1
            ), "Radial relative position encoding expects a single extent value (number of angle buckets)"
            assert (
                self.rotation_angle_feature is not None
                or self.rotation_vec_features is not None
            ), "Radial relative position encoding requires `rotation_angle_feature` or `rotation_vec_features` to be set"
        elif self.distance:
            assert (
                len(self.extent) == 1
            ), "Distance relative position encoding expects a single extent value (number of distance buckets)"
        else:
            assert len(self.extent) == len(
                self.position_features
            ), "Relative position encoding expects a extent value for each position feature"
        assert (
            self.rotation_vec_features is None or self.rotation_angle_feature is None
        ), "Only one of rotation_vec_features and rotation_angle_feature can be specified"
class RelposEncoding(nn.Module, RelposEncodingConfig):
    """Relative positional attention keys/values for entity transformers.

    Buckets pairwise relative positions between entities (on a grid, by
    angle, by distance, or polar angle+distance) and looks up learned key
    and value embeddings per bucket, optionally interpolating between
    neighboring buckets.
    """

    def __init__(
        self, config: RelposEncodingConfig, obs_space: ObsSpace, dmodel: int, dhead: int
    ) -> None:
        """Create embedding tables and feature-index lookups.

        :param config: relative position encoding settings
        :param obs_space: observation space (provides entity feature names)
        :param dmodel: model embedding dimension (used by the value gate)
        :param dhead: per-head key/value dimension
        """
        nn.Module.__init__(self)
        # Copy all config fields onto self (the class inherits the config).
        RelposEncodingConfig.__init__(self, **config.__dict__)
        self.n_entity = len(obs_space.entities)
        # `strides` converts a per-dimension bucket index into a flat index;
        # `positions` is the total number of buckets.
        if self.radial and self.distance:
            angles, distances = self.extent
            strides = [1.0, angles]
            positions = angles * distances
        elif self.radial or self.distance:
            strides = [1.0]
            positions = self.extent[0]
        else:
            strides = []
            positions = 1
            for extent in self.extent:
                strides.append(float(positions))
                positions *= 2 * extent + 1
        self.positions = positions
        self.register_buffer("strides", torch.tensor(strides).unsqueeze(0))
        self.register_buffer(
            "extent_tensor", torch.tensor(self.extent).view(1, 1, 1, -1).long()
        )
        # TODO: tune embedding init scale
        self.keys = nn.Embedding(self.positions, dhead)
        # With per_entity_values, each (bucket, target entity type) pair gets
        # its own value embedding.
        self.values = nn.Embedding(
            self.positions * self.n_entity
            if config.per_entity_values
            else self.positions,
            dhead,
        )
        self.distance_values = nn.Embedding(self.n_entity, dhead)
        self.keys.weight.data.normal_(mean=0.0, std=0.05)
        self.values.weight.data.normal_(mean=0.0, std=0.2)
        self.distance_values.weight.data.normal_(mean=0.0, std=0.2)
        # Column indices of the position features within each entity type.
        self.position_feature_indices = {
            entity_name: torch.LongTensor(
                [
                    entity.features.index(feature_name)
                    for feature_name in config.position_features
                ]
            )
            for entity_name, entity in obs_space.entities.items()
            if entity_name not in self.exclude_entities
        }
        # Column indices of the orientation unit-vector features (entities
        # lacking these features are simply left out of the dict).
        self.orientation_vec_indices = (
            {
                entity_name: torch.LongTensor(
                    [
                        entity.features.index(feature_name)
                        for feature_name in self.rotation_vec_features
                    ]
                )
                for entity_name, entity in obs_space.entities.items()
                if entity_name not in self.exclude_entities
                and all(
                    feature in entity.features for feature in self.rotation_vec_features
                )
            }
            if self.rotation_vec_features is not None
            else None
        )
        # Column index of the orientation angle feature per entity type.
        self.orientation_angle_index = (
            {
                entity_name: entity.features.index(self.rotation_angle_feature)
                for entity_name, entity in obs_space.entities.items()
                if entity_name not in self.exclude_entities
                and self.rotation_angle_feature in entity.features
            }
            if self.rotation_angle_feature is not None
            else None
        )
        if self.value_relpos_projection:
            if self.per_entity_projections:
                self.vproj: nn.Module = nn.ModuleDict(
                    {
                        entity_name: nn.Linear(3, dhead)
                        for entity_name in self.position_feature_indices
                    }
                )
            else:
                # Input is (dx, dy, distance), hence 3 features.
                self.vproj = nn.Linear(3, dhead)
        if self.key_relpos_projection:
            if self.per_entity_projections:
                self.kproj: nn.Module = nn.ModuleDict(
                    {
                        entity_name: nn.Linear(3, dhead)
                        for entity_name in self.position_feature_indices
                    }
                )
            else:
                self.kproj = nn.Linear(3, dhead)
        if self.key_relpos_projection or self.value_relpos_projection:
            self.relpos_norm = InputNorm(3)
        if self.value_gate is not None:
            self.value_gate_proj = nn.Linear(dmodel, dhead)
        # Relative keys/values cached by the caller between `keys_values`
        # and the `relattn_logits`/`relpos_values` lookups.
        self.cached_rkvs: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
        self.global_entity = "__global__" in obs_space.entities

    def relattn_logits(self, queries: torch.Tensor) -> torch.Tensor:
        """Dot-product attention logits of queries against cached relative keys."""
        assert self.cached_rkvs is not None
        relkeys = self.cached_rkvs[0]  # (B, T, T, dhead)
        # Broadcast and sum over last dimension (dot product of queries with relative keys)
        relattn: torch.Tensor = torch.einsum("bhsd,bstd->bhst", queries, relkeys) * (
            1.0 / math.sqrt(relkeys.size(-1))
        )  # (B, nh, T, T)
        return relattn

    def relpos_values(self, att: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        """Attention-weighted sum of cached relative values (optionally gated by x)."""
        assert self.cached_rkvs is not None
        relvals = self.cached_rkvs[1]  # (B, T_query, T_target, dhead)
        if self.value_gate is not None:
            # Gate each target entity's relative values by a projection of
            # its own embedding.
            vgate = self.value_gate_proj(x)  # (B, T_target, dhead)
            if self.value_gate == "relu":
                vgate = F.relu(vgate)
            elif self.value_gate == "gelu":
                vgate = F.gelu(vgate)
            elif self.value_gate == "sigmoid":
                vgate = torch.sigmoid(vgate)
            relvals = torch.einsum("bqtd,btd->bqtd", relvals, vgate)
        rely: torch.Tensor = torch.einsum(
            "bhst,bstd->bhsd", att, relvals
        )  # (B, nh, T, T)
        return rely

    def keys_values(
        self,
        # Dict from entity name to raw input features
        x: Mapping[str, torch.Tensor],
        # Maps entities ordered first by type to entities ordered first by frame
        index_map: torch.Tensor,
        # Maps flattened embeddings to packed/padded tensor with fixed sequence lengths
        packpad_index: Optional[torch.Tensor],
        # Ragged shape of the flattened embeddings tensor
        shape: RaggedBufferI64,
        # Type of each entity
        entity_type: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the (B, T, T, dhead) relative key and value tensors."""
        if "__global__" in x and not self.global_entity:
            x = {k: v for k, v in x.items() if k != "__global__"}
        relative_positions, entity_type = self._relative_positions(
            x, index_map, packpad_index, shape, entity_type
        )
        torientation = (
            self._orientations(x, index_map, packpad_index, shape)
            if self.radial
            else None
        )
        if self.interpolate:
            # Blend embeddings of neighboring buckets by their weights.
            indices_weights = self._interpolated_partition(
                relative_positions, torientation
            )
            keys: torch.Tensor = sum(  # type: ignore
                self.keys(indices) * weights.unsqueeze(-1)
                for indices, weights in indices_weights
            )
            if self.per_entity_values:
                # Offset bucket indices by target entity type to select the
                # per-entity value embedding table slice.
                per_entity_indices_weights = [
                    (
                        indices
                        + (entity_type * self.positions)
                        .transpose(2, 1)
                        .long()
                        .repeat(1, indices.size(2), 1),
                        weights,
                    )
                    for indices, weights in indices_weights
                ]
            else:
                per_entity_indices_weights = indices_weights
            values: torch.Tensor = sum(  # type: ignore
                self.values(indices) * weights.unsqueeze(-1)
                for indices, weights in per_entity_indices_weights
            )
        else:
            indices = self._partition(relative_positions, torientation)
            # Batch x Seq x Seq x d_model
            keys = self.keys(indices)
            if self.per_entity_values:
                per_entity_type_indices = indices + (
                    entity_type * self.positions
                ).transpose(2, 1).long().repeat(1, indices.size(2), 1)
            else:
                per_entity_type_indices = indices
            values = self.values(per_entity_type_indices)
        if self.value_relpos_projection or self.key_relpos_projection:
            # TODO: torch.norm deprecated, does this do the right thing?
            dist = relative_positions.norm(p=2, dim=-1).unsqueeze(-1)
            relpos_dist = torch.cat([relative_positions, dist], dim=-1)
            norm_relpos_dist = self.relpos_norm(relpos_dist)
            if self.per_entity_projections:
                # TODO: efficiency
                if self.value_relpos_projection:
                    for i, vproj in enumerate(self.vproj.values()):  # type: ignore
                        v = vproj(norm_relpos_dist)
                        # NOTE(review): this zeroes rows where entity_type != 0
                        # regardless of the loop index i — looks like it should
                        # compare against i; confirm intended behavior.
                        v[entity_type.squeeze(-1) != 0, :, :] = 0.0
                        values += v
                if self.key_relpos_projection:
                    for i, kproj in enumerate(self.kproj.values()):  # type: ignore
                        k = kproj(norm_relpos_dist)
                        # NOTE(review): same `!= 0` vs `!= i` question as above.
                        k[entity_type.squeeze(-1) != 0, :, :] = 0.0
                        keys += k
            else:
                if self.value_relpos_projection:
                    values += self.vproj(norm_relpos_dist)
                if self.key_relpos_projection:
                    keys += self.kproj(norm_relpos_dist)
        return keys, values

    def _relative_positions(
        self,
        x: Mapping[str, torch.Tensor],
        index_map: torch.Tensor,
        packpad_index: Optional[torch.Tensor],
        shape: RaggedBufferI64,
        entity_type: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute pairwise relative positions (B, Tq, Tk, Pos) and reshape entity types."""
        positions = []
        for entity_name, features in x.items():
            if entity_name not in self.exclude_entities:
                positions.append(
                    features[:, self.position_feature_indices[entity_name]]
                )
            else:
                # TODO: add padding or something?
                raise NotImplementedError(
                    "exclude_entities not implemented for relative position encoding"
                )
        # Flat tensor of positions
        tpos = torch.cat(positions, dim=0)
        # Flat tensor of positions ordered by sample
        tpos = tpos[index_map]
        # Padded/packed Batch x Seq x Pos tensor of positions
        if packpad_index is not None:
            tpos = tpos[packpad_index]
            entity_type = entity_type[packpad_index]
        else:
            size1 = shape.size1(0) if len(tpos) > 0 else 0
            # NOTE(review): reshape hardcodes 2 position components here —
            # presumably assumes len(position_features) == 2; confirm.
            tpos = tpos.reshape(shape.size0(), size1, 2)
            entity_type = entity_type.reshape(shape.size0(), size1, 1)
        # Batch x Seq(q) x Seq(k) x Pos relative positions
        return tpos.unsqueeze(2) - tpos.unsqueeze(1), entity_type

    def _orientations(
        self,
        x: Mapping[str, torch.Tensor],
        index_map: torch.Tensor,
        packpad_index: Optional[torch.Tensor],
        shape: RaggedBufferI64,
    ) -> torch.Tensor:
        """
        Extract a per-entity orientation angle (radians) tensor of shape
        Batch x Seq x 1, derived from either the angle feature or the
        orientation unit-vector features. Entities without orientation
        features get angle 0.
        """
        # Get entity orientations
        orientations = []
        if self.orientation_angle_index is not None:
            for entity_name, feature in x.items():
                feature_index = self.orientation_angle_index.get(entity_name)
                if feature_index is None:
                    orientations.append(
                        torch.zeros_like(feature[:, 0], device=index_map.device)
                    )
                else:
                    orientations.append(feature[:, feature_index])
            torientation = torch.cat(orientations, dim=0)
        elif self.orientation_vec_indices is not None:
            for entity_name, feature in x.items():
                feature_indices = self.orientation_vec_indices.get(entity_name)
                if feature_indices is None:
                    orientations.append(
                        torch.zeros_like(feature[:, 0], device=index_map.device)
                    )
                else:
                    orientation_vec = feature[:, feature_indices]
                    # Convert the unit vector to an angle.
                    orientation = torch.atan2(
                        orientation_vec[:, 1], orientation_vec[:, 0]
                    )
                    orientations.append(orientation)
            torientation = torch.cat(orientations, dim=0)[:, None]
        else:
            raise ValueError("No orientation information")
        torientation = torientation[index_map]
        if packpad_index is not None:
            return torientation[packpad_index].view(
                packpad_index.size(0), packpad_index.size(1), 1
            )
        else:
            size1 = shape.size1(0) if shape.items() > 0 else 0
            return torientation.reshape(shape.size0(), size1, 1)

    def _partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(q) x Seq(k) x Pos relative positions
        torientation: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """
        Maps a sequence of relative positions to indices.
        """
        if self.radial and self.distance:
            return self._polar_partition(relative_positions, torientation)
        elif self.radial:
            return self._radial_partition(relative_positions, torientation)
        elif self.distance:
            return self._distance_partition(relative_positions)
        else:
            return self._grid_partition(relative_positions, torientation)

    def _interpolated_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(q) x Seq(k) x Pos relative positions
        torientation: Optional[torch.Tensor],
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """
        Maps a sequence of relative positions to a list of (bucket index,
        interpolation weight) pairs whose weights sum to 1.
        """
        if self.radial and self.distance:
            return self._interpolated_polar_partition(relative_positions, torientation)
        if self.radial:
            return self._interpolated_radial_partition(relative_positions, torientation)
        elif self.distance:
            return self._interpolated_distance_partition(relative_positions)
        else:
            raise NotImplementedError("interpolated grid partition not implemented")

    def _grid_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(k) x Seq(q) x Pos relative positions
        torientation: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """Snap scaled relative positions onto the [-extent, extent] grid and flatten."""
        assert torientation is None, "Not implemented for non-radial"
        # Clamp each component to [-extent, extent].
        clamped_positions = torch.max(
            torch.min(
                self.extent_tensor,  # type: ignore
                relative_positions * (1.0 / self.scale),
            ),
            -self.extent_tensor,  # type: ignore
        )
        # Shift to [0, 2*extent] and flatten the per-dimension indices.
        positive_positions = clamped_positions + self.extent_tensor
        return (positive_positions * self.strides).sum(dim=-1).round().long()

    def _radial_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(k) x Seq(q) x Pos relative positions
        torientation: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """Bucket relative positions by angle into `extent[0]` bins."""
        angles = torch.atan2(
            relative_positions[:, :, :, 1], relative_positions[:, :, :, 0]
        )
        # We need to be careful about ensuring that we don't create indices that fall outside of the extent.
        # Specifically, taking the modulo of a small negative number can round up to the modulus:
        # (torch.tensor([-1e-12], dtype=torch.float32) % torch.tensor([2]).long()).long() == torch.tensor([2]).long()
        # We can avoid this by ensuring that `angles` is always positive.
        if torientation is not None:
            angles = angles - torientation + 2 * math.pi
        return (angles / (2 * math.pi) * self.extent[0] % self.extent[0]).long()

    def _interpolated_radial_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(k) x Seq(q) x Pos relative positions
        torientation: Optional[torch.Tensor],
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """Angle bucketing with linear interpolation between adjacent bins."""
        angles = torch.atan2(
            relative_positions[:, :, :, 1], relative_positions[:, :, :, 0]
        )
        if torientation is not None:
            angles = angles - torientation + 2 * math.pi
        norm_angles = angles / (2 * math.pi) * self.extent[0] % self.extent[0]
        index1 = norm_angles.long()
        # Angles wrap around, so the neighboring bin is taken modulo extent.
        index2 = (index1 + 1) % self.extent[0]
        weight1 = (index2 - norm_angles) % 1
        weight2 = 1.0 - weight1
        return [(index1, weight1), (index2, weight2)]

    def _distance_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(k) x Seq(q) x Pos relative positions
    ) -> torch.Tensor:
        """Bucket relative positions by scaled distance, clamped to the last bin."""
        distances: torch.Tensor = torch.linalg.norm(relative_positions, dim=-1)
        return torch.min(
            (distances * (1.0 / self.scale)).long(),
            # For polar relative positions, distance extent is last element.
            self.extent_tensor[0, 0, 0, -1] - 1,  # type: ignore
        )

    def _interpolated_distance_partition(
        self,
        relative_positions: torch.Tensor,  # Batch x Seq(k) x Seq(q) x Pos relative positions
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """Distance bucketing with linear interpolation between adjacent bins."""
        distances: torch.Tensor = torch.linalg.norm(relative_positions, dim=-1) * (
            1.0 / self.scale
        )
        index1 = torch.min(
            distances.long(),
            self.extent_tensor[0, 0, 0, -1] - 1,  # type: ignore
        )
        index2 = torch.min(
            index1 + 1,
            self.extent_tensor[0, 0, 0, -1] - 1,  # type: ignore
        )
        if self.enable_negative_distance_weight_bug:
            # Legacy path: weights can go negative for distances past the
            # last bucket (see config flag).
            weight1 = index2 - distances
        else:
            weight1 = torch.clamp(index2 - distances, 0.0)
        weight2 = 1.0 - weight1
        return [(index1, weight1), (index2, weight2)]

    def _polar_partition(
        self,
        relative_positions: torch.Tensor,
        torientation: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """Combine angle and distance bucket indices into a flat polar index."""
        aindices = self._radial_partition(relative_positions, torientation)
        dindices = self._distance_partition(relative_positions)
        indices = (torch.stack([aindices, dindices], dim=-1) * self.strides.long()).sum(  # type: ignore
            dim=-1
        )
        return indices

    def _interpolated_polar_partition(
        self,
        relative_positions: torch.Tensor,
        torientation: Optional[torch.Tensor],
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """Bilinear interpolation over the 2D (angle, distance) bucket grid."""
        (aindex1, aweight1), (aindex2, aweight2) = self._interpolated_radial_partition(
            relative_positions, torientation
        )
        (dindex1, dweight1), (
            dindex2,
            dweight2,
        ) = self._interpolated_distance_partition(relative_positions)
        # Four corner buckets; products of the 1D weights sum to 1.
        indices1 = (torch.stack([aindex1, dindex1], dim=-1) * self.strides.long()).sum(dim=-1)  # type: ignore
        weights1 = aweight1 * dweight1
        indices2 = (torch.stack([aindex2, dindex1], dim=-1) * self.strides.long()).sum(dim=-1)  # type: ignore
        weights2 = aweight2 * dweight1
        indices3 = (torch.stack([aindex1, dindex2], dim=-1) * self.strides.long()).sum(dim=-1)  # type: ignore
        weights3 = aweight1 * dweight2
        indices4 = (torch.stack([aindex2, dindex2], dim=-1) * self.strides.long()).sum(dim=-1)  # type: ignore
        weights4 = aweight2 * dweight2
        return [
            (indices1, weights1),
            (indices2, weights2),
            (indices3, weights3),
            (indices4, weights4),
        ]
from typing import Optional, Tuple
import numpy as np
import numpy.typing as npt
import torch
from entity_gym.env import VecActionMask, VecCategoricalActionMask
from ragged_buffer import RaggedBufferI64
from torch import nn
from torch.distributions.categorical import Categorical
from rogue_net.ragged_tensor import RaggedTensor
class CategoricalActionHead(nn.Module):
    """Action head that scores a fixed set of `n_choice` options per actor entity."""

    def __init__(self, d_model: int, n_choice: int) -> None:
        super().__init__()
        self.d_model = d_model
        self.n_choice = n_choice
        # Small init std keeps the initial policy close to uniform.
        self.proj = layer_init(nn.Linear(d_model, n_choice), std=0.01)

    def forward(
        self,
        x: RaggedTensor,
        index_offsets: RaggedBufferI64,
        mask: VecActionMask,
        prev_actions: Optional[RaggedBufferI64],
    ) -> Tuple[
        torch.Tensor, npt.NDArray[np.int64], torch.Tensor, torch.Tensor, torch.Tensor
    ]:
        """Sample (or re-score `prev_actions`) for every actor in the batch.

        :param x: per-entity embeddings for the whole batch
        :param index_offsets: per-frame offsets into the flattened entity tensor
        :param mask: actors and optional per-choice masks for this action
        :param prev_actions: previously taken actions to re-evaluate, or None to sample
        :return: (actions, per-frame actor counts, log-probabilities, entropies, logits)
        """
        assert isinstance(
            mask, VecCategoricalActionMask
        ), f"Expected CategoricalActionMaskBatch, got {type(mask)}"
        device = x.data.device
        lengths = mask.actors.size1()
        if len(mask.actors) == 0:
            # No actors anywhere in the batch: return empty results.
            return (
                torch.zeros((0), dtype=torch.int64, device=device),
                lengths,
                torch.zeros((0), dtype=torch.float32, device=device),
                torch.zeros((0), dtype=torch.float32, device=device),
                torch.zeros((0, self.n_choice), dtype=torch.float32, device=device),
            )
        # Translate per-frame actor indices into flat entity indices.
        actors = (
            torch.tensor((mask.actors + index_offsets).as_array())
            .to(x.data.device)
            .squeeze(-1)
        )
        actor_embeds = x.data[actors]
        logits = self.proj(actor_embeds)
        # Apply masks from the environment
        if mask.mask is not None and mask.mask.size0() > 0:
            reshaped_masks = torch.tensor(
                mask.mask.as_array().reshape(logits.shape)
            ).to(x.data.device)
            # Disallowed choices get -inf logits so they are never sampled.
            logits = logits.masked_fill(reshaped_masks == 0, -float("inf"))
        dist = Categorical(logits=logits)
        if prev_actions is None:
            action = dist.sample()
        else:
            action = torch.tensor(prev_actions.as_array().squeeze(-1)).to(x.data.device)
        logprob = dist.log_prob(action)
        entropy = dist.entropy()
        return action, lengths, logprob, entropy, dist.logits
def layer_init(
    layer: nn.Module,
    std: float = np.sqrt(2),
    bias_const: float = 0.0,
) -> nn.Module:
    """Initialize a layer with an orthogonal weight matrix and constant bias.

    :param layer: module with `weight` and `bias` parameters (e.g. nn.Linear)
    :param std: gain applied to the orthogonal weight initialization
    :param bias_const: constant value written into the bias
    :return: the same layer, initialized in place
    """
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)  # type: ignore
    return layer
from typing import Optional
import torch
from torch import nn
class InputNorm(nn.Module):
    """
    Computes a running mean/variance of input features and performs normalization.
    Adapted from https://www.johndcook.com/blog/standard_deviation/
    """

    # Pretend that `count` is a float to make MyPy happy
    count: float

    def __init__(self, num_features: int, cliprange: float = 5) -> None:
        """Create running-statistics buffers.

        :param num_features: size of the trailing feature dimension
        :param cliprange: normalized values are clamped to [-cliprange, cliprange]
        """
        super().__init__()
        self.cliprange = cliprange
        # Buffers (not parameters) so they persist in checkpoints but are
        # not touched by the optimizer.
        self.register_buffer("count", torch.tensor(0.0))
        self.register_buffer("mean", torch.zeros(num_features))
        self.register_buffer("squares_sum", torch.zeros(num_features))
        self.fp16 = False
        # Lazily recomputed cache of the standard deviation.
        self._stddev: Optional[torch.Tensor] = None
        self._dirty = True
        self._frozen = False

    def freeze(self) -> None:
        """
        Freeze the running statistics, thus the normalization.
        """
        self._frozen = True

    def unfreeze(self) -> None:
        """
        Unfreeze the running statistics, thus the normalization.
        """
        self._frozen = False

    def update(self, input: torch.Tensor) -> None:
        """Fold a batch of inputs into the running mean/variance statistics."""
        self._dirty = True
        # Number of feature vectors in this batch.
        count = input.numel() // input.size(-1)
        if count == 0:
            return
        # Reduce over all dimensions except the trailing feature dimension.
        dreduce = tuple(range(0, input.dim() - 1))
        mean = input.mean(dim=dreduce)
        square_sum = ((input - mean) * (input - mean)).sum(dim=dreduce)
        if self.count == 0:
            self.count += count
            self.mean = mean
            self.squares_sum = square_sum
        else:
            # This does not follow directly Welford's method since it is a batched update
            # Instead we consider computing the statistics of two sets, A="current set so far" B="current batch"
            # See Chan, Tony F.; Golub, Gene H.; LeVeque, Randall J. (1979), "Updating Formulae and a Pairwise Algorithm for Computing Sample Variances.", Technical Report STAN-CS-79-773, Department of Computer Science, Stanford University.
            delta = mean - self.mean
            self.mean += delta * count / (count + self.count)
            self.squares_sum += square_sum + torch.square(
                delta
            ) * count * self.count / (count + self.count)
            self.count += count

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Normalize input by the running statistics and clamp to the cliprange."""
        with torch.no_grad():
            # Only accumulate statistics in training mode and when not frozen.
            if self.training and not self._frozen:
                self.update(input)
            # Need at least two samples for a defined standard deviation.
            if self.count > 1:
                input = (input - self.mean) / self.stddev()
            input = torch.clamp(input, -self.cliprange, self.cliprange)
        return input.half() if self.fp16 else input

    def enable_fp16(self) -> None:
        """Emit fp16 outputs while keeping statistics buffers in fp32."""
        # Convert buffers back to fp32, fp16 has insufficient precision and runs into overflow on squares_sum
        self.float()
        self.fp16 = True

    def stddev(self) -> torch.Tensor:
        """Sample standard deviation derived from the running statistics (cached)."""
        if self._dirty or self._stddev is None:
            sd = torch.sqrt(self.squares_sum / (self.count - 1))
            # Avoid division by zero for constant features.
            sd[sd == 0] = 1
            self._stddev = sd
            self._dirty = False
        return self._stddev
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) when the data is a sample.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        sum_squared_dev = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(sum_squared_dev / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        coefficient = 1.0 / (self.stdev * math.sqrt(2 * math.pi))
        exponent = -0.5 * ((x - self.mean) / self.stdev) ** 2
        return coefficient * math.exp(exponent)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: the original re-labelled axes[0] here instead of axes[1].
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        The means add; the variances (not the stdevs) add.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from plone.app.registry.browser.controlpanel import (
ControlPanelFormWrapper,
RegistryEditForm,
)
from plone.app.z3cform.widget import AjaxSelectFieldWidget
from plone.autoform import directives
from plone.restapi.controlpanels import RegistryConfigletPanel
from plone.z3cform import layout
from rohberg.elasticsearchblocks import _
from rohberg.elasticsearchblocks.interfaces import IRohbergElasticsearchblocksLayer
from plone import schema
from zope.component import adapter
from zope.interface import Interface
class IVoltoSearchkitBlockControlPanel(Interface):
    """Registry schema for the Volto Searchkit block settings.

    Every field is persisted in the Plone registry under the prefix
    declared on the accompanying ``RegistryEditForm``.
    """

    # Base URL of the Elasticsearch service queried by the block.
    testsearch_elasticsearch_url = schema.TextLine(
        title=_(
            "elasticsearch url",
        ),
        default="https://localhost:9200",
        required=False,
        readonly=False,
    )

    # Name of the Elasticsearch index to search.
    testsearch_elasticsearch_index = schema.TextLine(
        title=_(
            "elasticsearch index",
        ),
        default="plone2020",
        required=False,
        readonly=False,
    )

    # URL of the Plone backend the search results resolve against.
    testsearch_backend = schema.TextLine(
        title=_(
            "backend",
        ),
        default="http://127.0.0.1:8080/Plone",
        required=False,
        readonly=False,
    )

    # Public frontend URL used when rendering result links.
    testsearch_frontend = schema.TextLine(
        title=_(
            "frontend",
        ),
        default="http://myproject.example.com/",
        required=False,
        readonly=False,
    )

    # Portal types the search is restricted to (empty = no restriction).
    allowed_content_types = schema.List(
        title=_("Allowed types"),
        value_type=schema.TextLine(),
        required=False,
        missing_value=[],
        default=[],
    )
    # AJAX select widget fed by the user-friendly types vocabulary.
    directives.widget(
        "allowed_content_types",
        AjaxSelectFieldWidget,
        vocabulary="plone.app.vocabularies.UserFriendlyTypes",
    )

    # Workflow review states the search is restricted to (empty = all).
    allowed_review_states = schema.List(
        title=_("Allowed states"),
        value_type=schema.TextLine(),
        required=False,
        missing_value=[],
        default=[],
    )
    # AJAX select widget fed by the workflow states vocabulary.
    directives.widget(
        "allowed_review_states",
        AjaxSelectFieldWidget,
        vocabulary="plone.app.vocabularies.WorkflowStates",
    )
class VoltoSearchkitBlockControlPanel(RegistryEditForm):
    """Server-rendered registry edit form for the searchkit block settings."""

    schema = IVoltoSearchkitBlockControlPanel
    # Registry prefix under which all schema fields are stored.
    schema_prefix = "rohberg.elasticsearchblocks.volto_searchkit_block_control_panel"
    label = _("Volto Searchkit Block Control Panel")
# Wrap the edit form in the standard control-panel chrome so it can be
# registered as a browser view.
VoltoSearchkitBlockControlPanelView = layout.wrap_form(
    VoltoSearchkitBlockControlPanel, ControlPanelFormWrapper
)
@adapter(Interface, IRohbergElasticsearchblocksLayer)
class VoltoSearchkitBlockControlPanelConfigletPanel(RegistryConfigletPanel):
    """Control Panel endpoint"""

    # Same schema and registry prefix as the browser form so plone.restapi
    # exposes the identical settings through the @controlpanels endpoint.
    schema = IVoltoSearchkitBlockControlPanel
    configlet_id = "volto_searchkit_block_control_panel-controlpanel"
    configlet_category_id = "Products"
    title = _("Volto Searchkit Block Control Panel")
    group = ""
    schema_prefix = "rohberg.elasticsearchblocks.volto_searchkit_block_control_panel"
from typing import Any, Optional
import importlib.util
import sys
import os
import logging
from importlib.metadata import version
# Module-level guard so the (potentially slow) package check runs only once
# per process; flipped to True by ``check_packages``.
PACKAGES_HAVE_BEEN_CHECKED = False
def check_python(logger: Optional[logging.Logger] = None) -> None:
    """
    Check that the python version in use is 3.9+.

    Prints a colored check mark when the interpreter is at least 3.9,
    otherwise a warning that the code was tested with 3.9.7 (or, for a
    non-3 interpreter, that only python 3 is supported).

    Args:
        logger: optional logger that mirrors every console message
    """
    major, minor, micro = sys.version_info[:3]

    def _emit(*messages: str) -> None:
        # Print each message and mirror it to the optional logger, replacing
        # the duplicated print/logger.info pairs of the original.
        for message in messages:
            print(message)
            if logger is not None:
                logger.info(message)

    if major != 3:
        _emit(
            f"you are using python{major}",
            "this code only works in python 3",
        )
    elif minor >= 9:
        # Green check mark followed by the full interpreter version.
        _emit(f"\033[92m\u2714\033[0m Python version {major}.{minor}.{micro}")
    else:
        _emit(
            f"you are using python{major}.{minor}",
            "this code was tested with python 3.9.7",
            "you might need to upgrade your version",
        )
def check_if_a_module_exists(module_name: str) -> bool:
    """
    Return True when *module_name* is already loaded or can be imported.
    """
    if module_name in sys.modules:
        return True
    # find_spec returns None when the module cannot be located.
    return importlib.util.find_spec(module_name) is not None
def check_module_version(module_name: str) -> Any:
    """
    Return the installed version string of *module_name*, or None when the
    module cannot be imported.

    Args:
        module_name: name of the python module to check
    """
    if not check_if_a_module_exists(module_name=module_name):
        return None
    if module_name == "batch_normalization_folding":
        # Distribution name differs from the import name for this package.
        return version("tensorflow-batchnorm-folding")
    module = importlib.import_module(module_name)
    # Modules that do not expose __version__; versions pinned manually.
    pinned = {"dotenv": "0.20.0", "nvidia_smi": "7.352.0", "official": "2.9.2"}
    if module_name in pinned:
        return pinned[module_name]
    return module.__version__
def create_model_folder() -> None:
    """
    Create the ``../quantized_models`` directory (where quantized models are
    saved) when no entry with that name exists in the parent directory yet.
    """
    parent_entries = os.listdir("..")
    if "quantized_models" not in parent_entries:
        os.mkdir("../quantized_models")
def compare_versions(v1, v2) -> bool:
    """
    Return True when version ``v1`` is at least version ``v2``.

    Versions are sequences of integer components (major, minor, patch).
    Only the first three components are compared; shorter sequences are
    padded with zeros, which also makes two-component versions such as
    "0.20" safe — the original indexed v[2] unconditionally and raised
    IndexError for them.

    Args:
        v1: installed version components, or None when the module is missing
        v2: recommended minimum version components

    Returns:
        bool: True when v1 >= v2; always False when v1 is None
    """
    if v1 is None:
        return False

    def _normalize(v) -> tuple:
        # First three components, zero-padded to length 3.
        head = tuple(int(c) for c in v[:3])
        return head + (0,) * (3 - len(head))

    # Lexicographic tuple comparison is exactly the original cascaded
    # major/minor/patch check.
    return _normalize(v1) >= _normalize(v2)
def check_packages(logger: Optional[logging.Logger] = None) -> None:
    """
    Check that all required third-party packages are installed and print a
    summary table comparing installed versions with the recommended ones.

    The check runs only once per process, guarded by the module-level
    ``PACKAGES_HAVE_BEEN_CHECKED`` flag. If any required package is missing
    entirely the process exits via ``sys.exit()``.

    Args:
        logger: optional logger that mirrors the console output
    """
    global PACKAGES_HAVE_BEEN_CHECKED
    if PACKAGES_HAVE_BEEN_CHECKED:
        # Bug fix: the original fell through to code referencing variables
        # (pip_commands_to_run, missing_modules) that are only defined on the
        # first run, raising a NameError on any repeated call.
        return

    print()
    check_python(logger=logger)
    module_v_int = []
    module_v_str = []
    pip_commands = [
        "pip install --upgrade tensorflow",
        "pip install --upgrade scikit-image",
        "pip install --upgrade opencv-python",
        "pip install --upgrade Pillow",
    ]
    missing_modules = False
    packages_to_check = ["tensorflow", "skimage", "cv2", "PIL"]
    recommended_version = [(2, 10, 0), (0, 19, 3), (4, 6, 0), (9, 2, 0)]
    # Collect each package's installed version both as a display string and
    # as a list of ints for comparison ("-" / None when not installed).
    for package in packages_to_check:
        module_version = check_module_version(module_name=package)
        if module_version is None:
            module_v_int.append(None)
            module_v_str.append("-")
            missing_modules = True
        else:
            module_v_str.append(module_version)
            module_v_int.append([int(e) for e in module_version.split(".")])
    pip_commands_to_run = []
    column_0 = ["package"]
    column_1 = ["current version"]
    column_2 = ["recommended version"]
    column_3 = ["checks"]
    # Loop variable renamed (was `version`) so it no longer shadows
    # importlib.metadata.version imported at module level.
    for (installed, v_str, reco_version, pip_command, name) in zip(
        module_v_int,
        module_v_str,
        recommended_version,
        pip_commands,
        packages_to_check,
    ):
        if compare_versions(installed, reco_version):
            column_3.append("\u2714")
        else:
            column_3.append("\u2717")
            pip_commands_to_run.append(pip_command)
        column_2.append(f"{reco_version[0]}.{reco_version[1]}.{reco_version[2]}")
        column_1.append(v_str)
        column_0.append(name)
    # Column widths follow the widest entry of each column.
    l0 = len(max(column_0, key=len))
    l1 = len(max(column_1, key=len))
    l2 = len(max(column_2, key=len))
    l3 = len(max(column_3, key=len))
    limiter = "+" + (l0 + l1 + l2 + l3 + 11) * "-" + "+"
    print(limiter)
    if logger is not None:
        logger.info(limiter)
    for cpt, (s0, s1, s2, s3) in enumerate(
        zip(column_0, column_1, column_2, column_3)
    ):
        if cpt == 0:
            # Header row: bold, no underline, no pass/fail coloring.
            color = "\033[1m"
            end_c = "\033[0m"
            color_ = ""
            end_c_ = ""
            assert_color = ""
            assert_end_c = ""
        else:
            # Data rows: underline the recommended version and color the
            # check mark green (pass) or red (fail).
            color = ""
            end_c = ""
            color_ = "\033[4m"
            end_c_ = "\033[0m"
            assert_color = "\033[92m"
            if s3 == "\u2717":
                assert_color = "\033[91m"
            assert_end_c = "\033[0m"
        print(
            f"| {color}{s0.center(l0)}{end_c} | {color}{s1.center(l1)}{end_c} "
            f"| {color}{(color_ + s2 + end_c_).center(l2 + len(end_c_) + len(color_))}{end_c} "
            f"| {assert_color}{color}{s3.center(l3)}{end_c}{assert_end_c} |"
        )
        if logger is not None:
            logger.info(
                f"| {color}{s0.center(l0)}{end_c} | {color}{s1.center(l1)}{end_c} "
                f"| {color}{(color_ + s2 + end_c_).center(l2 + len(end_c_) + len(color_))}{end_c} "
                f"| {assert_color}{color}{s3.center(l3)}{end_c}{assert_end_c} |"
            )
    print(limiter)
    if logger is not None:
        logger.info(limiter)
    PACKAGES_HAVE_BEEN_CHECKED = True
    if len(pip_commands_to_run) != 0:
        print("to install/upgrade missing dependencies please use:")
        if logger is not None:
            logger.info("to install/upgrade missing dependencies please use:\n")
        for pip in pip_commands_to_run:
            print(f"\t{pip}")
            if logger is not None:
                logger.info(f"\t{pip}")
    if missing_modules:
        # Hard stop: downstream code cannot run without these packages.
        sys.exit()
# Allow running this module directly as a standalone environment check.
if __name__ == "__main__":
    check_packages()
from .fastwarping import FastTanhWarping
from typing import Tuple, List
import tensorflow as tf
import numpy as np
from PIL import Image
def labels2boxes(
    input: np.ndarray,
    left_eye_class: Tuple[int, ...] = (2, 4),
    right_eye_class: Tuple[int, ...] = (3, 5),
    nose_class: Tuple[int, ...] = (6,),
    mouth_class: Tuple[int, ...] = (7, 8, 9),
) -> np.ndarray:
    """
    Extract facial-part bounding boxes from a one-hot label map.

    Boxes follow PIL's ``getbbox`` convention: ``(left, upper, right, lower)``
    with the right/lower edges exclusive, and ``(0, 0, 0, 0)`` when the part
    is absent. The bbox is computed directly with numpy instead of building a
    throwaway PIL Image per part. Defaults are now immutable tuples (the
    original used mutable list defaults), and the return annotation is
    corrected: the function returns a numpy array, not a tf.Tensor.

    Args:
        input: (H, W, C) one-hot label map
        left_eye_class: class indices associated with the left eye
        right_eye_class: class indices associated with the right eye
        nose_class: class indices associated with the nose
        mouth_class: class indices associated with the mouth
    """
    # (H, W, C) -> (C, H, W) so each part's channels can be summed together.
    channels = np.transpose(input, axes=(2, 0, 1))
    boxes = []
    for group in (left_eye_class, right_eye_class, nose_class, mouth_class):
        # list(...) forces fancy indexing (a tuple would index dimensions).
        mask = np.sum(channels[list(group)], axis=0)
        rows, cols = np.nonzero(mask)
        if rows.size == 0:
            boxes.append((0, 0, 0, 0))
        else:
            # Same semantics as PIL Image.getbbox on the non-zero region.
            boxes.append((cols.min(), rows.min(), cols.max() + 1, rows.max() + 1))
    boxes = np.array(boxes)
    assert boxes.shape == (4, 4)
    return boxes
def sharpen_labels(label: tf.Tensor) -> tf.Tensor:
    """
    Round soft (bilinearly interpolated) one-hot labels back to hard values.

    Args:
        label: one hot labels (bilinear)
    """
    return tf.round(label)
def TanhWarping(
    image: np.ndarray, label: np.ndarray, size: Tuple[int]
) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    Apply RoI tanh warping to an image and its one-hot label map, based on
    https://openaccess.thecvf.com/content_CVPR_2019/papers/Lin_Face_Parsing_With_RoI_Tanh-Warping_CVPR_2019_paper.pdf

    Both tensors are warped with the same face boxes; the warped labels are
    re-rounded so interpolation does not leave fractional one-hot values.

    Args:
        image: images to rectify
        label: one hot labels
        size: output target size
    """
    face_boxes = labels2boxes(np.array(label))
    rectified_image = FastTanhWarping(image=image, boxes=face_boxes, output_size=size)
    rectified_labels = sharpen_labels(
        FastTanhWarping(image=label, boxes=face_boxes, output_size=size)
    )
    return rectified_image, rectified_labels
from .grid_sampler import bilinear_sampler
import tensorflow as tf
from skimage import transform as trans
import numpy as np
from typing import Tuple, Optional
def atanh(x: tf.Tensor) -> tf.Tensor:
    """
    Element-wise inverse hyperbolic tangent.

    Delegates to TensorFlow's built-in ``tf.math.atanh`` instead of the
    hand-rolled ``0.5 * log((1 + x) / (1 - x))`` — the same function, with
    the library's numerical handling near |x| = 1.

    Args:
        x: tensor to activate element-wise.
    """
    return tf.math.atanh(x)
def apply_mat_tensor(coords: np.ndarray, matrix: np.ndarray) -> np.ndarray:
    """
    Apply a 3x3 homogeneous (projective) transform to (N, 2) coordinates.

    Note: the original called ``matrix.astype(np.float32)`` and discarded the
    result (astype returns a copy), so no conversion ever happened; that dead
    statement has been removed. The transpose/stack dance is replaced by a
    direct homogeneous matrix product with identical results.

    Args:
        coords: (N, 2) array of x/y coordinates (a single (2,) point is
            promoted to (1, 2))
        matrix: (3, 3) homogeneous transformation matrix

    Returns:
        (N, 2) array of transformed coordinates.
    """
    coords = np.array(coords, copy=False, ndmin=2)
    # Append the homogeneous coordinate, preserving the input dtype.
    ones = np.ones_like(coords[:, :1])
    homogeneous = np.concatenate([coords, ones], axis=1)
    dst = homogeneous @ matrix.T
    # Avoid division by zero for points mapped onto the line at infinity.
    dst[dst[:, 2] == 0, 2] = np.finfo(float).eps
    dst[:, :2] /= dst[:, 2:3]
    return dst[:, :2]
def boxes2landmark(boxes):
    """
    Convert the four facial-part boxes into five landmark points used to fit
    the similarity transform: the centers of the first three boxes (eyes and
    nose) plus the two bottom corners of the mouth box.

    Args:
        boxes: (4, 4) array of (x0, y0, x1, y1) bounding boxes
    """
    centers = [
        ((x0 + x1) / 2.0, (y0 + y1) / 2.0) for x0, y0, x1, y1 in boxes[:-1]
    ]
    mouth = boxes[3]
    corners = [(mouth[0], mouth[3]), (mouth[2], mouth[3])]
    return np.array(centers + corners)
def get_warped_coords(
    corrds: np.ndarray,
    tform: trans.SimilarityTransform,
    tform2: trans.SimilarityTransform,
) -> tf.Tensor:
    """
    Map coordinates through the tanh warp: normalize into the unit square,
    take atanh, then undo the landmark similarity transform.

    Args:
        corrds: initial coordinates
        tform: similarity transform fitted to the facial landmarks
        tform2: normalization transform into the [-1, 1] square
    """
    normalized = apply_mat_tensor(corrds, tform2.params)
    # Clip just inside (-1, 1) so atanh stays finite.
    clipped = tf.clip_by_value(normalized, -0.9999, 0.9999)
    return apply_mat_tensor(atanh(clipped), np.linalg.inv(tform.params))
def get_inversed_coords(
    corrds: np.ndarray,
    tform: trans.SimilarityTransform,
    tform2: trans.SimilarityTransform,
) -> tf.Tensor:
    """
    Inverse of :func:`get_warped_coords`: apply the landmark similarity
    transform, squash through tanh, then map back out of the normalized
    square.

    Args:
        corrds: initial coordinates
        tform: similarity transform fitted to the facial landmarks
        tform2: normalization transform into the [-1, 1] square
    """
    projected = apply_mat_tensor(corrds, tform.params)
    squashed = tf.math.tanh(projected)
    return apply_mat_tensor(squashed, np.linalg.inv(tform2.params))
def get_coords(
    tform: trans.SimilarityTransform,
    tform2: trans.SimilarityTransform,
    out_shape: Tuple[int],
    mode: str = "warp",
) -> tf.Tensor:
    """
    Build the full sampling coordinate grid for an output of ``out_shape``.

    Args:
        tform: first transformation to apply
        tform2: second transformation to apply
        out_shape: shape of the output
        mode: "warp" or "inverse"; any other value leaves the identity
            grid untouched (presumably intentional — TODO confirm)

    Returns:
        (2, rows, cols) tensor of sampling coordinates.
    """
    cols, rows = out_shape[0], out_shape[1]
    # All output pixel indices as a flat (cols*rows, 2) coordinate list.
    tf_coords = np.indices((cols, rows), dtype=np.float32).reshape(2, -1).T
    if mode == "warp":
        tf_coords = get_warped_coords(
            tf_coords,
            tform=tform,
            tform2=tform2,
        )
    elif mode == "inverse":
        tf_coords = get_inversed_coords(
            tf_coords,
            tform=tform,
            tform2=tform2,
        )
    # Back from the flat (N, 2) list to a (2, rows, cols) grid.
    tf_coords = tf.transpose(
        tf.reshape(tf.transpose(tf_coords), (-1, cols, rows)), perm=(0, 2, 1)
    )
    return tf_coords
def coords2grid(coords: tf.Tensor, in_image_shape: Tuple[int]) -> tf.Tensor:
    """
    Rescale absolute pixel coordinates into the [-1, 1] range expected by
    the grid sampler and reorder axes into a batched (1, H, W, 2) grid.

    Args:
        coords: (2, H, W) tensor of absolute coordinates
        in_image_shape: shape of the input image
    """
    ih, iw = in_image_shape[0], in_image_shape[1]
    # (2,) -> (2, 1, 1) so it broadcasts over the coordinate grid.
    extent = tf.reshape(
        tf.stack([np.float32(ih) - 1, np.float32(iw) - 1]), (-1, 1, 1)
    )
    normalized = (2 * coords) / extent - 1
    return tf.expand_dims(tf.transpose(normalized, (1, 2, 0)), axis=0)
def FastTanhWarping(
    image: tf.Tensor, boxes: np.ndarray, output_size: Optional[Tuple[int]] = None
):
    """
    Apply fast tanh warping using skimage similarity transforms.

    A similarity transform is fitted from the detected facial landmarks to a
    canonical face template, the sampling grid is warped through atanh, and
    the image is resampled bilinearly.

    Args:
        image: input image to warp
        boxes: (4, 4) facial-part bounding boxes of the face
        output_size: size of the output image (defaults to the input shape)
    """
    tform = trans.SimilarityTransform()
    # Canonical landmark template: left eye, right eye, nose, mouth corners
    # (same order as boxes2landmark) in normalized coordinates.
    dst = np.array([[-0.25, -0.1], [0.25, -0.1], [0.0, 0.1], [-0.15, 0.4], [0.15, 0.4]])
    # Maps pixel coordinates into [-1, 1]; the 1/256 scale presumably assumes
    # a 512-px reference frame — TODO confirm against callers.
    tform2 = trans.SimilarityTransform(
        scale=1.0 / 256.0, rotation=0, translation=(-1, -1)
    )
    landmarks = boxes2landmark(boxes=boxes)
    tform.estimate(landmarks, dst)
    if output_size is None:
        output_size = np.array(image).shape
    corrds = get_coords(tform, tform2, output_size, mode="warp")
    grid = coords2grid(corrds, np.array(image).shape)
    # bilinear_sampler expects a batch dimension; squeezes it away on return.
    warped_image = bilinear_sampler(
        img=np.expand_dims(image, axis=0), x=grid[..., 0], y=grid[..., 1]
    )
    return warped_image
import tensorflow as tf
def get_pixel_value(img: tf.Tensor, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
    """
    Gather the pixel at integer coordinates (y, x) from each image in a
    batch.

    Args:
        img: tensor of shape (B, H, W, C)
        x: integer x-coordinates of shape (B, H, W)
        y: integer y-coordinates of shape (B, H, W)

    Returns:
        Tensor of shape (B, H, W, C).
    """
    dims = tf.shape(x)
    batch_size, height, width = dims[0], dims[1], dims[2]
    # Broadcast each sample's batch index across its (H, W) grid.
    batch_idx = tf.reshape(tf.range(0, batch_size), (batch_size, 1, 1))
    batch_grid = tf.tile(batch_idx, (1, height, width))
    indices = tf.stack([batch_grid, y, x], 3)
    return tf.gather_nd(img, indices)
def bilinear_sampler(img: tf.Tensor, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
    """
    Performs bilinear sampling of the input images according to the
    normalized coordinates provided by the sampling grid. Note that
    the sampling is done identically for each channel of the input.

    To test if the function works properly, output image should be
    identical to input image when theta is initialized to identity
    transform.

    Input
    -----
    - img: batch of images in (B, H, W, C) layout.
    - grid: x, y which is the output of affine_grid_generator.

    Returns
    -------
    - out: interpolated images according to grids. Same size as grid,
      with size-1 dimensions (including a batch of 1) squeezed away.
    """
    H = tf.shape(img)[1]
    W = tf.shape(img)[2]
    max_y = tf.cast(H - 1, "int32")
    max_x = tf.cast(W - 1, "int32")
    zero = tf.zeros([], dtype="int32")

    # rescale x and y to [0, W-1/H-1]
    # NOTE(review): scaling by (max_x - 1) maps [-1, 1] onto [0, W-2],
    # not [0, W-1] — looks like an off-by-one; confirm against callers.
    x = tf.cast(x, "float32")
    y = tf.cast(y, "float32")
    x = 0.5 * ((x + 1.0) * tf.cast(max_x - 1, "float32"))
    y = 0.5 * ((y + 1.0) * tf.cast(max_y - 1, "float32"))

    # grab 4 nearest corner points for each (x_i, y_i)
    x0 = tf.cast(tf.floor(x), "int32")
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), "int32")
    y1 = y0 + 1

    # clip to range [0, H-1/W-1] to not violate img boundaries
    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)

    # get pixel value at corner coords
    Ia = get_pixel_value(img, x0, y0)
    Ib = get_pixel_value(img, x0, y1)
    Ic = get_pixel_value(img, x1, y0)
    Id = get_pixel_value(img, x1, y1)

    # recast as float for delta calculation
    x0 = tf.cast(x0, "float32")
    x1 = tf.cast(x1, "float32")
    y0 = tf.cast(y0, "float32")
    y1 = tf.cast(y1, "float32")

    # calculate deltas (bilinear interpolation weights; they sum to 1)
    wa = (x1 - x) * (y1 - y)
    wb = (x1 - x) * (y - y0)
    wc = (x - x0) * (y1 - y)
    wd = (x - x0) * (y - y0)

    # add dimension for addition
    wa = tf.expand_dims(wa, axis=3)
    wb = tf.expand_dims(wb, axis=3)
    wc = tf.expand_dims(wc, axis=3)
    wd = tf.expand_dims(wd, axis=3)

    # compute output
    out = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
    # NOTE(review): tf.squeeze removes ALL size-1 dims, including the batch
    # dimension when B == 1 — confirm this is what callers rely on.
    return tf.squeeze(out)
# ROICaT <img src="logo.png" width="300" title="ROICaT" alt="ROICaT" align="right" vspace = "60">
[](https://github.com/RichieHakim/ROICaT/actions/workflows/build.yml)
**R**egion **O**f **I**nterest **C**lassification **a**nd **T**racking
A simple-to-use Python package for classifying images of cells and tracking them across imaging sessions/planes.
For technical support, please visit the support forum here: [https://groups.google.com/g/roicat_support](https://groups.google.com/g/roicat_support), or the github issues page here: [ISSUES](https://github.com/RichieHakim/ROICaT/issues).
With this package, you can:
- **Classify cells** into different categories (e.g. neurons, glia, etc.) using a simple GUI.
- **Track cells** across imaging sessions/planes using a jupyter notebook or script.
We have found that ROICaT is capable of classifying cells with accuracy comparable to human relabeling performance, and tracking cells with higher accuracy than any other methods we have tried. Paper coming soon.
## Table of contents
- [Announcements](#Announcements)<br>
- [Installation](#Installation)<br>
- [How to use ROICaT](#HowTo)<br>
- [Frequently Asked Questions](#FAQs)<br>
- [TODO](#TODO)<br>
## Announcements
- **TRACKING:** Try it out in the demo notebook [here](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/tracking/tracking_interactive_notebook.ipynb) or the demo script [here](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/tracking/tracking_scripted_notebook.ipynb).
- **CLASSIFICATION:** still in Alpha. Contact me if you want to help test it.
- To help with development or beta test releases, please contact: rhakim@g.harvard.edu
# Installation
ROICaT works on Windows, MacOS, and Linux. If you have any issues during the installation process, please make a [github issue](https://github.com/RichieHakim/ROICaT/issues) with the error.
### 0. Requirements
- Currently, ROICaT is designed to be used with **Suite2p** output data (stat.npy and ops.npy files) and **CaImAn** output data (results.h5 files), but any image data can be used (see this [NOTEBOOK](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/jupyter/other/demo_custom_data_importing.ipynb) for details on using non-standard data).
- [Anaconda](https://www.anaconda.com/distribution/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)<br>
- GCC >= 5.4.0, ideally == 9.2.0. Google how to do this on your operating system. For unix/linux: check with `gcc --version`.<br>
- On some Linux servers (like Harvard's O2 server), you may need to load modules instead of installing. To load conda, gcc, try: `module load conda3/latest gcc/9.2.0` or similar.<br>
- **Optional:** [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive). Using a GPU can increase ROICaT speeds ~5-50x, though without it, ROICaT will still run reasonably quick. GPU support is not available for Macs.<br>
### 1. (Recommended) Create a new conda environment
```
conda create -n ROICaT python=3.11
conda activate ROICaT
```
### 2. Clone the repo
```
git clone https://github.com/RichieHakim/ROICaT
cd path/to/ROICaT/directory
```
### 3. Install ROICaT
Optional: `pip install --upgrade pip`<br>
```
pip install --user -v -e .[all]
```
Note: if you are using a zsh terminal, change command to: `pip3 install --user -v -e '.[all]'`
#### Troubleshooting (Windows)
If you receive the error: `ERROR: Could not build wheels for hdbscan, which is required to install pyproject.toml-based projects` on Windows, make sure that you have installed Microsoft C++ Build Tools. If not, download from [here](https://visualstudio.microsoft.com/visual-cpp-build-tools/) and run the commands:
```
cd path/to/vs_buildtools.exe
vs_buildtools.exe --norestart --passive --downloadThenInstall --includeRecommended --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Workload.MSBuildTools
```
Then, try proceeding with the installation by rerunning the pip install commands above.
([Source](https://stackoverflow.com/questions/64261546/how-to-solve-error-microsoft-visual-c-14-0-or-greater-is-required-when-inst))
#### Troubleshooting (GPU support)
GPU support is not required. Windows users will often need to manually install a CUDA version of pytorch (see below). Note that you can check your nvidia driver version using the shell command: `nvidia-smi` if you have drivers installed.
Use the following command to check your PyTorch version and if it is GPU enabled:
```
python -c "import torch, torchvision; print(f'Using versions: torch=={torch.__version__}, torchvision=={torchvision.__version__}'); print(f'torch.cuda.is_available() = {torch.cuda.is_available()}')"
```
**Outcome 1:** Output expected if GPU is enabled:
```
Using versions: torch==X.X.X+cuXXX, torchvision==X.X.X+cuXXX
torch.cuda.is_available() = True
```
This is the ideal outcome. You are using a <u>CUDA</u> version of PyTorch and your GPU is enabled.
**Outcome 2:** Output expected if <u>non-CUDA</u> version of PyTorch is installed:
```
Using versions: torch==X.X.X, torchvision==X.X.X
OR
Using versions: torch==X.X.X+cpu, torchvision==X.X.X+cpu
torch.cuda.is_available() = False
```
If a <u>non-CUDA</u> version of PyTorch is installed, please follow the instructions here: https://pytorch.org/get-started/locally/ to install a CUDA version. If you are using a GPU, make sure you have a [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive) that match the same version as the PyTorch CUDA version you choose. All CUDA 11.x versions are intercompatible, so if you have CUDA 11.8 drivers, you can install `torch==2.0.1+cu117`.
**Outcome 3:** Output expected if GPU is not available:
```
Using versions: torch==X.X.X+cuXXX, torchvision==X.X.X+cuXXX
torch.cuda.is_available() = False
```
If a CUDA version of PyTorch is installed but GPU is not available, make sure you have a [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive) that match the same version as the PyTorch CUDA version you choose. All CUDA 11.x versions are intercompatible, so if you have CUDA 11.8 drivers, you can install `torch==2.0.1+cu117`.
### 4. Use ROICaT<br>
- Beginner: Run a Jupyter Notebook: [Notebooks](https://github.com/RichieHakim/ROICaT/tree/main/notebooks)<br>
- Advanced: Make a parameter file and run in command line: `python -m ROICaT`. See [TODO: link to how-to] for details.<br>
# <a name="HowTo"></a>How to use ROICaT
***Ways to use ROICaT:***
- **Easy:** Try out ROICaT on Google Colab: [TODO: Link]
- **Intermediate:** Run it on your own computer. See [Installation](#Installation) for how to install.
- Using provided Jupyter Notebook(s): [Notebooks](https://github.com/RichieHakim/ROICaT/tree/main/notebooks).
- Using command line: `python -m ROICaT`. See [TODO: link to how-to] for details.
- **Advanced:** Train a new ROInet model using the provided Jupyter Notebook [TODO: link]. Or contribute to the code base! This is a big collaborative effort, so please feel free to send a pull request or open an issue.
***General workflow:***
- **Pass ROIs through ROInet:** Images of the ROIs are passed through a neural network and outputs a feature vector for each image describing what the ROI looks like.
- **Classification:** The feature vectors can then be used to classify ROIs:
- A simple classifier can be trained using user supplied labeled data (e.g. an array of images of ROIs and a corresponding array of labels for each ROI).
- Alternatively, classification can be done by projecting the feature vectors into a lower-dimensional space using UMAP and then simply circling the region of space to classify the ROIs.
- **Tracking**: The feature vectors can be combined with information about the position of the ROIs to track the ROIs across imaging sessions/planes.
# <a name="FAQs"></a>Frequently asked questions:
# TODO:
- Unify model training into this repo
- Improve classification notebooks
- Try Bokeh for interactive plots
- Integration tests
- Port demo notebooks to CoLab
- make reference API
- make nice README.md with gifs
| /roicat-1.1.0.tar.gz/roicat-1.1.0/README.rst | 0.407569 | 0.888614 | README.rst | pypi |
# ROICaT <img src="logo.png" width="300" title="ROICaT" alt="ROICaT" align="right" vspace = "60">
[](https://github.com/RichieHakim/ROICaT/actions/workflows/build.yml)
**R**egion **O**f **I**nterest **C**lassification **a**nd **T**racking
A simple-to-use Python package for classifying images of cells and tracking them across imaging sessions/planes.
For technical support, please visit the support forum here: [https://groups.google.com/g/roicat_support](https://groups.google.com/g/roicat_support), or the github issues page here: [ISSUES](https://github.com/RichieHakim/ROICaT/issues).
With this package, you can:
- **Classify cells** into different categories (e.g. neurons, glia, etc.) using a simple GUI.
- **Track cells** across imaging sessions/planes using a jupyter notebook or script.
We have found that ROICaT is capable of classifying cells with accuracy comparable to human relabeling performance, and tracking cells with higher accuracy than any other methods we have tried. Paper coming soon.
## Table of contents
- [Announcements](#Announcements)<br>
- [Installation](#Installation)<br>
- [How to use ROICaT](#HowTo)<br>
- [Frequently Asked Questions](#FAQs)<br>
- [TODO](#TODO)<br>
## Announcements
- **TRACKING:** Try it out in the demo notebook [here](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/tracking/tracking_interactive_notebook.ipynb) or the demo script [here](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/tracking/tracking_scripted_notebook.ipynb).
- **CLASSIFICATION:** still in Alpha. Contact me if you want to help test it.
- To help with development or beta test releases, please contact: rhakim@g.harvard.edu
# Installation
ROICaT works on Windows, MacOS, and Linux. If you have any issues during the installation process, please make a [github issue](https://github.com/RichieHakim/ROICaT/issues) with the error.
### 0. Requirements
- Currently, ROICaT is designed to be used with **Suite2p** output data (stat.npy and ops.npy files) and **CaImAn** output data (results.h5 files), but any image data can be used (see this [NOTEBOOK](https://github.com/RichieHakim/ROICaT/blob/main/notebooks/jupyter/other/demo_custom_data_importing.ipynb) for details on using non-standard data).
- [Anaconda](https://www.anaconda.com/distribution/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)<br>
- GCC >= 5.4.0, ideally == 9.2.0. Google how to do this on your operating system. For unix/linux: check with `gcc --version`.<br>
- On some Linux servers (like Harvard's O2 server), you may need to load modules instead of installing. To load conda, gcc, try: `module load conda3/latest gcc/9.2.0` or similar.<br>
- **Optional:** [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive). Using a GPU can increase ROICaT speeds ~5-50x, though without it, ROICaT will still run reasonably quick. GPU support is not available for Macs.<br>
### 1. (Recommended) Create a new conda environment
```
conda create -n ROICaT python=3.11
conda activate ROICaT
```
### 2. Clone the repo
```
git clone https://github.com/RichieHakim/ROICaT
cd path/to/ROICaT/directory
```
### 3. Install ROICaT
Optional: `pip install --upgrade pip`<br>
```
pip install --user -v -e .[all]
```
Note: if you are using a zsh terminal, change command to: `pip3 install --user -v -e '.[all]'`
#### Troubleshooting (Windows)
If you receive the error: `ERROR: Could not build wheels for hdbscan, which is required to install pyproject.toml-based projects` on Windows, make sure that you have installed Microsoft C++ Build Tools. If not, download from [here](https://visualstudio.microsoft.com/visual-cpp-build-tools/) and run the commands:
```
cd path/to/vs_buildtools.exe
vs_buildtools.exe --norestart --passive --downloadThenInstall --includeRecommended --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Workload.MSBuildTools
```
Then, try proceeding with the installation by rerunning the pip install commands above.
([Source](https://stackoverflow.com/questions/64261546/how-to-solve-error-microsoft-visual-c-14-0-or-greater-is-required-when-inst))
#### Troubleshooting (GPU support)
GPU support is not required. Windows users will often need to manually install a CUDA version of pytorch (see below). Note that you can check your nvidia driver version using the shell command: `nvidia-smi` if you have drivers installed.
Use the following command to check your PyTorch version and if it is GPU enabled:
```
python -c "import torch, torchvision; print(f'Using versions: torch=={torch.__version__}, torchvision=={torchvision.__version__}'); print(f'torch.cuda.is_available() = {torch.cuda.is_available()}')"
```
**Outcome 1:** Output expected if GPU is enabled:
```
Using versions: torch==X.X.X+cuXXX, torchvision==X.X.X+cuXXX
torch.cuda.is_available() = True
```
This is the ideal outcome. You are using a <u>CUDA</u> version of PyTorch and your GPU is enabled.
**Outcome 2:** Output expected if <u>non-CUDA</u> version of PyTorch is installed:
```
Using versions: torch==X.X.X, torchvision==X.X.X
OR
Using versions: torch==X.X.X+cpu, torchvision==X.X.X+cpu
torch.cuda.is_available() = False
```
If a <u>non-CUDA</u> version of PyTorch is installed, please follow the instructions here: https://pytorch.org/get-started/locally/ to install a CUDA version. If you are using a GPU, make sure you have a [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive) that match the same version as the PyTorch CUDA version you choose. All CUDA 11.x versions are intercompatible, so if you have CUDA 11.8 drivers, you can install `torch==2.0.1+cu117`.
**Outcome 3:** Output expected if GPU is not available:
```
Using versions: torch==X.X.X+cuXXX, torchvision==X.X.X+cuXXX
torch.cuda.is_available() = False
```
If a CUDA version of PyTorch is installed but GPU is not available, make sure you have a [CUDA compatible NVIDIA GPU](https://developer.nvidia.com/cuda-gpus) and [drivers](https://developer.nvidia.com/cuda-toolkit-archive) that match the same version as the PyTorch CUDA version you choose. All CUDA 11.x versions are intercompatible, so if you have CUDA 11.8 drivers, you can install `torch==2.0.1+cu117`.
### 4. Use ROICaT<br>
- Beginner: Run a Jupyter Notebook: [Notebooks](https://github.com/RichieHakim/ROICaT/tree/main/notebooks)<br>
- Advanced: Make a parameter file and run in command line: `python -m ROICaT`. See [TODO: link to how-to] for details.<br>
# <a name="HowTo"></a>How to use ROICaT
***Ways to use ROICaT:***
- **Easy:** Try out ROICaT on Google Colab: [TODO: Link]
- **Intermediate:** Run it on your own computer. See [Installation](#Installation) for how to install.
- Using provided Jupyter Notebook(s): [Notebooks](https://github.com/RichieHakim/ROICaT/tree/main/notebooks).
- Using command line: `python -m ROICaT`. See [TODO: link to how-to] for details.
- **Advanced:** Train a new ROInet model using the provided Jupyter Notebook [TODO: link]. Or contribute to the code base! This is a big collaborative effort, so please feel free to send a pull request or open an issue.
***General workflow:***
- **Pass ROIs through ROInet:** Images of the ROIs are passed through a neural network, which outputs a feature vector for each image describing what the ROI looks like.
- **Classification:** The feature vectors can then be used to classify ROIs:
- A simple classifier can be trained using user supplied labeled data (e.g. an array of images of ROIs and a corresponding array of labels for each ROI).
- Alternatively, classification can be done by projecting the feature vectors into a lower-dimensional space using UMAP and then simply circling the region of space to classify the ROIs.
- **Tracking**: The feature vectors can be combined with information about the position of the ROIs to track the ROIs across imaging sessions/planes.
# <a name="FAQs"></a>Frequently asked questions:
# TODO:
- Unify model training into this repo
- Improve classification notebooks
- Try Bokeh for interactive plots
- Integration tests
- Port demo notebooks to CoLab
- make reference API
- make nice README.md with gifs
| /roicat-1.1.0.tar.gz/roicat-1.1.0/README.md | 0.407569 | 0.888614 | README.md | pypi |
=================
Welcome to ROILoc
=================
ROILoc is a registration-based ROI locator, based on the MNI152 09c Sym template, and the CerebrA Atlas. It'll center and crop T1 or T2 MRIs around a given ROI.
Results are saved in "LPI-" (or "RAS+") format.
.. image:: https://raw.githubusercontent.com/clementpoiret/ROILoc/main/example.png
:width: 800
:alt: Example: using ROILoc for Hippocampus
If the results aren't correct, please consider performing BET/Skull Stripping on your subject's MRI beforehand, then pass ``-b True`` afterward.
You can use FSL or ANTs to perform BET. I personally also had great and fast results from `deepbrain <https://github.com/iitzco/deepbrain>`_ which depends on TensorFlow 1.X.
It requires the following packages:
- ANTs (Can be a system installation or anaconda installation),
- ANTsPyX,
- importlib_resources,
- Pandas,
- Rich.
CLI
***
usage: roiloc [-h] -p PATH -i INPUTPATTERN [-r ROI [ROI ...]] -c CONTRAST [-b]
[-t TRANSFORM] [-m MARGIN [MARGIN ...]] [--rightoffset RIGHTOFFSET [RIGHTOFFSET ...]]
[--leftoffset LEFTOFFSET [LEFTOFFSET ...]] [--mask MASK]
[--extracrops EXTRACROPS [EXTRACROPS ...]] [--savesteps]
arguments::
-h, --help show this help message and exit
-p PATH, --path PATH <Required> Input images path.
-i INPUTPATTERN, --inputpattern INPUTPATTERN
<Required> Pattern to find input images in input path
(e.g.: `**/*t1*.nii.gz`).
-r ROI [ROI ...], --roi ROI [ROI ...]
ROI included in CerebrA. See
`roiloc/MNI/cerebra/CerebrA_LabelDetails.csv` for more
details. Default: 'Hippocampus'.
-c CONTRAST, --contrast CONTRAST
<Required> Contrast of the input MRI. Can be `t1` or
`t2`.
-b, --bet Flag use the BET version of the MNI152 template.
-t TRANSFORM, --transform TRANSFORM
Type of registration. See `https://antspy.readthedocs.
io/en/latest/registration.html` for the complete list
of options. Default: `AffineFast`
-m MARGIN [MARGIN ...], --margin MARGIN [MARGIN ...]
Margin to add around the bounding box in voxels. It
has to be a list of 3 integers, to control the margin
in the three axis (0: left/right margin, 1: post/ant
margin, 2: inf/sup margin). Default: [8,8,8]
--rightoffset RIGHTOFFSET [RIGHTOFFSET ...]
Offset to add to the bounding box of the right ROI in
voxels. It has to be a list of 3 integers, to control
the offset in the three axis (0: from left to right,
1: from post to ant, 2: from inf to sup).
Default: [0,0,0]
--leftoffset LEFTOFFSET [LEFTOFFSET ...]
Offset to add to the bounding box of the left ROI in
voxels. It has to be a list of 3 integers, to control
the offset in the three axis (0: from left to right,
1: from post to ant, 2: from inf to sup).
Default: [0,0,0]
--mask MASK Pattern for brain tissue mask to improve registration
(e.g.: `sub_*bet_mask.nii.gz`). If providing a BET
mask, please also pass `-b` to use a BET MNI template.
--extracrops EXTRACROPS [EXTRACROPS ...]
Pattern for other files to crop (e.g. manual
segmentation: '*manual_segmentation_left*.nii.gz').
--savesteps Flag to save intermediate files (e.g. registered
atlas).
Python API
**********
Even if the CLI interface is the main use case, a Python API is also available since v0.2.0.
The API syntax retakes sklearn's API syntax, with a ``RoiLocator`` class, having ``fit``, ``transform``, ``fit_transform`` and ``inverse_transform`` methods as seen below.
.. code-block:: python
import ants
from roiloc.locator import RoiLocator
image = ants.image_read("./sub00_t2w.nii.gz",
reorient="LPI")
locator = RoiLocator(contrast="t2", roi="hippocampus", bet=False)
# Fit the locator and get the transformed MRIs
right, left = locator.fit_transform(image)
# Coordinates can be obtained through the `coords` attribute
print(locator.get_coords())
# Let 'model' be a segmentation model of the hippocampus
right_seg = model(right)
left_seg = model(left)
# Transform the segmentation back to the original image
right_seg = locator.inverse_transform(right_seg)
left_seg = locator.inverse_transform(left_seg)
# Save the resulting segmentations in the original space
ants.image_write(right_seg, "./sub00_hippocampus_right.nii.gz")
ants.image_write(left_seg, "./sub00_hippocampus_left.nii.gz")
Installation
************
1/ Be sure to have a working ANTs installation: `see on GitHub <https://github.com/ANTsX/ANTs>`_,
2/ Simply run ``pip install roiloc`` (at least python 3.7).
Example:
********
Let's say I have a main database folder, containing one subfolder for each subject. Each subject folder contains a t2w MRI called ``tse.nii.gz`` and a brain mask called ``brain_mask.nii``.
Therefore, to extract both left and right hippocampi (``Hippocampus``), I can run:
``roiloc -p "~/Datasets/MemoDev/ManualSegmentation/" -i "**/tse.nii.gz" -r "hippocampus" -c "t2" -b -t "AffineFast" -m 16 2 16 --mask "*brain_mask.nii"``
Supported Registrations
***********************
(Taken from ANTsPyX's doc)
- ``Translation``: Translation transformation.
- ``Rigid``: Rigid transformation: Only rotation and translation.
- ``Similarity``: Similarity transformation: scaling, rotation and translation.
- ``QuickRigid``: Rigid transformation: Only rotation and translation. May be useful for quick visualization fixes.
- ``DenseRigid``: Rigid transformation: Only rotation and translation. Employs dense sampling during metric estimation.
- ``BOLDRigid``: Rigid transformation: Parameters typical for BOLD to BOLD intrasubject registration.
- ``Affine``: Affine transformation: Rigid + scaling.
- ``AffineFast``: Fast version of Affine.
- ``BOLDAffine``: Affine transformation: Parameters typical for BOLD to BOLD intrasubject registration.
- ``TRSAA``: translation, rigid, similarity, affine (twice). please set regIterations if using this option. this would be used in cases where you want a really high quality affine mapping (perhaps with mask).
- ``ElasticSyN``: Symmetric normalization: Affine + deformable transformation, with mutual information as optimization metric and elastic regularization.
- ``SyN``: Symmetric normalization: Affine + deformable transformation, with mutual information as optimization metric.
- ``SyNRA``: Symmetric normalization: Rigid + Affine + deformable transformation, with mutual information as optimization metric.
- ``SyNOnly``: Symmetric normalization: no initial transformation, with mutual information as optimization metric. Assumes images are aligned by an initial transformation. Can be useful if you want to run an unmasked affine followed by masked deformable registration.
- ``SyNCC``: SyN, but with cross-correlation as the metric.
- ``SyNabp``: SyN optimized for abpBrainExtraction.
- ``SyNBold``: SyN, but optimized for registrations between BOLD and T1 images.
- ``SyNBoldAff``: SyN, but optimized for registrations between BOLD and T1 images, with additional affine step.
- ``SyNAggro``: SyN, but with more aggressive registration (fine-scale matching and more deformation). Takes more time than SyN.
- ``TVMSQ``: time-varying diffeomorphism with mean square metric
- ``TVMSQC``: time-varying diffeomorphism with mean square metric for very large deformation
Supported ROIs
**************
- Caudal Anterior Cingulate,
- Caudal Middle Frontal,
- Cuneus,
- Entorhinal,
- Fusiform,
- Inferior Parietal,
- Inferior temporal,
- Isthmus Cingulate,
- Lateral Occipital,
- Lateral Orbitofrontal,
- Lingual,
- Medial Orbitofrontal,
- Middle Temporal,
- Parahippocampal,
- Paracentral,
- Pars Opercularis,
- Pars Orbitalis,
- Pars Triangularis,
- Pericalcarine,
- Postcentral,
- Posterior Cingulate,
- Precentral,
- Precuneus,
- Rostral Anterior Cingulate,
- Rostral Middle Frontal,
- Superior Frontal,
- Superior Parietal,
- Superior Temporal,
- Supramarginal,
- Transverse Temporal,
- Insula,
- Brainstem,
- Third Ventricle,
- Fourth Ventricle,
- Optic Chiasm,
- Lateral Ventricle,
- Inferior Lateral Ventricle,
- Cerebellum Gray Matter,
- Cerebellum White Matter,
- Thalamus,
- Caudate,
- Putamen,
- Pallidum,
- Hippocampus,
- Amygdala,
- Accumbens Area,
- Ventral Diencephalon,
- Basal Forebrain,
- Vermal lobules I-V,
- Vermal lobules VI-VII,
- Vermal lobules VIII-X.
Cite this work
**************
If you use this software, please cite it as below.
authors:
- family-names: Poiret
- given-names: Clément
- orcid: https://orcid.org/0000-0002-1571-2161
title: clementpoiret/ROILoc: Zenodo Release
version: v0.2.4
date-released: 2021-09-14
Example:
``Clément POIRET. (2021). clementpoiret/ROILoc: Zenodo Release (v0.2.4). Zenodo. https://doi.org/10.5281/zenodo.5506959``
| /ROILoc-0.3.0.tar.gz/ROILoc-0.3.0/README.rst | 0.903134 | 0.683314 | README.rst | pypi |
from __future__ import annotations
import datetime as dt
from packaging import version
from .model import RointeProduct
DEFAULT_TIME_ZONE: dt.tzinfo = dt.timezone.utc


def now(time_zone=None) -> dt.datetime:
    """Return the current time as an aware datetime.

    Args:
        time_zone: tzinfo to use; falls back to ``DEFAULT_TIME_ZONE``
            (UTC) when omitted.
    """
    tz = time_zone or DEFAULT_TIME_ZONE
    return dt.datetime.now(tz)
def find_max_fw_version(data, device_class: str, product_version: str) -> str | None:
    """Find the latest FW version for a specific device class and version.

    Args:
        data: Firmware catalogue keyed by device class, then product
            version, then release channel ("end_user").
        device_class: Device class to search for.
        product_version: Product (hardware) version to search for.

    Returns:
        The highest available end-user firmware version as a string, or
        ``None`` when the class/version is unknown or no releases exist.
    """
    if device_class not in data:
        return None

    class_node = data[device_class]
    if (
        product_version not in class_node
        or "end_user" not in class_node[product_version]
    ):
        return None

    releases = class_node[product_version]["end_user"]
    if not releases:
        # BUG FIX: the previous implementation fell through to
        # str(max_version) with max_version == None and returned the
        # literal string "None" when there were no releases.
        return None

    # packaging's version.parse gives proper semantic ordering
    # (e.g. "1.10" > "1.9", unlike plain string comparison).
    return str(max(version.parse(entry) for entry in releases))
def build_update_map(firmware_data: dict) -> dict[RointeProduct, dict[str, str]]:
    """Build a firmware update map for every known product.

    Output is a dict keyed by product, whose value is a dict of
    [existing_version, target_version], where [target_version] is the
    next version the product can be updated to and [existing_version]
    is the product's current version.
    """
    return {
        product: build_product_fw_map(product, firmware_data)
        for product in RointeProduct
    }
def build_product_fw_map(
    product: RointeProduct, firmware_data: dict
) -> dict[str, str] | None:
    """Build the upgrade map for a specific product.

    Args:
        product: Product whose firmware upgrade path is requested.
        firmware_data: Firmware catalogue keyed by device type, then
            product version, then release channel ("end_user").

    Returns:
        Mapping from each current firmware version to the next version
        it can be upgraded to, or ``None`` when the product's type or
        version is absent from the catalogue.

    Note:
        The return annotation previously claimed ``dict[str, str]`` even
        though two branches return ``None``; it is now ``dict[str, str] | None``.
    """
    if product.device_type not in firmware_data:
        return None

    root_node = firmware_data[product.device_type]
    if product.version not in root_node:
        return None

    product_versions = root_node[product.version]["end_user"]

    upgrade_map: dict[str, str] = {}
    for version_entry, entry_data in product_versions.items():
        # Entries without a "firmware_new_version" field are terminal
        # versions (nothing newer to upgrade to).
        new_version = entry_data.get("firmware_new_version", None)
        if new_version:
            upgrade_map[version_entry] = new_version
    return upgrade_map
def get_product_by_type_version(
    product_type: str, product_version: str
) -> RointeProduct | None:
    """Return the product model matching the given type and version, if any."""
    return next(
        (
            product
            for product in RointeProduct
            if product.device_type == product_type
            and product.version == product_version
        ),
        None,
    )
from __future__ import annotations
from datetime import datetime
from rointesdk.utils import get_product_by_type_version
from .model import ScheduleMode, RointeProduct
from .dto import EnergyConsumptionData
from . import utils
class RointeDevice:
    """Represent a Rointe device from the API."""

    id: str
    name: str
    serialnumber: str
    type: str

    # This represents the model of a particular product. Not the FW version.
    product_version: str

    # Firmware version reported by the device; None when the API payload
    # carried no "firmware" section.
    firmware_version: str | None
    # Newest firmware version known to be available; None when unknown.
    latest_firmware_version: str | None
    nominal_power: int
    power: bool

    # Describes the preset: ICE, ECO, Comfort, None
    preset: str

    mode: str
    temp: float
    temp_calc: float
    temp_probe: float

    # preset temperatures
    comfort_temp: float
    eco_temp: float
    ice_temp: float

    # User mode (um_max_temp / um_min_temp are only assigned on v2
    # products; check user_mode_supported() before reading them).
    um_max_temp: float
    um_min_temp: float
    user_mode: bool
    ice_mode: bool

    # Schedule
    schedule: list[str]
    schedule_day: int
    schedule_hour: int

    energy_data: EnergyConsumptionData
    last_sync_datetime_app: datetime
    last_sync_datetime_device: datetime
    hass_available: bool

    def __init__(
        self,
        device_id: str,
        device_info: dict,
        energy_data: EnergyConsumptionData,
        latest_fw: str | None,
    ) -> None:
        """Initialize the device from the rointe's json blob.

        Args:
            device_id: Unique identifier of the device.
            device_info: Raw device payload from the API; fields used
                here live under its "data" key.
            energy_data: Pre-parsed energy consumption readings.
            latest_fw: Latest available firmware version, if known.
        """
        self.id = device_id
        self.type = device_info["data"]["type"]
        # Lower-cased so version checks (e.g. == "v2") are insensitive to
        # the casing the API happens to return.
        self.product_version = str.lower(device_info["data"]["product_version"])
        self.serialnumber = device_info["serialnumber"]

        # Mutable state is parsed in update_data() so a refresh reuses
        # the same logic.
        self.update_data(device_info, energy_data, latest_fw)

    def update_data(
        self,
        device_info: dict,
        energy_data: EnergyConsumptionData,
        latest_fw: str | None,
    ) -> None:
        """Update the device data from a Json object.

        Args:
            device_info: Raw device payload; values are read from its
                "data" key and, if present, its "firmware" key.
            energy_data: Pre-parsed energy consumption readings.
            latest_fw: Latest available firmware version, if known.
        """
        data = device_info["data"]
        firmware_data = device_info.get("firmware")

        self.name = data["name"]
        self.nominal_power = int(data["nominal_power"])
        self.power = bool(data["power"])
        self.preset = data["status"]
        self.mode = data["mode"]
        self.temp = float(data["temp"])
        self.temp_calc = float(data["temp_calc"])
        self.temp_probe = float(data["temp_probe"])

        self.comfort_temp = float(data["comfort"])
        self.eco_temp = float(data["eco"])
        self.ice_temp = float(data["ice"])

        # User mode settings are only valid for V2 radiators.
        if self.product_version == "v2":
            self.um_max_temp = float(data["um_max_temp"])
            self.um_min_temp = float(data["um_min_temp"])
            self.user_mode = bool(data["user_mode"])
        else:
            # NOTE(review): um_max_temp / um_min_temp stay unset here.
            self.user_mode = False

        self.ice_mode = bool(data["ice_mode"])
        self.schedule = data["schedule"]
        self.schedule_day = data["schedule_day"]
        self.schedule_hour = data["schedule_hour"]
        self.energy_data = energy_data

        # Sync timestamps arrive as epoch milliseconds; divide by 1000
        # for datetime.fromtimestamp().
        self.last_sync_datetime_app = datetime.fromtimestamp(
            int(data["last_sync_datetime_app"]) / 1000.0
        )
        self.last_sync_datetime_device = datetime.fromtimestamp(
            int(data["last_sync_datetime_device"]) / 1000.0
        )

        if firmware_data:
            self.firmware_version = firmware_data.get("firmware_version_device")
        else:
            self.firmware_version = None

        self.latest_firmware_version = latest_fw
        # Presumably availability tracking for the Home Assistant
        # integration; set True after a successful data refresh.
        self.hass_available = True

    def get_current_schedule_mode(self) -> ScheduleMode:
        """Return the current schedule mode for the device.

        Returns C for Comfort, E for Eco, O for no-schedule
        """
        day_time = utils.now()
        day_of_week = day_time.weekday()  # 0 is Monday
        hour_index = day_time.hour

        # schedule is indexed [weekday][hour]; each slot is a single
        # letter code ("C", "E", anything else means no schedule).
        current_mode = self.schedule[day_of_week][hour_index]

        if current_mode == "C":
            return ScheduleMode.COMFORT
        elif current_mode == "E":
            return ScheduleMode.ECO
        else:
            return ScheduleMode.NONE

    def user_mode_supported(self) -> bool:
        """Return True if this device supports user mode (v2 products only)."""
        return self.product_version == "v2"

    @property
    def rointe_product(self) -> RointeProduct | None:
        """Return the RointeProduct matching this device's type/version, or None."""
        return get_product_by_type_version(self.type, self.product_version)
import datetime
from typing import Any, Dict, Optional, Union
import pandas as pd
from .gie_raw_client import GieRawClient
from .mappings.agsi_company import AGSICompany
from .mappings.agsi_country import AGSICountry
from .mappings.agsi_facility import AGSIFacility
from .mappings.alsi_company import ALSICompany
from .mappings.alsi_country import ALSICountry
from .mappings.alsi_facility import ALSIFacility
class GiePandasClient(GieRawClient):
    """AGSI/ALSI Pandas Client which queries the API and returns data"""

    # Columns that carry numeric values in API responses; after a
    # DataFrame is built, any of these that are present are cast to float.
    _FLOATING_COLS = [
        "gasInStorage",
        "consumption",
        "consumptionFull",
        "injection",
        "withdrawal",
        "netWithdrawal",
        "workingGasVolume",
        "injectionCapacity",
        "withdrawalCapacity",
        "trend",
        "full",
        "inventory",
        "sendOut",
        "dtmi",
        "dtrs",
        "volume",
    ]

    def _pandas_df_format(
        self, json_res: Dict[str, Any], float_cols: Optional[list] = None
    ) -> pd.DataFrame:
        """Transform raw JSON query results into a pandas DataFrame.

        Parameters
        ----------
        json_res : Dict[str, Any]
            Raw data in a Dict format
        float_cols : Optional[list], optional
            optional col data which have to be parsed to float, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding the queried data
        """
        # Some endpoints wrap the records under a "data" key, others
        # return the record structure directly.
        df = (
            pd.DataFrame(json_res["data"])
            if "data" in json_res
            else pd.DataFrame(json_res)
        )
        if "gas_day" in json_res:
            df.insert(0, "gas_day", json_res["gas_day"], allow_duplicates=True)
        if float_cols is not None:
            # Only cast columns that actually exist in this response.
            df_cols = [x for x in float_cols if x in df.columns]
            if df_cols:
                # NOTE(review): astype(errors="ignore") is deprecated in
                # recent pandas releases; consider pd.to_numeric instead.
                df[df_cols] = df[df_cols].astype("float", errors="ignore")
        return df

    async def query_agsi_eic_listing(self) -> pd.DataFrame:
        """Return all the AGSI EIC (Energy Identification Code) listing

        Returns
        -------
        pd.DataFrame
            DataFrame holding the queried data
        """
        json_result = await super().query_agsi_eic_listing()
        return self._pandas_df_format(json_result)

    async def query_alsi_eic_listing(self) -> pd.DataFrame:
        """Return all the ALSI EIC (Energy Identification Code) listing

        Returns
        -------
        pd.DataFrame
            DataFrame holding the queried data
        """
        json_result = await super().query_alsi_eic_listing()
        return self._pandas_df_format(json_result)

    async def query_alsi_news_listing(
        self, news_url_item: Optional[Union[int, str]] = None
    ) -> pd.DataFrame:
        """Return all the ALSI news or a specific country news listings

        Parameters
        ----------
        news_url_item : Optional[Union[int, str]], optional
            An integer representing a specific country, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding the queried data
        """
        json_result = await super().query_alsi_news_listing(
            news_url_item=news_url_item
        )
        return self._pandas_df_format(json_result)

    async def query_agsi_news_listing(
        self, news_url_item: Optional[Union[int, str]] = None
    ) -> pd.DataFrame:
        """Return all the AGSI news or a specific country news listings

        Parameters
        ----------
        news_url_item : Optional[Union[int, str]], optional
            An integer representing a specific country, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding the queried data
        """
        json_result = await super().query_agsi_news_listing(
            news_url_item=news_url_item
        )
        return self._pandas_df_format(json_result)

    async def query_country_agsi_storage(
        self,
        country: Optional[Union[AGSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Return listing with the AGSI storage data for a
        specific country or all countries

        Parameters
        ----------
        country : Optional[Union[AGSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional starting date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_country_agsi_storage(
            country=country, start=start, end=end, date=date, size=size
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_country_alsi_storage(
        self,
        country: Optional[Union[ALSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Return listing with the ALSI storage data for
        a specific country or all countries

        Parameters
        ----------
        country : Optional[Union[ALSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_country_alsi_storage(
            country=country, start=start, end=end, date=date, size=size
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_agsi_facility_storage(
        self,
        facility_name: Union[AGSIFacility, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Return listing with the AGSI data for a specific facility storage

        Parameters
        ----------
        facility_name : Union[AGSIFacility, str]
            The name of the facility to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_agsi_facility_storage(
            facility_name=facility_name,
            start=start,
            end=end,
            date=date,
            size=size,
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_alsi_facility_storage(
        self,
        facility_name: Union[ALSIFacility, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Return listing with the ALSI data for a specific facility storage

        Parameters
        ----------
        facility_name : Union[ALSIFacility, str]
            The name of the facility to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_alsi_facility_storage(
            facility_name=facility_name,
            start=start,
            end=end,
            date=date,
            size=size,
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_agsi_company(
        self,
        company_name: Union[AGSICompany, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Returns listing with the AGSI data for a specific company

        Parameters
        ----------
        company_name : Union[AGSICompany, str]
            The name of the company to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_agsi_company(
            company_name=company_name,
            start=start,
            end=end,
            date=date,
            size=size,
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_alsi_company(
        self,
        company_name: Union[ALSICompany, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Returns listing with the ALSI data for a specific company

        Parameters
        ----------
        company_name : Union[ALSICompany, str]
            The name of the company to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_alsi_company(
            company_name=company_name,
            start=start,
            end=end,
            date=date,
            size=size,
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_agsi_unavailability(
        self,
        country: Optional[Union[AGSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Returns the total AGSI unavailability data or
        a specific country unavailability

        Parameters
        ----------
        country : Optional[Union[AGSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_agsi_unavailability(
            country=country, start=start, end=end, size=size
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)

    async def query_alsi_unavailability(
        self,
        country: Optional[Union[ALSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> pd.DataFrame:
        """Returns the total ALSI unavailability data or
        a specific country unavailability

        Parameters
        ----------
        country : Optional[Union[ALSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        pd.DataFrame
            DataFrame holding queried data
        """
        json_result = await super().query_alsi_unavailability(
            country=country, start=start, end=end, size=size
        )
        return self._pandas_df_format(json_result, self._FLOATING_COLS)
from typing import Union
from .mappings.agsi_company import AGSICompany
from .mappings.agsi_country import AGSICountry
from .mappings.agsi_facility import AGSIFacility
from .mappings.alsi_company import ALSICompany
from .mappings.alsi_country import ALSICountry
from .mappings.alsi_facility import ALSIFacility
def lookup_agsi_company(key: Union[AGSICompany, str]) -> AGSICompany:
    """Resolve ``key`` to an AGSICompany member.

    An AGSICompany instance is returned unchanged.  A string is first
    tried as a member *name* and, failing that, as a member *value*.

    Parameters
    ----------
    key : Union[AGSICompany, str]
        The key to use for the lookup.

    Returns
    -------
    AGSICompany
        The corresponding instance of AGSICompany for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If `key` does not represent a valid company.
    """
    if isinstance(key, AGSICompany):
        return key
    try:
        return AGSICompany[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            matches = [member for member in AGSICompany if member.value == key]
            return matches[0]
        except IndexError as err:
            raise ValueError(
                "The company string provided is invalid!"
            ) from err
# Checking the provided facility string in our base enums
def lookup_facility_agsi(key: Union[AGSIFacility, str]) -> AGSIFacility:
    """Key lookup for AGSIFacility

    If the key is already of type AGSIFacility, returns it immediately;
    otherwise the string is tried as a member name first, then as a value.

    Parameters
    ----------
    key : Union[AGSIFacility, str]
        The key to use for the lookup.

    Returns
    -------
    AGSIFacility
        The corresponding instance of AGSIFacility for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If `key` does not represent a valid facility.
    """
    if isinstance(key, AGSIFacility):
        return key
    try:
        return AGSIFacility[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            return [obj for obj in AGSIFacility if obj.value == key][0]
        except IndexError as err:
            raise ValueError("The facility provided is invalid!") from err
def lookup_country_agsi(key: Union[AGSICountry, str]) -> AGSICountry:
    """Resolve ``key`` to an AGSICountry member.

    An AGSICountry instance is returned unchanged.  A string is first
    tried as a member *name* and, failing that, as a member *value*.

    Parameters
    ----------
    key: Union[AGSICountry, str]
        The key to use for the lookup.

    Returns
    -------
    AGSICountry
        The corresponding instance of AGSICountry for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If 'key' does not represent a valid country.
    """
    if isinstance(key, AGSICountry):
        return key
    try:
        return AGSICountry[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            candidates = [member for member in AGSICountry if member.value == key]
            return candidates[0]
        except IndexError as err:
            raise ValueError(
                "The country string provided is invalid!"
            ) from err
def lookup_alsi_company(key: Union[ALSICompany, str]) -> ALSICompany:
    """Key lookup for ALSICompany

    If the key is already of type ALSICompany, returns it immediately;
    otherwise the string is tried as a member name first, then as a value.

    Parameters
    ----------
    key: Union[ALSICompany, str]
        The key to use for the lookup.

    Returns
    -------
    ALSICompany
        The corresponding instance of ALSICompany for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If 'key' does not represent a valid company.
    """
    if isinstance(key, ALSICompany):
        return key
    try:
        return ALSICompany[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            return [obj for obj in ALSICompany if obj.value == key][0]
        except IndexError as err:
            raise ValueError(
                "The company string provided is invalid!"
            ) from err
def lookup_facility_alsi(key: Union[ALSIFacility, str]) -> ALSIFacility:
    """Key lookup for ALSIFacility

    If the key is already of type ALSIFacility, returns it immediately;
    otherwise the string is tried as a member name first, then as a value.

    Parameters
    ----------
    key : Union[ALSIFacility, str]
        The key to use for the lookup.

    Returns
    -------
    ALSIFacility
        The corresponding instance of ALSIFacility for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If `key` does not represent a valid facility.
    """
    if isinstance(key, ALSIFacility):
        return key
    try:
        return ALSIFacility[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            return [obj for obj in ALSIFacility if obj.value == key][0]
        except IndexError as err:
            raise ValueError(
                "The facility string provided is invalid!"
            ) from err
def lookup_country_alsi(key: Union[ALSICountry, str]) -> ALSICountry:
    """Resolve ``key`` to an ALSICountry member.

    An ALSICountry instance is returned unchanged.  A string is first
    tried as a member *name* and, failing that, as a member *value*.

    Parameters
    ----------
    key: Union[ALSICountry, str]
        The key to use for the lookup.

    Returns
    -------
    ALSICountry
        The corresponding instance of ALSICountry for which
        the value equals the lookup key.

    Raises
    ------
    ValueError
        If 'key' does not represent a valid country.
    """
    if isinstance(key, ALSICountry):
        return key
    try:
        return ALSICountry[key]
    except KeyError:
        # Fall back to a lookup by enum value.
        try:
            candidates = [member for member in ALSICountry if member.value == key]
            return candidates[0]
        except IndexError as err:
            raise ValueError(
                "The country string provided is invalid!"
            ) from err
import datetime
import logging
import urllib.parse
from typing import Any, Dict, Optional, Union
import aiohttp
from .exceptions import ApiError
from .lookup_functions import (
lookup_agsi_company,
lookup_alsi_company,
lookup_country_agsi,
lookup_country_alsi,
lookup_facility_agsi,
lookup_facility_alsi,
)
from .mappings.agsi_company import AGSICompany
from .mappings.agsi_country import AGSICountry
from .mappings.agsi_facility import AGSIFacility
from .mappings.alsi_company import ALSICompany
from .mappings.alsi_country import ALSICountry
from .mappings.alsi_facility import ALSIFacility
from .mappings.api_mappings import APIType
# NOTE(review): calling basicConfig at import time configures the *root*
# logger for every application that imports this library; consider leaving
# logging configuration to the application — confirm before changing.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
class GieRawClient:
    """AGSI/ALSI Raw Client which queries the GIE API and returns JSON data."""

    def __init__(
        self, api_key: str, session: Optional[aiohttp.ClientSession] = None
    ):
        """Constructor method for our client

        Parameters
        ----------
        api_key : str
            The key needed for accessing the API
        session : Optional[aiohttp.ClientSession], optional
            User supplied aiohttp ClientSession, or create a new one
            if None, by default None
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self.api_key = api_key
        self.session = (
            session
            if session is not None
            else aiohttp.ClientSession(
                raise_for_status=True, headers={"x-key": self.api_key}
            )
        )

    @property
    def api_key(self):
        """The API key sent with every request via the ``x-key`` header."""
        return self.__api_key

    @api_key.setter
    def api_key(self, value):
        # Fail fast: every request needs the key, so reject empty/None early.
        if not value:
            raise ApiError("API key is missing!")
        self.__api_key = value

    async def query_agsi_eic_listing(self) -> Dict[str, Any]:
        """Return all the AGSI EIC (Energy Identification Code) listing.

        Returns
        -------
        Dict[str, Any]
            Object holding the data
        """
        self._logger.info("Query AGSI EIC listing started awaiting result..")
        return await self.fetch(APIType.AGSI, "about?show=listing")

    async def query_alsi_eic_listing(self) -> Dict[str, Any]:
        """Return all the ALSI EIC (Energy Identification Code) listing.

        Returns
        -------
        Dict[str, Any]
            Object holding the data
        """
        self._logger.info("Query ALSI EIC listing started awaiting result..")
        return await self.fetch(APIType.ALSI, "about?show=listing")

    async def query_alsi_news_listing(
        self, news_url_item: Optional[Union[int, str]] = None
    ) -> Dict[str, Any]:
        """Return all the ALSI news or a specific country news listings

        Parameters
        ----------
        news_url_item : Optional[Union[int, str]], optional
            An integer representing a specific country, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding the data
        """
        self._logger.info("Query ALSI NEWS listing started awaiting result..")
        return await self.fetch(
            APIType.ALSI, "news", news_url_item=news_url_item
        )

    async def query_agsi_news_listing(
        self, news_url_item: Optional[Union[int, str]] = None
    ) -> Dict[str, Any]:
        """Return all the AGSI news or a specific country news listings

        Parameters
        ----------
        news_url_item : Optional[Union[int, str]], optional
            An integer representing a specific country, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding the data
        """
        self._logger.info("Query AGSI NEWS listing started awaiting result..")
        return await self.fetch(
            APIType.AGSI, "news", news_url_item=news_url_item
        )

    async def query_country_agsi_storage(
        self,
        country: Optional[Union[AGSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Return listing with the AGSI storage data for
        a specific country or all countries

        Parameters
        ----------
        country : Optional[Union[AGSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        params = None
        if country is not None:
            country_param = lookup_country_agsi(country)
            params = country_param.get_params()
        self._logger.info("Query AGSI COUNTRY STORAGE started..")
        return await self.fetch(
            APIType.AGSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def query_country_alsi_storage(
        self,
        country: Optional[Union[ALSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Return listing with the ALSI storage data for
        a specific country or all countries

        Parameters
        ----------
        country : Optional[Union[ALSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        params = None
        if country is not None:
            country_param = lookup_country_alsi(country)
            params = country_param.get_params()
        self._logger.info("Query ALSI COUNTRY STORAGE started..")
        # BUGFIX: this previously queried APIType.AGSI, returning AGSI data
        # for ALSI country storage requests.
        return await self.fetch(
            APIType.ALSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def query_agsi_unavailability(
        self,
        country: Optional[Union[AGSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Returns the total AGSI unavailability data or
        a specific country unavailability

        Parameters
        ----------
        country : Optional[Union[AGSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        params = None
        if country is not None:
            country_param = lookup_country_agsi(country)
            params = country_param.get_params()
        self._logger.info("Query AGSI UNAVAILABILITY started..")
        return await self.fetch(
            APIType.AGSI,
            endpoint="unavailability",
            params=params,
            start=start,
            end=end,
            size=size,
        )

    async def query_alsi_unavailability(
        self,
        country: Optional[Union[ALSICountry, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Returns the total ALSI unavailability data or
        a specific country unavailability

        Parameters
        ----------
        country : Optional[Union[ALSICountry, str]], optional
            Optional country param, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        params = None
        if country is not None:
            country_param = lookup_country_alsi(country)
            params = country_param.get_params()
        self._logger.info("Query ALSI UNAVAILABILITY started..")
        return await self.fetch(
            APIType.ALSI,
            endpoint="unavailability",
            params=params,
            start=start,
            end=end,
            size=size,
        )

    async def query_agsi_facility_storage(
        self,
        facility_name: Union[AGSIFacility, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Return listing with the AGSI data for a specific facility storage

        Parameters
        ----------
        facility_name : Union[AGSIFacility, str]
            The name of the facility to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        facility_param = lookup_facility_agsi(facility_name)
        params = facility_param.get_params()
        self._logger.info("Query AGSI FACILITY STORAGE started..")
        return await self.fetch(
            APIType.AGSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def query_alsi_facility_storage(
        self,
        facility_name: Union[ALSIFacility, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Return listing with the ALSI data for a specific facility storage

        Parameters
        ----------
        facility_name : Union[ALSIFacility, str]
            The name of the facility to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        facility_param = lookup_facility_alsi(facility_name)
        params = facility_param.get_params()
        self._logger.info("Query ALSI FACILITY STORAGE started..")
        return await self.fetch(
            APIType.ALSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def query_agsi_company(
        self,
        company_name: Union[AGSICompany, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Returns listing with the AGSI data for a specific company

        Parameters
        ----------
        company_name : Union[AGSICompany, str]
            The name of the company to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        company_param = lookup_agsi_company(company_name)
        params = company_param.get_params()
        self._logger.info("Query AGSI COMPANY started..")
        return await self.fetch(
            APIType.AGSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def query_alsi_company(
        self,
        company_name: Union[ALSICompany, str],
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Returns listing with the ALSI data for a specific company

        Parameters
        ----------
        company_name : Union[ALSICompany, str]
            The name of the company to query for
        start : Optional[Union[datetime.datetime, str]], optional
            Optional start date param, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Optional end date param, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Optional current date param, by default None
        size : Optional[Union[int, str]], optional
            Optional result size param, by default None

        Returns
        -------
        Dict[str, Any]
            Object holding queried data
        """
        company_param = lookup_alsi_company(company_name)
        params = company_param.get_params()
        self._logger.info("Query ALSI COMPANY started..")
        return await self.fetch(
            APIType.ALSI,
            params=params,
            start=start,
            end=end,
            date=date,
            size=size,
        )

    async def fetch(
        self,
        api_type: Union[APIType, str],
        endpoint: Optional[str] = None,
        params: Optional[Dict[str, str]] = None,
        news_url_item: Optional[Union[int, str]] = None,
        start: Optional[Union[datetime.datetime, str]] = None,
        end: Optional[Union[datetime.datetime, str]] = None,
        date: Optional[Union[datetime.datetime, str]] = None,
        size: Optional[Union[int, str]] = None,
    ):
        """Build the request URL and send the request to the API.

        Parameters
        ----------
        api_type : Union[APIType, str]
            Root URL of the API (AGSI or ALSI) or a raw URL string
        endpoint : Optional[str], optional
            Path appended to the root URL, by default None
        params : Optional[Dict[str, str]], optional
            Extra query parameters (country/company/facility), by default None
        news_url_item : Optional[Union[int, str]], optional
            Value for the "url" query parameter, by default None
        start : Optional[Union[datetime.datetime, str]], optional
            Value for the "from" query parameter, by default None
        end : Optional[Union[datetime.datetime, str]], optional
            Value for the "to" query parameter, by default None
        date : Optional[Union[datetime.datetime, str]], optional
            Value for the "date" query parameter, by default None
        size : Optional[Union[int, str]], optional
            Value for the "size" query parameter, by default None

        Returns
        -------
        The JSON-decoded API response.
        """
        _params = {
            "url": news_url_item,
            "from": start,
            "to": end,
            "date": date,
            "size": size,
        }
        if params is not None:
            _params.update(params)
        root_url = (
            api_type.value if isinstance(api_type, APIType) else api_type
        )
        final_url = urllib.parse.urljoin(root_url, endpoint)
        # Drop the query parameters the caller did not supply.
        final_params = {k: v for k, v in _params.items() if v is not None}
        async with self.session.get(
            final_url,
            params=final_params,
        ) as resp:
            self._logger.info("fetching the result..")
            return await resp.json()

    async def close_session(self) -> None:
        """Close the underlying aiohttp session."""
        if self.session:
            await self.session.close()
import enum
class ALSIFacility(enum.Enum):
    """ALSI facility enum: each member carries a facility code, its country
    and the EIC code of the operating company."""

    def __new__(cls, *args, **kwargs):
        # The first tuple element (the facility code) becomes the enum value.
        member = object.__new__(cls)
        member._value_ = args[0]
        return member

    def __init__(self, _: str, country: str, company: str):
        self._country = country
        self._company = company

    def __str__(self):
        return self.code

    @property
    def company(self):
        """EIC code of the operating company."""
        return self._company

    @property
    def country(self):
        """Two-letter country code of the facility."""
        return self._country

    @property
    def code(self):
        """Facility EIC code (the enum value)."""
        return self._value_

    def get_params(self):
        """Return the query parameters identifying this facility."""
        return dict(
            country=self.country,
            company=self.company,
            facility=self.code,
        )

    zeebrugge = "21W0000000001245", "BE", "21X000000001006T"
    bilbao = "21W0000000000362", "ES", "21X000000001352A"
    barcelona = "21W000000000039X", "ES", "21X000000001254A"
    cartagena = "21W000000000038Z", "ES", "21X000000001254A"
    huelva = "21W0000000000370", "ES", "21X000000001254A"
    sagunto = "21W0000000000354", "ES", "18XTGPRS-12345-G"
    mugardos = "21W0000000000338", "ES", "18XRGNSA-12345-V"
    tvb_virtual_balancing_lng_tank = (
        "18W000000000GVMT",
        "ES",
        "21X0000000013368",
    )
    fos_tonkin = "63W179356656691A", "FR", "21X0000000010679"
    montoir_de_bretagne = "63W631527814486R", "FR", "21X0000000010679"
    dunkerque = "21W0000000000451", "FR", "21X000000001331I"
    fos_cavaou = "63W943693783886F", "FR", "21X000000001070K"
    isle_of_grain = "21W000000000099F", "GB", "21X-GB-A-A0A0A-7"
    south_hook = "21W0000000000419", "GB", "21X0000000013554"
    revythoussa = "21W000000000040B", "GR", "21X-GR-A-A0A0A-G"
    krk_fsru = "31W-0000-G-000-Z", "HR", "31X-LNG-HR-----7"
    panigaglia = "59W0000000000011", "IT", "26X00000117915-0"
    fsru_olt_offshore_lng_toscana = (
        "21W0000000000443",
        "IT",
        "21X000000001109G",
    )
    porto_levante = "21W000000000082W", "IT", "21X000000001360B"
    fsru_independence = "21W0000000001253", "LT", "21X0000000013740"
    rotterdam_gate = "21W0000000000079", "NL", "21X000000001063H"
    swinoujscie = "21W000000000096L", "PL", "21X-PL-A-A0A0A-B"
    sines = "16WTGNL01------O", "PT", "21X0000000013619"
    eemsenergy_lng = "52W000000000001W", "LT", "52X000000000088H"
import enum
class AGSIFacility(enum.Enum):
    """AGSI facility enum: each member carries a facility code, its country
    and the EIC code of the operating company.

    NOTE: a few facility codes repeat between members; per standard
    ``enum`` semantics the later name becomes an alias of the first.
    """

    def __new__(cls, *args, **kwargs):
        # The first tuple element (the facility code) becomes the enum value.
        member = object.__new__(cls)
        member._value_ = args[0]
        return member

    def __init__(self, _: str, country: str, company: str):
        self._country = country
        self._company = company

    def __str__(self):
        return self.code

    @property
    def company(self):
        """EIC code of the operating company."""
        return self._company

    @property
    def country(self):
        """Two-letter country code of the facility."""
        return self._country

    @property
    def code(self):
        """Facility EIC code (the enum value)."""
        return self._value_

    def get_params(self):
        """Return the query parameters identifying this facility."""
        return dict(
            country=self.country,
            company=self.company,
            facility=self.code,
        )

    ugs_haidach_astora = "21W000000000078N", "AT", "21X000000001160J"
    ugs_haidach_gsa = "25W-SPHAID-GAZ-M", "AT", "25X-GSALLC-----E"
    vgs_omv_tallesbrunn = "21W000000000081Y", "AT", "25X-OMVGASSTORA5"
    rag_puchkirchen_haag = "21W000000000079L", "AT", "23X----100225-1C"
    ugs_7_fields_uniper = "21W000000000057V", "AT", "21X000000001127H"
    ugs_loenhout = "21Z000000000102A", "BE", "21X-BE-A-A0A0A-Y"
    ugs_chiren = "21W000000000031C", "BG", "21X-BG-A-A0A0A-C"
    ugs_okoli = "21W000000000077P", "HR", "31X-PSP-OSS-HR-D"
    ugs_uhrice = "21W000000000075T", "CZ", "27XG-MNDGS-CZ--R"
    ugs_damborice = "21W000000000102F", "CZ", "27X-MORAVIAGS--E"
    vgs_rwe_haje = "21W000000000076R", "CZ", "27XG-RWE-GAS-STI"
    ugs_dolni_bojanovice = "21W000000000074V", "CZ", "27X-SPPSTORAGE-R"
    vgs_gsd_lille_torup_stenlille = (
        "45W000000000112V",
        "DK",
        "21X000000001104T",
    )
    vgs_saline_tersanne_etrez_manosque = (
        "21W000000000084S",
        "FR",
        "21X000000001083B",
    )
    vgs_sediane_saintilliers = "21W0000000000710", "FR", "21X000000001083B"
    vgs_sediane_b_gournay = "21W0000000000702", "FR", "21X000000001083B"
    vgs_serene_atlantique_chemery = (
        "63W197197128864M",
        "FR",
        "21X000000001083B",
    )
    vgs_serene_nord_trois_fontaines_labbaye = (
        "21W000000000073X",
        "FR",
        "21X000000001083B",
    )
    vgs_lussagnet_terega = "21W000000000068Q", "FR", "21X-FR-B-A0A0A-J"
    ugs_jemgum_h_astora = "21W0000000001148", "DE", "21X000000001160J"
    ugs_rehden = "21Z000000000271O", "DE", "21X000000001160J"
    vsp_nord_rehden_jemgum = "21W0000000001261", "DE", "21X000000001160J"
    ugs_wolfersberg = "21W0000000000184", "DE", "37X0000000000151"
    ugs_berlin = "21W0000000001083", "DE", "37X0000000000224"
    vgs_ugs_etzel_edf = "37W000000000003M", "DE", "37X000000000152S"
    vgs_ugs_etzel_enbw = "11W0-0000-0432-M", "DE", "11X0-0000-0667-8"
    ugs_enschede_epe_eneco = "21W000000000012G", "DE", "21X0000000010849"
    ugs_frankenthal = "37Z0000000034538", "DE", "**TOBEPROVIDED**"
    ugs_etzel_egl_equinor_storage_deutschland = (
        "21W000000000100J",
        "DE",
        "21X000000001368W",
    )
    ugs_katharina = "21W0000000000281", "DE", "21X000000001297T"
    ugs_etzel_ekb = "21Z000000000291I", "DE", "21X000000001080H"
    ewe_h = "37W000000000002O", "DE", "21X0000000011756"
    ugs_ewe_l = "21W0000000001075", "DE", "21X0000000011756"
    ugs_jemgum_h_ewe = "21W0000000000508", "DE", "21X0000000011756"
    ugs_nuttermoor_h_2 = "21W000000000104B", "DE", "21X0000000011756"
    ugs_nuttermoor_h_3 = "21W000000000103D", "DE", "21X0000000011756"
    ugs_nuttermoor_l_gud = "21W0000000001067", "DE", "21X0000000011756"
    ugs_rudersdorf_h = "21W000000000048W", "DE", "21X0000000011756"
    ugs_kraak = "21W000000000020H", "DE", "21X0000000013805"
    ugs_epe_kge = "21W000000000097J", "DE", "21X000000001140P"
    ugs_etzel_ese_met = "21W000000000055Z", "DE", "37X000000000047P"
    ugs_reckrod = "21W0000000000540", "DE", "37X000000000047P"
    vgs_zone_mnd_esg_ugs_stockstadt = (
        "37Y000000000386Q",
        "DE",
        "37X000000000042Z",
    )
    ugs_eschenfelden_nergie = "21Z000000000321Z", "DE", "11XNERGIE------1"
    ugs_inzenham_west = "21W0000000000192", "DE", "21X0000000011748"
    ugs_enschede_epe_nuon = "21W000000000005D", "DE", "37X0000000000119"
    ugs_etzel_ese_omv = "21W000000000056X", "DE", "25X-OMVGASSTORA5"
    innexpool_rwegsw = "21W000000000121B", "DE", "21X000000001262B"
    ugs_epe_l_rwegsw = "21W0000000000532", "DE", "21X000000001262B"
    ugs_epe_nl_rwegswest = "21W000000000003H", "DE", "21X000000001262B"
    ugs_kalle_rwegswest = "21W000000000004F", "DE", "21X000000001262B"
    ugs_stassfurt_rwegswest = "21W0000000000265", "DE", "21X000000001262B"
    ugs_ronnenberg_empelde = "21Z0000000004002", "DE", "11XSWHANNOVERAG3"
    ugs_fronhofen = "21W000000000091V", "DE", "21X000000001072G"
    ugs_harsefeld = "21W000000000092T", "DE", "21X000000001072G"
    ugs_lesum = "21W000000000090X", "DE", "21X000000001072G"
    ugs_peckensen = "21W0000000000273", "DE", "21X000000001072G"
    ugs_schmidhausen = "21W000000000089I", "DE", "21X000000001072G"
    ugs_uelsen = "21W000000000093R", "DE", "21X000000001072G"
    ugs_bremen_lesum_swb = "21W000000000090X", "DE", "11XSWB-BREMEN--I"
    ugs_kiel_ronne = "21W0000000001164", "DE", "37X000000000051Y"
    ugs_allmenhausen = "21W000000000030E", "DE", "21X000000001307F"
    ugs_etzel_egl_total_etzel_gaslager = (
        "**TOBEPROVIDED**",
        "DE",
        "**TOBEPROVIDED**",
    )
    ugs_epe_trianel = "21W000000000085Q", "DE", "21X000000001310Q"
    ugs_bierwang = "21W0000000000613", "DE", "21X000000001127H"
    ugs_breitbrunn = "21W0000000000605", "DE", "21X000000001127H"
    ugs_epe_uniper_h = "21W000000000066U", "DE", "21X000000001127H"
    ugs_epe_uniper_l = "21W000000000065W", "DE", "21X000000001127H"
    ugs_eschenfelden_uniper = "21W000000000083U", "DE", "21X000000001127H"
    ugs_etzel_erdgas_lager_egl = "21W000000000059R", "DE", "21X000000001127H"
    ugs_etzel_ese_uniper_energy_storage = (
        "21W0000000000168",
        "DE",
        "21X000000001127H",
    )
    ugs_krummhorn = "21W000000000067S", "DE", "21X000000001127H"
    ugs_etzel_ese_vgs = "21W000000000120D", "DE", "21X000000001138C"
    ugs_jemgum_h_vgs = "21W000000000128Y", "DE", "21X000000001138C"
    vgs_storage_hub_bernburg = "21W0000000000427", "DE", "21X000000001138C"
    vgs_vtp_storage_gpl = "21W0000000001091", "DE", "21X000000001138C"
    ugs_szoreg_1 = "21W000000000086O", "HU", "21X0000000013643"
    vgs_mfgt_pusztaederics = "21W000000000087M", "HU", "21X0000000013635"
    ugs_kinsale_southwest = "47W000000000245J", "IE", "47X0000000000584"
    vgs_edison_stoccaggio_collalto = (
        "21W000000000095N",
        "IT",
        "21X0000000013651",
    )
    ugs_cornegliano = "59W-IGSTORAGE-0Q", "IT", "59X4-IGSTORAGE-T"
    vgs_stogit_fiume_treste = "21Z000000000274I", "IT", "21X000000001250I"
    ugs_incukalns = "21W000000000113A", "LV", "21X000000001379R"
    ugs_energystock = "21W000000000006B", "NL", "21X000000001057C"
    ugs_nuttermoor_h_1 = "21W0000000001059", "NL", "21X0000000011756"
    ugs_grijpskerk = "21W000000000001L", "NL", "21X000000001075A"
    ugs_norg_langelo = "21W000000000015A", "NL", "21X000000001075A"
    ugs_bergermeer = "21W0000000000087", "NL", "21X000000001120V"
    ugs_alkmaar = "21W000000000002J", "NL", "21X0000000013732"
    gsp_historical_data_prior_to_4_feb_2014 = (
        "PRIOR_OSM_000001",
        "PL",
        "53XPL000000OSMP5",
    )
    ugs_wierzchowice = "21Z000000000381H", "PL", "53XPL000000OSMP5"
    vgs_gim_kawerna_kosakowo = "21Z000000000383D", "PL", "53XPL000000OSMP5"
    vgs_gim_sanok_brzeznica = "21Z000000000382F", "PL", "53XPL000000OSMP5"
    ugs_carrico = "16ZAS01--------8", "PT", "21X0000000013627"
    ugs_targu_mures = "21Z000000000309P", "RO", "21X000000001300T"
    ugs_balaceanca = "21Z0000000003111", "RO", "21X-DEPOGAZ-AGSI"
    ugs_bilciuresti = "21Z000000000313Y", "RO", "21X-DEPOGAZ-AGSI"
    ugs_cetatea_de_balta = "21Z000000000316S", "RO", "21X-DEPOGAZ-AGSI"
    ugs_ghercesti = "21Z000000000315U", "RO", "21X-DEPOGAZ-AGSI"
    ugs_sarmasel = "21Z000000000314W", "RO", "21X-DEPOGAZ-AGSI"
    ugs_urziceni = "21Z0000000003103", "RO", "21X-DEPOGAZ-AGSI"
    ugs_lab_incl_gajary_baden = "21W000000000088K", "SK", "42X-NAFTA-SK---U"
    ugs_lab_iv_pozagas = "21W000000000047Y", "SK", "42X-POZAGAS-SK-V"
    vgs_enagas_serrablo = "21W000000000032A", "ES", "21X0000000013368"
    ugs_skallen = "21W0000000000435", "SE", "21X-SE-A-A0A0A-F"
    ugs_rough = "21W000000000094P", "GB", "21X000000001022V"
    ugs_holehouse_farm_storage = "21Z000000000227R", "GB", "23X-EDFE-------W"
    ugs_humbly_grove = "55WHUMBLY1GROVER", "GB", "55XHUMBLYGROVE1H"
    ugs_hatfield_moors_storage = "21Z000000000229N", "GB", "23XSCOTTISHPOWEF"
    ugs_aldbrough_i = "55WALDBOROUGH00H", "GB", "23X--140207-SSE9"
    ugs_atwick = "55WATWICK-SSE00J", "GB", "23X--140207-SSE9"
    ugs_stublach = "21W000000000101H", "GB", "48XSTORENGYUK01P"
    ugs_holford = "21W000000000112C", "GB", "21X0000000013716"
import enum
class AGSICompany(enum.Enum):
    """AGSI company enum: each member carries a company EIC code and a country.

    NOTE: a few EIC codes repeat between members; per standard ``enum``
    semantics the later name becomes an alias of the first.
    """

    def __new__(cls, *args, **kwargs):
        # The first tuple element (the company EIC code) becomes the enum value.
        member = object.__new__(cls)
        member._value_ = args[0]
        return member

    def __init__(self, _: str, country: str):
        """Constructor for the AGSICompany enum

        Parameters
        ----------
        country : str
            country string parameter needed for the lookup
        """
        self._country = country

    def __str__(self):
        return self.code

    @property
    def country(self):
        """Two-letter country code of the company."""
        return self._country

    @property
    def code(self):
        """Company EIC code (the enum value)."""
        return self._value_

    def get_params(self):
        """Return the query parameters identifying this company."""
        return dict(country=self.country, company=self.code)

    astora = "21X000000001160J", "AT"
    gsa = "25X-GSALLC-----E", "AT"
    omv_gas_storage = "25X-OMVGASSTORA5", "AT"
    rag_energy_storage = "23X----100225-1C", "AT"
    uniper_energy_storage_at = "21X000000001127H", "AT"
    fluxys = "21X-BE-A-A0A0A-Y", "BE"
    bulgartransgaz = "21X-BG-A-A0A0A-C", "BG"
    psp = "31X-PSP-OSS-HR-D", "HR"
    mnd_energy_storage = "27XG-MNDGS-CZ--R", "CZ"
    moravia_gas_storage = "27X-MORAVIAGS--E", "CZ"
    rwe_gas_storage_cz = "27XG-RWE-GAS-STI", "CZ"
    spp_storage = "27X-SPPSTORAGE-R", "CZ"
    gsd = "21X000000001104T", "DK"
    storengy = "21X000000001083B", "FR"
    terega = "21X-FR-B-A0A0A-J", "FR"
    astora_germany = "21X000000001160J", "DE"
    bayernugs = "37X0000000000151", "DE"
    bes = "37X0000000000224", "DE"
    edf_gas_deutschland = "37X000000000152S", "DE"
    enbw_etzel_speicher = "11X0-0000-0667-8", "DE"
    eneco_gasspeicher = "21X0000000010849", "DE"
    enovos_storage = "**TOBEPROVIDED**", "DE"
    equinor_storage_deutschland = "21X000000001368W", "DE"
    erdgasspeicher_peissen = "21X000000001297T", "DE"
    ekb = "21X000000001080H", "DE"
    ewe_gasspeicher = "21X0000000011756", "DE"
    hansewerk = "21X0000000013805", "DE"
    kge = "21X000000001140P", "DE"
    met_speicher = "37X000000000047P", "DE"
    mnd_energy_storage_germany = "37X000000000042Z", "DE"
    n_ergie = "11XNERGIE------1", "DE"
    nafta_speicher_inzenham = "21X0000000011748", "DE"
    nuon_epe_gasspeicher = "37X0000000000119", "DE"
    omv_gas_storage_germany = "25X-OMVGASSTORA5", "DE"
    rwe_gas_storage_west = "21X000000001262B", "DE"
    stadtwerke_hannover = "11XSWHANNOVERAG3", "DE"
    storengy_deutschland = "21X000000001072G", "DE"
    swb_vertrieb_bremen = "11XSWB-BREMEN--I", "DE"
    swkiel_speicher = "37X000000000051Y", "DE"
    tep = "21X000000001307F", "DE"
    total_etzel_gaslager = "**TOBEPROVIDED**", "DE"
    trianel_gasspeicher_epe = "21X000000001310Q", "DE"
    uniper_energy_storage = "21X000000001127H", "DE"
    vng_gasspeicher_gmbh = "21X000000001138C", "DE"
    hexum = "21X0000000013643", "HU"
    hgs = "21X0000000013635", "HU"
    kinsale_energy = "47X0000000000584", "IE"
    edison_stoccaggio = "21X0000000013651", "IT"
    igs = "59X4-IGSTORAGE-T", "IT"
    stogit = "21X000000001250I", "IT"
    conexus_baltic_grid = "21X000000001379R", "LV"
    energystock = "21X000000001057C", "NL"
    ewe_gasspeicher_nl = "21X0000000011756", "NL"
    nam = "21X000000001075A", "NL"
    taqa_gas_storage = "21X000000001120V", "NL"
    taqa_piek_gas = "21X0000000013732", "NL"
    gsp = "53XPL000000OSMP5", "PL"
    ren_armazenagem = "21X0000000013627", "PT"
    depomures = "21X000000001300T", "RO"
    depogaz_ploiesti = "21X-DEPOGAZ-AGSI", "RO"
    nafta = "42X-NAFTA-SK---U", "SK"
    pozagas = "42X-POZAGAS-SK-V", "SK"
    enagas_gts = "21X0000000013368", "ES"
    swedegas = "21X-SE-A-A0A0A-F", "SE"
    centrica_storage = "21X000000001022V", "GB"
    edf = "23X-EDFE-------W", "GB"
    humbly_grove_energy = "55XHUMBLYGROVE1H", "GB"
    scottish_power = "23XSCOTTISHPOWEF", "GB"
    sse_gas_storage = "23X--140207-SSE9", "GB"
    storengy_uk = "48XSTORENGYUK01P", "GB"
    uniper_energy_storage_ltd = "21X0000000013716", "GB"
import json
from typing import Union
from agrirouter.messaging.exceptions import WrongFieldError
class AuthorizationToken:
    """DTO for the decoded agrirouter authorization token.

    Holds the account id, the registration code and the expiration value;
    the class constants are the JSON keys used on the wire.
    """

    ACCOUNT = 'account'
    REGISTRATION_CODE = 'regcode'
    EXPIRES = 'expires'

    def __init__(self,
                 *,
                 account: str = None,
                 regcode: str = None,
                 expires: str = None
                 ):
        self.account = account
        self.regcode = regcode
        self.expires = expires

    def json_deserialize(self, data: Union[str, dict]) -> None:
        """Populate this token from a dict or a JSON-encoded string.

        :param data: token payload, either already parsed or as a JSON string
        :raises WrongFieldError: if the payload contains an unknown key
        """
        # Accept both parsed dicts and raw JSON strings; isinstance (instead
        # of a type() equality check) also accepts dict subclasses.
        data = data if isinstance(data, dict) else json.loads(data)
        for key, value in data.items():
            if key == self.ACCOUNT:
                self.account = value
            elif key == self.REGISTRATION_CODE:
                self.regcode = value
            elif key == self.EXPIRES:
                self.expires = value
            else:
                raise WrongFieldError(f"Unknown field {key} for AuthorizationToken class")

    def get_account(self) -> str:
        return self.account

    def set_account(self, account: str) -> None:
        self.account = account

    def get_regcode(self) -> str:
        return self.regcode

    def set_regcode(self, regcode: str) -> None:
        self.regcode = regcode

    def get_expires(self) -> str:
        return self.expires

    def set_expires(self, expires: str) -> None:
        self.expires = expires
class AuthorizationResultUrl:
    """DTO for the parameters the agrirouter appends to the redirect URL
    after an authorization attempt: state, signature, raw token, its
    decoded counterpart, and an optional error indicator.
    """

    def __init__(self,
                 *,
                 state: str = None,
                 signature: str = None,
                 token: str = None,
                 decoded_token: AuthorizationToken = None,
                 error: str = None
                 ):
        self.state = state
        self.signature = signature
        self.token = token
        self.decoded_token = decoded_token
        self.error = error

    # --- accessors -------------------------------------------------------

    def get_state(self) -> str:
        return self.state

    def get_signature(self) -> str:
        return self.signature

    def get_token(self) -> str:
        return self.token

    def get_decoded_token(self) -> AuthorizationToken:
        return self.decoded_token

    def get_error(self) -> str:
        return self.error

    # --- mutators --------------------------------------------------------

    def set_state(self, state: str) -> None:
        self.state = state

    def set_signature(self, signature: str) -> None:
        self.signature = signature

    def set_token(self, token: str) -> None:
        self.token = token

    def set_decoded_token(self, decoded_token: AuthorizationToken) -> None:
        self.decoded_token = decoded_token

    def set_error(self, error: str) -> None:
        self.error = error
class AuthorizationResult:
    """DTO pairing a generated authorization URL with the state value
    embedded in it."""

    def __init__(self,
                 *,
                 authorization_url: str = None,
                 state: str = None,
                 ):
        self.authorization_url, self.state = authorization_url, state

    # --- accessors -------------------------------------------------------

    def get_authorization_url(self) -> str:
        return self.authorization_url

    def get_state(self) -> str:
        return self.state

    # --- mutators --------------------------------------------------------

    def set_authorization_url(self, authorization_url: str) -> None:
        self.authorization_url = authorization_url

    def set_state(self, state: str) -> None:
        self.state = state
import json
from typing import Union
from agrirouter.messaging.exceptions import WrongFieldError
class ConnectionCriteria:
    """DTO describing the connection parameters returned by onboarding.

    The class constants are the JSON keys used on the wire; the instance
    attributes are their snake_case counterparts.
    """

    CLIENT_ID = 'clientId'
    COMMANDS = 'commands'
    GATEWAY_ID = 'gatewayId'
    HOST = 'host'
    MEASURES = 'measures'
    PORT = 'port'

    def __init__(self,
                 *,
                 gateway_id: str = None,
                 measures: str = None,
                 commands: str = None,
                 host: str = None,
                 port: str = None,
                 client_id: str = None
                 ):
        self.gateway_id = gateway_id
        self.measures = measures
        self.commands = commands
        self.host = host
        self.port = port
        self.client_id = client_id

    def json_serialize(self) -> dict:
        """Return this object as a dict keyed by the wire-format JSON keys."""
        return {
            self.GATEWAY_ID: self.gateway_id,
            self.MEASURES: self.measures,
            self.COMMANDS: self.commands,
            self.HOST: self.host,
            self.PORT: self.port,
            self.CLIENT_ID: self.client_id
        }

    def json_deserialize(self, data: Union[str, dict]) -> None:
        """Populate this object from a dict or a JSON-encoded string.

        :param data: payload, either already parsed or as a JSON string
        :raises WrongFieldError: if the payload contains an unknown key
        """
        # Accept both parsed dicts and raw JSON strings; isinstance (instead
        # of a type() equality check) also accepts dict subclasses.
        data = data if isinstance(data, dict) else json.loads(data)
        for key, value in data.items():
            if key == self.GATEWAY_ID:
                self.gateway_id = value
            elif key == self.MEASURES:
                self.measures = value
            elif key == self.COMMANDS:
                self.commands = value
            elif key == self.HOST:
                self.host = value
            elif key == self.PORT:
                self.port = value
            elif key == self.CLIENT_ID:
                self.client_id = value
            else:
                raise WrongFieldError(f"Unknown field {key} for Connection Criteria class")

    def get_gateway_id(self) -> str:
        return self.gateway_id

    def set_gateway_id(self, gateway_id: str) -> None:
        self.gateway_id = gateway_id

    def get_measures(self) -> str:
        return self.measures

    def set_measures(self, measures: str) -> None:
        self.measures = measures

    def get_commands(self) -> str:
        return self.commands

    def set_commands(self, commands: str) -> None:
        self.commands = commands

    def get_host(self) -> str:
        return self.host

    def set_host(self, host: str) -> None:
        self.host = host

    def get_port(self) -> str:
        return self.port

    def set_port(self, port: str) -> None:
        self.port = port

    def get_client_id(self) -> str:
        return self.client_id

    def set_client_id(self, client_id: str) -> None:
        self.client_id = client_id

    def __str__(self):
        return str(self.json_serialize())

    def __repr__(self):
        return str(self.json_serialize())
class Authentication:
    """DTO describing the authentication material returned by onboarding
    (authentication type, secret, certificate).

    The class constants are the JSON keys used on the wire.
    """

    TYPE = 'type'
    SECRET = 'secret'
    CERTIFICATE = 'certificate'

    def __init__(self,
                 *,
                 type: str = None,  # noqa: A002 - keyword name fixed by the wire format; shadows builtin
                 secret: str = None,
                 certificate: str = None,
                 ):
        self.type = type
        self.secret = secret
        self.certificate = certificate

    def json_serialize(self) -> dict:
        """Return this object as a dict keyed by the wire-format JSON keys."""
        return {
            self.TYPE: self.type,
            self.SECRET: self.secret,
            self.CERTIFICATE: self.certificate,
        }

    def json_deserialize(self, data: Union[str, dict]) -> None:
        """Populate this object from a dict or a JSON-encoded string.

        :param data: payload, either already parsed or as a JSON string
        :raises WrongFieldError: if the payload contains an unknown key
        """
        # Accept both parsed dicts and raw JSON strings; isinstance (instead
        # of a type() equality check) also accepts dict subclasses.
        data = data if isinstance(data, dict) else json.loads(data)
        for key, value in data.items():
            if key == self.TYPE:
                self.type = value
            elif key == self.SECRET:
                self.secret = value
            elif key == self.CERTIFICATE:
                self.certificate = value
            else:
                raise WrongFieldError(f"Unknown field {key} for Authentication class")

    def get_type(self) -> str:
        return self.type

    def set_type(self, type: str) -> None:  # noqa: A002 - name kept for interface compatibility
        self.type = type

    def get_secret(self) -> str:
        return self.secret

    def set_secret(self, secret: str) -> None:
        self.secret = secret

    def get_certificate(self) -> str:
        return self.certificate

    def set_certificate(self, certificate: str) -> None:
        self.certificate = certificate

    def __str__(self):
        return str(self.json_serialize())

    def __repr__(self):
        return str(self.json_serialize())
class ErrorResponse:
    """DTO for an error payload (code, message, target, details).

    The class constants are the JSON keys used on the wire.
    """

    CODE = "code"
    MESSAGE = "message"
    TARGET = "target"
    DETAILS = "details"

    def __init__(self,
                 *,
                 code: str = None,
                 message: str = None,
                 target: str = None,
                 details: str = None
                 ):
        self.code = code
        self.message = message
        self.target = target
        self.details = details

    def json_serialize(self) -> dict:
        """Return this object as a dict keyed by the wire-format JSON keys."""
        return {
            self.CODE: self.code,
            self.MESSAGE: self.message,
            self.TARGET: self.target,
            self.DETAILS: self.details
        }

    def json_deserialize(self, data: Union[str, dict]) -> None:
        """Populate this object from a dict or a JSON-encoded string.

        :param data: payload, either already parsed or as a JSON string
        :raises WrongFieldError: if the payload contains an unknown key
        """
        # Accept both parsed dicts and raw JSON strings; isinstance (instead
        # of a type() equality check) also accepts dict subclasses.
        data = data if isinstance(data, dict) else json.loads(data)
        for key, value in data.items():
            if key == self.CODE:
                self.code = value
            elif key == self.MESSAGE:
                self.message = value
            elif key == self.TARGET:
                self.target = value
            elif key == self.DETAILS:
                self.details = value
            else:
                raise WrongFieldError(f"Unknown field {key} for ErrorResponse class")

    def get_code(self) -> str:
        return self.code

    def set_code(self, code: str) -> None:
        self.code = code

    def get_message(self) -> str:
        return self.message

    def set_message(self, message: str) -> None:
        self.message = message

    def get_target(self) -> str:
        return self.target

    def set_target(self, target: str) -> None:
        self.target = target

    def get_details(self) -> str:
        return self.details

    def set_details(self, details: str) -> None:
        self.details = details
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.