repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
transifex/transifex-python-library | txlib/api/base.py | BaseModel._delete | python | def _delete(self, **kwargs):
path = self._construct_path_to_item()
return self._http.delete(path) | Delete a resource from a remote Transifex server. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L271-L274 | [
"def _construct_path_to_item(self):\n \"\"\"Construct the path to an actual item.\"\"\"\n return self.get_path_to_item_template() % self.get_url_parameters()\n"
] | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel.get_url_parameters | python | def get_url_parameters(self):
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields | Create a dictionary of parameters used in URLs for this model. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L285-L290 | null | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel._handle_wrong_field | python | def _handle_wrong_field(cls, field_name, field_type):
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg) | Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L307-L333 | null | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
|
transifex/transifex-python-library | txlib/http/base.py | BaseRequest._construct_full_hostname | python | def _construct_full_hostname(self, hostname):
if hostname.startswith(('http://', 'https://', )):
return hostname
if '://' in hostname:
protocol, host = hostname.split('://', 1)
raise ValueError('Protocol %s is not supported.' % protocol)
return '://'.join([self.default_scheme, hostname, ]) | Create a full (scheme included) hostname from the argument given.
Only HTTP and HTTP+SSL protocols are allowed.
Args:
hostname: The hostname to use.
Returns:
The full hostname.
Raises:
ValueError: A not supported protocol is used. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/base.py#L51-L68 | null | class BaseRequest(object):
"""Base class for http request classes."""
errors = {
400: exceptions.RequestError,
401: exceptions.AuthorizationError,
403: exceptions.AuthenticationError,
404: exceptions.NotFoundError,
409: exceptions.ConflictError,
}
success = {
200: "OK",
201: "Created",
204: "Deleted",
}
error_messages = {
400: "Bad request: %s",
401: "Authorization is required: %s",
403: "Authentication error: %s",
404: "Entity was not found: %s",
409: "Error with the request: %s",
}
default_scheme = 'https'
def __init__(self, hostname, auth=AnonymousAuth()):
"""Initializer for the base class.
Save the hostname to use for all requests as well as any
authentication info needed.
Args:
hostname: The host for the requests.
auth: The authentication info needed for any requests.
"""
self._hostname = self._construct_full_hostname(hostname)
_logger.debug("Hostname is %s" % self._hostname)
self._auth_info = auth
def _construct_full_url(self, path):
"""Construct the full url from the host and the path parts."""
return urlparse.urljoin(self._hostname, path)
def _error_message(self, code, msg):
"""Return the message that corresponds to the
request (status code and error message) specified.
Args:
`code`: The http status code.
`msg`: The message to display.
Returns:
The error message for the code given.
"""
return self.error_messages[code] % msg
def _exception_for(self, code):
"""Return the exception class suitable for the specified HTTP
status code.
Raises:
UnknownError: The HTTP status code is not one of the knowns.
"""
if code in self.errors:
return self.errors[code]
elif 500 <= code < 599:
return exceptions.RemoteServerError
else:
return exceptions.UnknownError
|
transifex/transifex-python-library | txlib/http/base.py | BaseRequest._exception_for | python | def _exception_for(self, code):
if code in self.errors:
return self.errors[code]
elif 500 <= code < 599:
return exceptions.RemoteServerError
else:
return exceptions.UnknownError | Return the exception class suitable for the specified HTTP
status code.
Raises:
UnknownError: The HTTP status code is not one of the knowns. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/base.py#L86-L98 | null | class BaseRequest(object):
"""Base class for http request classes."""
errors = {
400: exceptions.RequestError,
401: exceptions.AuthorizationError,
403: exceptions.AuthenticationError,
404: exceptions.NotFoundError,
409: exceptions.ConflictError,
}
success = {
200: "OK",
201: "Created",
204: "Deleted",
}
error_messages = {
400: "Bad request: %s",
401: "Authorization is required: %s",
403: "Authentication error: %s",
404: "Entity was not found: %s",
409: "Error with the request: %s",
}
default_scheme = 'https'
def __init__(self, hostname, auth=AnonymousAuth()):
"""Initializer for the base class.
Save the hostname to use for all requests as well as any
authentication info needed.
Args:
hostname: The host for the requests.
auth: The authentication info needed for any requests.
"""
self._hostname = self._construct_full_hostname(hostname)
_logger.debug("Hostname is %s" % self._hostname)
self._auth_info = auth
def _construct_full_hostname(self, hostname):
"""Create a full (scheme included) hostname from the argument given.
Only HTTP and HTTP+SSL protocols are allowed.
Args:
hostname: The hostname to use.
Returns:
The full hostname.
Raises:
ValueError: A not supported protocol is used.
"""
if hostname.startswith(('http://', 'https://', )):
return hostname
if '://' in hostname:
protocol, host = hostname.split('://', 1)
raise ValueError('Protocol %s is not supported.' % protocol)
return '://'.join([self.default_scheme, hostname, ])
def _construct_full_url(self, path):
"""Construct the full url from the host and the path parts."""
return urlparse.urljoin(self._hostname, path)
def _error_message(self, code, msg):
"""Return the message that corresponds to the
request (status code and error message) specified.
Args:
`code`: The http status code.
`msg`: The message to display.
Returns:
The error message for the code given.
"""
return self.error_messages[code] % msg
|
transifex/transifex-python-library | txlib/http/auth.py | AuthInfo.get | python | def get(self, username=None, password=None, headers={}):
if all((username, password, )):
return BasicAuth(username, password, headers)
elif not any((username, password, )):
return AnonymousAuth(headers)
else:
if username is None:
data = ("username", username, )
else:
data = ("Password", password, )
msg = "%s must have a value (instead of '%s')" % (data[0], data[1])
raise ValueError(msg) | Factory method to get the correct AuthInfo object.
The returned value depends on the arguments given. In case the
username and password don't have a value (ie evaluate to False),
return an object for anonymous access. Else, return an auth
object that supports basic authentication.
Args:
`username`: The username of the user.
`password`: The password of the user.
`headers`: Custom headers to be sent to each request.
Raises:
ValueError in case one of the two arguments evaluates to False,
(such as having the None value). | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/auth.py#L14-L40 | null | class AuthInfo(object):
"""Base class for all AuthInfo classes."""
@classmethod
def populate_request_data(self, request_args):
"""Add any auth info to the arguments of the (to be performed) request.
The method of the base class does nothing.
Args:
`request_args`: The arguments of the next request.
Returns:
The updated arguments for the request.
"""
return request_args
|
transifex/transifex-python-library | txlib/http/auth.py | BasicAuth.populate_request_data | python | def populate_request_data(self, request_args):
request_args['auth'] = HTTPBasicAuth(
self._username, self._password)
return request_args | Add the authentication info to the supplied dictionary.
We use the `requests.HTTPBasicAuth` class as the `auth` param.
Args:
`request_args`: The arguments that will be passed to the request.
Returns:
The updated arguments for the request. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/auth.py#L74-L86 | null | class BasicAuth(AuthInfo):
"""Class for basic authentication support."""
def __init__(self, username, password, headers={}):
"""Initializer.
:param str username: The username to be used for the authentication with
Transifex. It can either have the value 'API' (suggested) or the
username of a user
:param str password: The password to be used for the authentication with
Transifex. It should be a Transifex token (suggested) if the username is
'API', or the password of the user whose username is used for authentication
:param dict headers: A dictionary with custom headers which will be sent
in every request to the Transifex API.
"""
self._username = username
self._password = password
self._headers = headers
|
transifex/transifex-python-library | txlib/api/resources.py | Resource.retrieve_content | python | def retrieve_content(self):
path = self._construct_path_to_source_content()
res = self._http.get(path)
self._populated_fields['content'] = res['content']
return res['content'] | Retrieve the content of a resource. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/resources.py#L27-L32 | [
"def _construct_path_to_source_content(self):\n \"\"\"Construct the path to the source content for an actual resource.\"\"\"\n template = self.get_path_to_source_content_template() # flake8 fix\n return template % self.get_url_parameters()\n"
] | class Resource(BaseModel):
"""Model class for resources."""
_path_to_collection = 'project/%(project_slug)s/resources/'
_path_to_item = 'project/%(project_slug)s/resource/%(slug)s/?details'
_path_to_source_language = 'project/%(project_slug)s/resource/' \
'%(slug)s/content/'
writable_fields = {
'slug', 'name', 'accept_translations', 'source_language',
'mimetype', 'content', 'i18n_type', 'categories', 'category',
'metadata',
}
url_fields = {'project_slug', 'slug'}
def _update(self, **kwargs):
"""Use separate URL for updating the source file."""
if 'content' in kwargs:
content = kwargs.pop('content')
path = self._construct_path_to_source_content()
self._http.put(path, json.dumps({'content': content}))
super(Resource, self)._update(**kwargs)
def _construct_path_to_source_content(self):
"""Construct the path to the source content for an actual resource."""
template = self.get_path_to_source_content_template() # flake8 fix
return template % self.get_url_parameters()
def get_path_to_source_content_template(self):
"""Return the path to the source language content."""
return self._join_subpaths(self._prefix, self._path_to_source_language)
def __str__(self):
return '[Resource slug={}]'.format(self.slug)
|
transifex/transifex-python-library | txlib/api/resources.py | Resource._update | python | def _update(self, **kwargs):
if 'content' in kwargs:
content = kwargs.pop('content')
path = self._construct_path_to_source_content()
self._http.put(path, json.dumps({'content': content}))
super(Resource, self)._update(**kwargs) | Use separate URL for updating the source file. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/resources.py#L34-L40 | null | class Resource(BaseModel):
"""Model class for resources."""
_path_to_collection = 'project/%(project_slug)s/resources/'
_path_to_item = 'project/%(project_slug)s/resource/%(slug)s/?details'
_path_to_source_language = 'project/%(project_slug)s/resource/' \
'%(slug)s/content/'
writable_fields = {
'slug', 'name', 'accept_translations', 'source_language',
'mimetype', 'content', 'i18n_type', 'categories', 'category',
'metadata',
}
url_fields = {'project_slug', 'slug'}
def retrieve_content(self):
"""Retrieve the content of a resource."""
path = self._construct_path_to_source_content()
res = self._http.get(path)
self._populated_fields['content'] = res['content']
return res['content']
def _construct_path_to_source_content(self):
"""Construct the path to the source content for an actual resource."""
template = self.get_path_to_source_content_template() # flake8 fix
return template % self.get_url_parameters()
def get_path_to_source_content_template(self):
"""Return the path to the source language content."""
return self._join_subpaths(self._prefix, self._path_to_source_language)
def __str__(self):
return '[Resource slug={}]'.format(self.slug)
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest.post | python | def post(self, path, data, filename=None):
return self._send('POST', path, data, filename) | Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L28-L47 | [
"def _send(self, method, path, data, filename):\n \"\"\"Send data to a remote server, either with a POST or a PUT request.\n\n Args:\n `method`: The method (POST or PUT) to use.\n `path`: The path to the resource.\n `data`: The data to send.\n `filename`: The filename of the file to send (if any).\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n if filename is None:\n return self._send_json(method, path, data)\n else:\n return self._send_file(method, path, data, filename)\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def put(self, path, data, filename=None):
"""Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('PUT', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _make_request(self, method, path, data=None, **kwargs):
"""Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
def _send_json(self, method, path, data):
"""Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers)
def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest.put | python | def put(self, path, data, filename=None):
return self._send('PUT', path, data, filename) | Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L49-L68 | [
"def _send(self, method, path, data, filename):\n \"\"\"Send data to a remote server, either with a POST or a PUT request.\n\n Args:\n `method`: The method (POST or PUT) to use.\n `path`: The path to the resource.\n `data`: The data to send.\n `filename`: The filename of the file to send (if any).\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n if filename is None:\n return self._send_json(method, path, data)\n else:\n return self._send_file(method, path, data, filename)\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def post(self, path, data, filename=None):
"""Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('POST', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _make_request(self, method, path, data=None, **kwargs):
"""Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
def _send_json(self, method, path, data):
"""Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers)
def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest._make_request | python | def _make_request(self, method, path, data=None, **kwargs):
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg) | Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L82-L121 | [
"def _construct_full_url(self, path):\n \"\"\"Construct the full url from the host and the path parts.\"\"\"\n return urlparse.urljoin(self._hostname, path)\n",
"def _exception_for(self, code):\n \"\"\"Return the exception class suitable for the specified HTTP\n status code.\n\n Raises:\n UnknownError: The HTTP status code is not one of the knowns.\n \"\"\"\n if code in self.errors:\n return self.errors[code]\n elif 500 <= code < 599:\n return exceptions.RemoteServerError\n else:\n return exceptions.UnknownError\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def post(self, path, data, filename=None):
"""Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('POST', path, data, filename)
def put(self, path, data, filename=None):
"""Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('PUT', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
def _send_json(self, method, path, data):
"""Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers)
def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest._send | python | def _send(self, method, path, data, filename):
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename) | Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L123-L139 | [
"def _send_json(self, method, path, data):\n \"\"\"Make a application/json request.\n\n Args:\n `method`: The method of the request (POST or PUT).\n `path`: The path to the resource.\n `data`: The JSON-encoded data.\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n headers = {'Content-type': 'application/json'}\n return self._make_request(method, path, data=data, headers=headers)\n",
"def _send_file(self, method, path, data, filename):\n \"\"\"Make a multipart/form-encoded request.\n\n Args:\n `method`: The method of the request (POST or PUT).\n `path`: The path to the resource.\n `data`: The JSON-encoded data.\n `filename`: The filename of the file to send.\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n with open(filename, 'r') as f:\n return self._make_request(method, path, data=data, files=[f, ])\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def post(self, path, data, filename=None):
"""Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('POST', path, data, filename)
def put(self, path, data, filename=None):
"""Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('PUT', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _make_request(self, method, path, data=None, **kwargs):
"""Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
def _send_json(self, method, path, data):
"""Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers)
def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest._send_json | python | def _send_json(self, method, path, data):
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers) | Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L141-L154 | [
"def _make_request(self, method, path, data=None, **kwargs):\n \"\"\"Make a request.\n\n Use the `requests` module to actually perform the request.\n\n Args:\n `method`: The method to use.\n `path`: The path to the resource.\n `data`: Any data to send (for POST and PUT requests).\n `kwargs`: Other parameters for `requests`.\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n _logger.debug(\"Method for request is %s\" % method)\n url = self._construct_full_url(path)\n _logger.debug(\"URL for request is %s\" % url)\n self._auth_info.populate_request_data(kwargs)\n _logger.debug(\"The arguments are %s\" % kwargs)\n\n # Add custom headers for the request\n if self._auth_info._headers:\n kwargs.setdefault('headers', {}).update(self._auth_info._headers)\n\n res = requests.request(method, url, data=data, **kwargs)\n\n if res.ok:\n _logger.debug(\"Request was successful.\")\n return res.content.decode('utf-8')\n\n if hasattr(res, 'content'):\n _logger.debug(\"Response was %s:%s\", res.status_code, res.content)\n raise self._exception_for(res.status_code)(\n res.content, http_code=res.status_code\n )\n else:\n msg = \"No response from URL: %s\" % res.request.url\n _logger.error(msg)\n raise NoResponseError(msg)\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def post(self, path, data, filename=None):
"""Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('POST', path, data, filename)
def put(self, path, data, filename=None):
"""Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('PUT', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _make_request(self, method, path, data=None, **kwargs):
"""Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
transifex/transifex-python-library | txlib/http/http_requests.py | HttpRequest._send_file | python | def _send_file(self, method, path, data, filename):
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ]) | Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/http/http_requests.py#L156-L170 | [
"def _make_request(self, method, path, data=None, **kwargs):\n \"\"\"Make a request.\n\n Use the `requests` module to actually perform the request.\n\n Args:\n `method`: The method to use.\n `path`: The path to the resource.\n `data`: Any data to send (for POST and PUT requests).\n `kwargs`: Other parameters for `requests`.\n Returns:\n The content of the response.\n Raises:\n An exception depending on the HTTP status code of the response.\n \"\"\"\n _logger.debug(\"Method for request is %s\" % method)\n url = self._construct_full_url(path)\n _logger.debug(\"URL for request is %s\" % url)\n self._auth_info.populate_request_data(kwargs)\n _logger.debug(\"The arguments are %s\" % kwargs)\n\n # Add custom headers for the request\n if self._auth_info._headers:\n kwargs.setdefault('headers', {}).update(self._auth_info._headers)\n\n res = requests.request(method, url, data=data, **kwargs)\n\n if res.ok:\n _logger.debug(\"Request was successful.\")\n return res.content.decode('utf-8')\n\n if hasattr(res, 'content'):\n _logger.debug(\"Response was %s:%s\", res.status_code, res.content)\n raise self._exception_for(res.status_code)(\n res.content, http_code=res.status_code\n )\n else:\n msg = \"No response from URL: %s\" % res.request.url\n _logger.error(msg)\n raise NoResponseError(msg)\n"
] | class HttpRequest(BaseRequest):
"""Basic http requests handler.
This class can handle both HTTP and HTTPS requests.
"""
def get(self, path):
"""Make a GET request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return json.loads(self._make_request('GET', path))
def post(self, path, data, filename=None):
"""Make a POST request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('POST', path, data, filename)
def put(self, path, data, filename=None):
"""Make a PUT request.
If a `filename` is not specified, then the data must already be
JSON-encoded. We specify the Content-Type accordingly.
Else, we make a multipart/form-encoded request. In this case, the data
variable must be a dict-like object. The file must already be
suitably (usually UTF-8) encoded.
Args:
`path`: The path to the resource.
`data`: The data to send. The data must already be JSON-encoded.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._send('PUT', path, data, filename)
def delete(self, path):
"""Make a DELETE request.
Args:
`path`: The path to the resource.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
return self._make_request('DELETE', path)
def _make_request(self, method, path, data=None, **kwargs):
"""Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
# Add custom headers for the request
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
def _send(self, method, path, data, filename):
"""Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
def _send_json(self, method, path, data):
"""Make a application/json request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
headers = {'Content-type': 'application/json'}
return self._make_request(method, path, data=data, headers=headers)
|
garenchan/policy | policy/enforcer.py | Rules.load_json | python | def load_json(cls, data, default_rule=None, raise_error=False):
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in json.loads(data).items()}
return cls(rules, default_rule) | Allow loading of JSON rule data. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L30-L36 | null | class Rules(dict):
"""A store for rules."""
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super().__init__(rules or {})
self.default_rule = default_rule
@classmethod
@classmethod
def from_dict(cls, rules_dict: dict, default_rule=None, raise_error=False):
"""Allow loading of rule data from a dictionary."""
# Parse the rules stored in the dictionary
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in rules_dict.items()}
return cls(rules, default_rule)
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or isinstance(self.default_rule, dict):
raise KeyError(key)
if isinstance(self.default_rule, checks.BaseCheck):
return self.default_rule
# We need not check this or we will fall into infinite recursion.
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, str):
return self[self.default_rule]
else:
return None
def __str__(self):
"""Dumps a string representation of the rules."""
out_rules = {}
for key, value in self.items():
if isinstance(value, checks.TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
return json.dumps(out_rules, indent=4)
|
garenchan/policy | policy/enforcer.py | Rules.from_dict | python | def from_dict(cls, rules_dict: dict, default_rule=None, raise_error=False):
# Parse the rules stored in the dictionary
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in rules_dict.items()}
return cls(rules, default_rule) | Allow loading of rule data from a dictionary. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L39-L46 | null | class Rules(dict):
"""A store for rules."""
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super().__init__(rules or {})
self.default_rule = default_rule
@classmethod
def load_json(cls, data, default_rule=None, raise_error=False):
"""Allow loading of JSON rule data."""
rules = {k: _parser.parse_rule(v, raise_error)
for k, v in json.loads(data).items()}
return cls(rules, default_rule)
@classmethod
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or isinstance(self.default_rule, dict):
raise KeyError(key)
if isinstance(self.default_rule, checks.BaseCheck):
return self.default_rule
# We need not check this or we will fall into infinite recursion.
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, str):
return self[self.default_rule]
else:
return None
def __str__(self):
"""Dumps a string representation of the rules."""
out_rules = {}
for key, value in self.items():
if isinstance(value, checks.TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
return json.dumps(out_rules, indent=4)
|
garenchan/policy | policy/enforcer.py | Enforcer._set_rules | python | def _set_rules(self, rules: dict, overwrite=True):
if not isinstance(rules, dict):
raise TypeError('rules must be an instance of dict or Rules,'
'got %r instead' % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules) | Created a new Rules object based on the provided dict of rules. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L103-L113 | null | class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, policy_file, rules=None, default_rule=None,
raise_error=False, load_once=True):
"""
:param policy_file: the filename of policy file
:param rules: default rules
:param default_rule: default rule
:param raise_error: raise error on parsing rule and enforcing
policy or not
:param load_once: load policy file just once
"""
self.default_rule = default_rule
self.rules = Rules(rules, default_rule)
self.policy_file = policy_file
self.raise_error = raise_error
self.load_once = load_once
self._policy_loaded = False
# Make rules loading thread-safe
self._load_lock = threading.Lock()
def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file)
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result
|
garenchan/policy | policy/enforcer.py | Enforcer.load_rules | python | def load_rules(self, force_reload=False, overwrite=True):
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file) | Load rules from policy file or cache. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L115-L131 | [
"def read_file(filename: str, force_reload=False):\n \"\"\"Read a file if it has been modified.\n\n :param filename: File name which want to be read from.\n :param force_reload: Whether to reload the file.\n :returns: A tuple with a boolean specifying if the data is fresh or not.\n \"\"\"\n\n if force_reload:\n _delete_cached_file(filename)\n\n reloaded = False\n mtime = os.path.getmtime(filename)\n cache_info = CACHE.setdefault(filename, {})\n\n if not cache_info or mtime > cache_info.get('mtime', 0):\n LOG.debug('Reloading cached file %s', filename)\n with open(filename) as fp:\n cache_info['data'] = fp.read()\n cache_info['mtime'] = mtime\n reloaded = True\n\n return reloaded, cache_info['data']\n",
"def load_json(cls, data, default_rule=None, raise_error=False):\n \"\"\"Allow loading of JSON rule data.\"\"\"\n\n rules = {k: _parser.parse_rule(v, raise_error)\n for k, v in json.loads(data).items()}\n\n return cls(rules, default_rule)\n",
"def _set_rules(self, rules: dict, overwrite=True):\n \"\"\"Created a new Rules object based on the provided dict of rules.\"\"\"\n\n if not isinstance(rules, dict):\n raise TypeError('rules must be an instance of dict or Rules,'\n 'got %r instead' % type(rules))\n\n if overwrite:\n self.rules = Rules(rules, self.default_rule)\n else:\n self.rules.update(rules)\n"
] | class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, policy_file, rules=None, default_rule=None,
raise_error=False, load_once=True):
"""
:param policy_file: the filename of policy file
:param rules: default rules
:param default_rule: default rule
:param raise_error: raise error on parsing rule and enforcing
policy or not
:param load_once: load policy file just once
"""
self.default_rule = default_rule
self.rules = Rules(rules, default_rule)
self.policy_file = policy_file
self.raise_error = raise_error
self.load_once = load_once
self._policy_loaded = False
# Make rules loading thread-safe
self._load_lock = threading.Lock()
def _set_rules(self, rules: dict, overwrite=True):
"""Created a new Rules object based on the provided dict of rules."""
if not isinstance(rules, dict):
raise TypeError('rules must be an instance of dict or Rules,'
'got %r instead' % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials."""
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result
|
garenchan/policy | policy/enforcer.py | Enforcer.enforce | python | def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
self.load_rules()
if isinstance(rule, checks.BaseCheck):
result = rule(target, creds, self, rule)
elif not self.rules:
# No rules means we're going to fail closed.
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self, rule)
except KeyError:
LOG.debug('Rule [%s] does not exist', rule)
# If the rule doesn't exist, fail closed
result = False
if self.raise_error and not result:
if exc:
raise exc(*args, **kwargs)
else:
raise PolicyNotAuthorized(rule, target, creds)
return result | Checks authorization of a rule against the target and credentials. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L133-L158 | [
"def load_rules(self, force_reload=False, overwrite=True):\n \"\"\"Load rules from policy file or cache.\"\"\"\n\n # double-checked locking\n if self.load_once and self._policy_loaded:\n return\n with self._load_lock:\n if self.load_once and self._policy_loaded:\n return\n\n reloaded, data = _cache.read_file(\n self.policy_file, force_reload=force_reload)\n self._policy_loaded = True\n if reloaded or not self.rules:\n rules = Rules.load_json(data, self.default_rule, self.raise_error)\n self._set_rules(rules, overwrite=overwrite)\n LOG.debug('Reload policy file: %s', self.policy_file)\n"
] | class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, policy_file, rules=None, default_rule=None,
raise_error=False, load_once=True):
"""
:param policy_file: the filename of policy file
:param rules: default rules
:param default_rule: default rule
:param raise_error: raise error on parsing rule and enforcing
policy or not
:param load_once: load policy file just once
"""
self.default_rule = default_rule
self.rules = Rules(rules, default_rule)
self.policy_file = policy_file
self.raise_error = raise_error
self.load_once = load_once
self._policy_loaded = False
# Make rules loading thread-safe
self._load_lock = threading.Lock()
def _set_rules(self, rules: dict, overwrite=True):
"""Created a new Rules object based on the provided dict of rules."""
if not isinstance(rules, dict):
raise TypeError('rules must be an instance of dict or Rules,'
'got %r instead' % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def load_rules(self, force_reload=False, overwrite=True):
"""Load rules from policy file or cache."""
# double-checked locking
if self.load_once and self._policy_loaded:
return
with self._load_lock:
if self.load_once and self._policy_loaded:
return
reloaded, data = _cache.read_file(
self.policy_file, force_reload=force_reload)
self._policy_loaded = True
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule, self.raise_error)
self._set_rules(rules, overwrite=overwrite)
LOG.debug('Reload policy file: %s', self.policy_file)
|
garenchan/policy | policy/_cache.py | read_file | python | def read_file(filename: str, force_reload=False):
if force_reload:
_delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug('Reloading cached file %s', filename)
with open(filename) as fp:
cache_info['data'] = fp.read()
cache_info['mtime'] = mtime
reloaded = True
return reloaded, cache_info['data'] | Read a file if it has been modified.
:param filename: File name which want to be read from.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh or not. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_cache.py#L19-L41 | [
"def _delete_cached_file(filename: str):\n \"\"\"Delete cached file if present.\n\n :param filename: Filename to delete\n \"\"\"\n try:\n del CACHE[filename]\n except KeyError:\n pass\n"
] | # -*- coding: utf-8 -*-
"""
policy._cache
~~~~~~~~~~~~~~~
Cache for policy file.
"""
import os
import logging
LOG = logging.getLogger(__name__)
# Global file cache
CACHE = {}
def _delete_cached_file(filename: str):
"""Delete cached file if present.
:param filename: Filename to delete
"""
try:
del CACHE[filename]
except KeyError:
pass
|
garenchan/policy | policy/_utils.py | dict_from_object | python | def dict_from_object(obj: object):
# If object is a dict instance, no need to convert.
return (obj if isinstance(obj, dict)
else {attr: getattr(obj, attr)
for attr in dir(obj) if not attr.startswith('_')}) | Convert a object into dictionary with all of its readable attributes. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_utils.py#L13-L19 | null | # -*- coding: utf-8 -*-
"""
policy._utils
~~~~~~~~~~~~~~~
Policy's utils for internal user.
"""
_sentinel = object()
def xgetattr(obj: object, name: str, default=_sentinel, getitem=False):
"""Get attribute value from object.
:param obj: object
:param name: attribute or key name
:param default: when attribute or key missing, return default; if obj is a
dict and use getitem, default will not be used.
:param getitem: when object is a dict, use getitem or get
:return: attribute or key value, or raise KeyError/AttributeError
"""
if isinstance(obj, dict):
if getitem:
# In tune with `dict.__getitem__` method.
return obj[name]
else:
# In tune with `dict.get` method.
val = obj.get(name, default)
return None if val is _sentinel else val
else:
# If object is not a dict, in tune with `getattr` method.
val = getattr(obj, name, default)
if val is _sentinel:
msg = '%r object has no attribute %r' % (obj.__class__, name)
raise AttributeError(msg)
else:
return val
|
garenchan/policy | policy/_utils.py | xgetattr | python | def xgetattr(obj: object, name: str, default=_sentinel, getitem=False):
if isinstance(obj, dict):
if getitem:
# In tune with `dict.__getitem__` method.
return obj[name]
else:
# In tune with `dict.get` method.
val = obj.get(name, default)
return None if val is _sentinel else val
else:
# If object is not a dict, in tune with `getattr` method.
val = getattr(obj, name, default)
if val is _sentinel:
msg = '%r object has no attribute %r' % (obj.__class__, name)
raise AttributeError(msg)
else:
return val | Get attribute value from object.
:param obj: object
:param name: attribute or key name
:param default: when attribute or key missing, return default; if obj is a
dict and use getitem, default will not be used.
:param getitem: when object is a dict, use getitem or get
:return: attribute or key value, or raise KeyError/AttributeError | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_utils.py#L22-L48 | null | # -*- coding: utf-8 -*-
"""
policy._utils
~~~~~~~~~~~~~~~
Policy's utils for internal user.
"""
_sentinel = object()
def dict_from_object(obj: object):
"""Convert a object into dictionary with all of its readable attributes."""
# If object is a dict instance, no need to convert.
return (obj if isinstance(obj, dict)
else {attr: getattr(obj, attr)
for attr in dir(obj) if not attr.startswith('_')})
|
garenchan/policy | policy/checks.py | register | python | def register(name, _callable=None):
def wrapper(_callable):
registered_checks[name] = _callable
return _callable
# If function or class is given, do the registeration
if _callable:
return wrapper(_callable)
return wrapper | A decorator used for register custom check.
:param name: name of check
:type: str
:param _callable: check class or a function which return check instance
:return: _callable or a decorator | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/checks.py#L177-L193 | [
"def wrapper(_callable):\n registered_checks[name] = _callable\n return _callable\n"
] | # -*- coding: utf-8 -*-
"""
policy.checkers
~~~~~~~~~~~~~~~
Various checkers to check policy.
"""
import abc
import ast
from collections import Iterable
from policy import _utils
registered_checks = {}
class BaseCheck(metaclass=abc.ABCMeta):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(cls):
"""String representation of the check"""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer, current_rule=None):
"""Triggers if instance of the class is called.
Performs check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy checker that always return ``False`` (disallow) """
def __str__(self):
"""Return a string representation of this checker."""
return '!'
def __call__(self, target, cred, enforcer, current_rule=None):
"""Check the policy"""
return False
class TrueCheck(BaseCheck):
"""A policy checker that always return ``True`` (allow) """
def __str__(self):
"""Return a string representation of this checker."""
return '@'
def __call__(self, target, cred, enforcer, current_rule=None):
"""Check the policy"""
return True
class Check(BaseCheck):
def __init__(self, kind, match):
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this checker."""
return '%s:%s' % (self.kind, self.match)
class NotCheck(BaseCheck):
def __init__(self, rule):
self.rule = rule
def __str__(self):
"""Return a string representation of this checker."""
return 'not %s' % self.rule
def __call__(self, target, cred, enforcer, current_rule=None):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer, current_rule)
class AndCheck(BaseCheck):
def __init__(self, *rules):
self.rules = list(rules)
def __str__(self):
"""Return a string representation of this checker."""
return '(%s)' % ' and '.join(str(rule) for rule in self.rules)
def __call__(self, target, cred, enforcer, current_rule=None):
"""Check the policy.
Returns the logical AND of the wrapped checks.
"""
for rule in self.rules:
if not rule(target, cred, enforcer, current_rule):
return False
else:
return True
def add_check(self, rule):
"""Adds rule to be checked.
Allow addition of another rule to the list of rules that will
be checked.
:return: self
:rtype: :class:`.AndChecker`
"""
self.rules.append(rule)
class OrCheck(BaseCheck):
def __init__(self, *rules):
self.rules = list(rules)
def __str__(self):
"""Return a string representation of this checker."""
return '(%s)' % ' or '.join(str(rule) for rule in self.rules)
def __call__(self, target, cred, enforcer, current_rule=None):
"""Check the policy.
Returns the logical OR of the wrapped checks.
"""
for rule in self.rules:
if rule(target, cred, enforcer, current_rule):
return True
else:
return False
def add_check(self, rule):
"""Adds rule to be checked.
Allow addition of another rule to the list of rules that will
be checked.
:return: self
:rtype: :class:`.AndChecker`
"""
self.rules.append(rule)
def pop_check(self):
"""Pops the last checker from the list and returns it.
:return: self, poped checker
:rtype: :class:`.OrChecker`, class:`.Checker`
"""
checker = self.rules.pop()
return self, checker
@register('rule')
class RuleCheck(Check):
def __call__(self, target, creds, enforcer, current_rule=None):
try:
return enforcer.rules[self.match](
target, creds, enforcer, current_rule)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register('role')
class RoleCheck(Check):
"""Check whether thers is a matched role in the ``creds`` dict."""
ROLE_ATTRIBUTE = 'roles'
def __call__(self, target, creds, enforcer, current_rule=None):
try:
match = self.match % _utils.dict_from_object(target)
except KeyError:
# if key not present in target return False
return False
roles = _utils.xgetattr(creds, self.ROLE_ATTRIBUTE, None)
return (match.lower() in (role.lower() for role in roles)
if roles else False)
@register(None)
class GenericChecker(Check):
"""Check an individual match.
Matches look like:
- tenant:%(tanant_id)s
- role:compute:admin
- True:%(user.enabled)s
- 'Member':%(role.name)s
"""
def _find_in_object(self, test_value, path_segments, match):
if len(path_segments) == 0:
return match == str(test_value)
key, path_segments = path_segments[0], path_segments[1:]
try:
test_value = _utils.xgetattr(test_value, key, getitem=True)
except (KeyError, AttributeError):
return False
if (isinstance(test_value, Iterable) and
not isinstance(test_value, (str, bytes))):
for val in test_value:
if self._find_in_object(val, path_segments, match):
return True
else:
return False
else:
return self._find_in_object(test_value, path_segments, match)
def __call__(self, target, creds, enforcer, current_rule=None):
try:
match = self.match % _utils.dict_from_object(target)
except KeyError:
# if key not present in target return False
return False
try:
test_value = ast.literal_eval(self.kind)
return match == str(test_value)
except ValueError:
pass
path_segments = self.kind.split('.')
return self._find_in_object(creds, path_segments, match)
|
garenchan/policy | demos/flask/server.py | enforce_policy | python | def enforce_policy(rule):
def wrapper(func):
"""Decorator used for wrap API."""
@functools.wraps(func)
def wrapped(*args, **kwargs):
if enforcer.enforce(rule, {}, g.cred):
return func(*args, **kwargs)
return wrapped
return wrapper | Enforce a policy to a API. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/demos/flask/server.py#L56-L67 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
from flask import Flask, request, g
from policy import Enforcer
from policy.exceptions import PolicyNotAuthorized
app = Flask(__name__)
enforcer = Enforcer('policy.json', raise_error=True)
@app.errorhandler(PolicyNotAuthorized)
def handle_policy_exception(error):
return str(error)
users = {
'lily': {
'id': 'd55a4192eb3b489589d5ee95dcf3af7d',
'roles': ['user', 'admin']
},
'kate': {
'id': '1a535309687244e2aa434b25ef4bfb59',
'roles': ['user']
},
'lucy': {
'id': '186977181e7f4a9e85104ca017e845f3',
'roles': ['user']
}
}
articles = {
'python': {
'id': 'e6e31ad693734b269099d9acac2cb800',
'user_id': '1a535309687244e2aa434b25ef4bfb59' # owned by kate
}
}
def login_required(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
username = request.args.get('me')
credential = users.get(username)
if not credential:
raise Exception('login required')
else:
g.cred = credential
return func(*args, **kwargs)
return wrapped
@app.route('/user', methods=['GET'])
@login_required
@enforce_policy('user:create')
def create_user():
# do create action here
return 'user created'
@app.route('/article', methods=['GET'])
@login_required
def delete_article():
article_name = request.args.get('name')
article = articles.get(article_name)
# do fine-grained permission check here
enforcer.enforce('article:delete', article, g.cred)
# do delete action here
return 'arcticle %s deleted' % article['id']
if __name__ == '__main__':
app.run(port=8888, debug=True)
|
garenchan/policy | setup.py | get_package_version | python | def get_package_version():
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, 'policy', '__init__.py'),
mode='rt',
encoding='utf-8') as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return m.groups()[0] | return package version without importing it | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L24-L34 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
try:
# Use setuptools if available
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages():
return ['policy', 'policy.tests']
# Check python version info
if sys.version_info < (3, 0, 0):
raise Exception('Policy only support Python 3.0.0+')
version = re.compile(r"__version__\s*=\s*'(.*?)'")
def get_long_description():
"""return package's long description"""
base = os.path.abspath(os.path.dirname(__file__))
readme_file = os.path.join(base, 'README.md')
with open(readme_file, mode='rt', encoding='utf-8') as readme:
return readme.read()
def get_classifiers():
return [
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
]
def get_install_requires():
"""return package's install requires"""
base = os.path.abspath(os.path.dirname(__file__))
requirements_file = os.path.join(base, 'requirements.txt')
if not os.path.exists(requirements_file):
return []
with open(requirements_file, mode='rt', encoding='utf-8') as f:
return f.read().splitlines()
if __name__ == '__main__':
setup(
name='policy',
version=get_package_version(),
description='A Policy library provides support for RBAC policy enforcement.',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author='garenchan',
author_email='1412950785@qq.com',
url='https://github.com/garenchan/policy',
license='http://www.apache.org/licenses/LICENSE-2.0',
classifiers=get_classifiers(),
packages=find_packages(),
install_requires=get_install_requires(),
)
|
garenchan/policy | setup.py | get_long_description | python | def get_long_description():
base = os.path.abspath(os.path.dirname(__file__))
readme_file = os.path.join(base, 'README.md')
with open(readme_file, mode='rt', encoding='utf-8') as readme:
return readme.read() | return package's long description | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L37-L42 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
try:
# Use setuptools if available
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages():
return ['policy', 'policy.tests']
# Check python version info
if sys.version_info < (3, 0, 0):
raise Exception('Policy only support Python 3.0.0+')
version = re.compile(r"__version__\s*=\s*'(.*?)'")
def get_package_version():
"""return package version without importing it"""
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, 'policy', '__init__.py'),
mode='rt',
encoding='utf-8') as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return m.groups()[0]
def get_classifiers():
return [
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
]
def get_install_requires():
"""return package's install requires"""
base = os.path.abspath(os.path.dirname(__file__))
requirements_file = os.path.join(base, 'requirements.txt')
if not os.path.exists(requirements_file):
return []
with open(requirements_file, mode='rt', encoding='utf-8') as f:
return f.read().splitlines()
if __name__ == '__main__':
setup(
name='policy',
version=get_package_version(),
description='A Policy library provides support for RBAC policy enforcement.',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author='garenchan',
author_email='1412950785@qq.com',
url='https://github.com/garenchan/policy',
license='http://www.apache.org/licenses/LICENSE-2.0',
classifiers=get_classifiers(),
packages=find_packages(),
install_requires=get_install_requires(),
)
|
garenchan/policy | setup.py | get_install_requires | python | def get_install_requires():
base = os.path.abspath(os.path.dirname(__file__))
requirements_file = os.path.join(base, 'requirements.txt')
if not os.path.exists(requirements_file):
return []
with open(requirements_file, mode='rt', encoding='utf-8') as f:
return f.read().splitlines() | return package's install requires | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/setup.py#L61-L68 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
try:
# Use setuptools if available
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages():
return ['policy', 'policy.tests']
# Check python version info
if sys.version_info < (3, 0, 0):
raise Exception('Policy only support Python 3.0.0+')
version = re.compile(r"__version__\s*=\s*'(.*?)'")
def get_package_version():
"""return package version without importing it"""
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, 'policy', '__init__.py'),
mode='rt',
encoding='utf-8') as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return m.groups()[0]
def get_long_description():
"""return package's long description"""
base = os.path.abspath(os.path.dirname(__file__))
readme_file = os.path.join(base, 'README.md')
with open(readme_file, mode='rt', encoding='utf-8') as readme:
return readme.read()
def get_classifiers():
return [
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
]
if __name__ == '__main__':
setup(
name='policy',
version=get_package_version(),
description='A Policy library provides support for RBAC policy enforcement.',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author='garenchan',
author_email='1412950785@qq.com',
url='https://github.com/garenchan/policy',
license='http://www.apache.org/licenses/LICENSE-2.0',
classifiers=get_classifiers(),
packages=find_packages(),
install_requires=get_install_requires(),
)
|
garenchan/policy | policy/_parser.py | reducer | python | def reducer(*tokens):
def wrapper(func):
# Make sure that we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the token to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return wrapper | Decorator for reduction methods.
Arguments are a sequence of tokens, which should trigger running
this reduction method. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L19-L36 | null | # -*- coding: utf-8 -*-
"""
policy._parser
~~~~~~~~~~~~~~~
Parser for parse policy file.
"""
import re
import logging
from policy import checks
from policy.exceptions import InvalidRuleException
LOG = logging.getLogger(__name__)
class ParserMeta(type):
"""Meta class for the :class:`.Parser` class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name: str, bases: tuple, attrs: dict):
"""Create the class.
Injects the 'reducers' attribute, a list of tuple matching token
sequences to the name of the corresponding reduction methods.
"""
reducers = []
for key, value in attrs.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
attrs['reducers'] = reducers
return super().__new__(mcs, name, bases, attrs)
class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
def parse_rule(rule: str, raise_error=False):
"""Parses policy to a tree of Check objects."""
parser = Parser(raise_error)
return parser.parse(rule)
|
garenchan/policy | policy/_parser.py | parse_rule | python | def parse_rule(rule: str, raise_error=False):
parser = Parser(raise_error)
return parser.parse(rule) | Parses policy to a tree of Check objects. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L269-L273 | [
"def parse(self, rule: str):\n \"\"\"Parses policy to tree.\n\n Translate a policy written in the policy language into a tree of\n Check objects.\n \"\"\"\n\n # Empty rule means always accept\n if not rule:\n return checks.TrueCheck()\n\n for token, value in self._parse_tokenize(rule):\n self._shift(token, value)\n\n try:\n return self.result\n except ValueError:\n LOG.exception('Failed to understand rule %r', rule)\n # Fail closed\n return checks.FalseCheck()\n"
] | # -*- coding: utf-8 -*-
"""
policy._parser
~~~~~~~~~~~~~~~
Parser for parse policy file.
"""
import re
import logging
from policy import checks
from policy.exceptions import InvalidRuleException
LOG = logging.getLogger(__name__)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, which should trigger running
this reduction method.
"""
def wrapper(func):
# Make sure that we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the token to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return wrapper
class ParserMeta(type):
"""Meta class for the :class:`.Parser` class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name: str, bases: tuple, attrs: dict):
"""Create the class.
Injects the 'reducers' attribute, a list of tuple matching token
sequences to the name of the corresponding reduction methods.
"""
reducers = []
for key, value in attrs.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
attrs['reducers'] = reducers
return super().__new__(mcs, name, bases, attrs)
class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
garenchan/policy | policy/_parser.py | Parser._reduce | python | def _reduce(self):
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce() | Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L75-L97 | null | class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
garenchan/policy | policy/_parser.py | Parser._parse_check | python | def _parse_check(self, rule):
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck() | Parse a single base check rule into an appropriate Check object. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L117-L145 | null | class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
garenchan/policy | policy/_parser.py | Parser._parse_tokenize | python | def _parse_tokenize(self, rule):
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')' | Tokenizer for the policy language. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L147-L186 | null | class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
garenchan/policy | policy/_parser.py | Parser.parse | python | def parse(self, rule: str):
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck() | Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L188-L207 | [
"def _shift(self, token, value):\n self.tokens.append(token)\n self.values.append(value)\n\n self._reduce()\n",
"def _parse_tokenize(self, rule):\n \"\"\"Tokenizer for the policy language.\"\"\"\n\n for token in self._TOKENIZE_RE.split(rule):\n # Skip empty tokens\n if not token or token.isspace():\n continue\n\n # Handle leading parens on the token\n clean = token.lstrip('(')\n for i in range(len(token) - len(clean)):\n yield '(', '('\n\n # If it was only parentheses, continue\n if not clean:\n continue\n else:\n token = clean\n\n # Handle trailing parens on the token\n clean = token.rstrip(')')\n trail = len(token) - len(clean)\n\n # Yield the cleaned token\n lowered = clean.lower()\n if lowered in ('and', 'or', 'not'):\n # Special tokens\n yield lowered, clean\n elif clean:\n # Not a special token, but not composed solely of ')'\n if len(token) >= 2 and ((token[0], token[-1]) in\n [('\"', '\"'), (\"'\", \"'\")]):\n # It's a quoted string\n yield 'string', token[1:-1]\n else:\n yield 'check', self._parse_check(clean)\n\n # Yield the trailing parens\n for i in range(trail):\n yield ')', ')'\n"
] | class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
def _mix_or_and_expr(self, or_expr, _and, check):
"""Modify the case 'A or B and C'
AND operator's priority is higher than OR operator.
"""
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
garenchan/policy | policy/_parser.py | Parser._mix_or_and_expr | python | def _mix_or_and_expr(self, or_expr, _and, check):
or_expr, check1 = or_expr.pop_check()
if isinstance(check1, checks.AndCheck):
and_expr = check1
and_expr.add_check(check)
else:
and_expr = checks.AndCheck(check1, check)
return [('or_expr', or_expr.add_check(and_expr))] | Modify the case 'A or B and C'
AND operator's priority is higher than OR operator. | train | https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/_parser.py#L226-L238 | null | class Parser(metaclass=ParserMeta):
# used for tokenizing the policy language
_TOKENIZE_RE = re.compile(r'\s+')
def __init__(self, raise_error: bool):
self.raise_error = raise_error
# Internal states
self.tokens = []
self.values = []
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def _shift(self, token, value):
self.tokens.append(token)
self.values.append(value)
self._reduce()
@property
def result(self):
"""Obtain the final result of the parse.
:return: check instance
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
def _parse_check(self, rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special constant-type checks
for check_cls in (checks.FalseCheck, checks.TrueCheck):
check = check_cls()
if rule == str(check):
return check
try:
kind, match = rule.split(':', 1)
except Exception:
if self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.exception('Failed to understand rule %r', rule)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
if kind in checks.registered_checks:
return checks.registered_checks[kind](kind, match)
elif None in checks.registered_checks:
return checks.registered_checks[None](kind, match)
elif self.raise_error:
raise InvalidRuleException(rule)
else:
LOG.error('No handler for matches of kind %r', kind)
# If the rule is invalid, we'll fail closed
return checks.FalseCheck()
def _parse_tokenize(self, rule):
"""Tokenizer for the policy language."""
for token in self._TOKENIZE_RE.split(rule):
# Skip empty tokens
if not token or token.isspace():
continue
# Handle leading parens on the token
clean = token.lstrip('(')
for i in range(len(token) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
token = clean
# Handle trailing parens on the token
clean = token.rstrip(')')
trail = len(token) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(token) >= 2 and ((token[0], token[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', token[1:-1]
else:
yield 'check', self._parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
def parse(self, rule: str):
"""Parses policy to tree.
Translate a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return checks.TrueCheck()
for token, value in self._parse_tokenize(rule):
self._shift(token, value)
try:
return self.result
except ValueError:
LOG.exception('Failed to understand rule %r', rule)
# Fail closed
return checks.FalseCheck()
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expression into a 'check' token"""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'
Join two checks by the 'and' operator.
"""
return [('and_expr', checks.AndCheck(check1, check2))]
@reducer('or_expr', 'and', 'check')
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding another check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
@reducer('and_expr', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'
Join two checks by the 'or' operator.
"""
return [('or_expr', checks.OrCheck(check1, check2))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding another check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of a check."""
return [('check', checks.NotCheck(check))]
|
marrow/util | marrow/util/context/cwd.py | pcwd | python | def pcwd(func):
@wraps(func)
def inner(*args, **kw):
with PreserveWorkingDirectory():
return func(*args, **kw)
return inner | A decorator to provide the functionality of the
PreserveWorkingDirectory context manager for functions and methods. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/context/cwd.py#L32-L41 | null | # encoding: utf-8
import os
from contextlib import contextmanager
from functools import wraps
__all__ = ['PreserveWorkingDirectory', 'pcwd']
class PreserveWorkingDirectory(object):
"""A context manager to preserve the current working directory.
Additionally, if @nwd@ is provided, preserve the current working
directory and change to @nwd@.
"""
def __init__(self, nwd=None):
self.cwd = None
self.nwd = nwd
def __enter__(self):
self.cwd = os.getcwd()
if self.nwd: os.chdir(self.nwd)
def __exit__(self, type, value, traceback):
os.chdir(self.cwd)
def pcwd(func):
"""A decorator to provide the functionality of the
PreserveWorkingDirectory context manager for functions and methods."""
@wraps(func)
def inner(*args, **kw):
with PreserveWorkingDirectory():
return func(*args, **kw)
return inner
|
marrow/util | marrow/util/context/__init__.py | cd | python | def cd(path, on=os):
original = on.getcwd()
on.chdir(path)
yield
on.chdir(original) | Change the current working directory within this context.
Preserves the previous working directory and can be applied to remote
connections that offer @getcwd@ and @chdir@ methods using the @on@
argument. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/context/__init__.py#L13-L26 | null | # encoding: utf-8
import os
from contextlib import contextmanager
__all__ = ['cd', 'path']
@contextmanager
def cd(path, on=os):
"""Change the current working directory within this context.
Preserves the previous working directory and can be applied to remote
connections that offer @getcwd@ and @chdir@ methods using the @on@
argument.
"""
original = on.getcwd()
on.chdir(path)
yield
on.chdir(original)
@contextmanager
def path(append=None, prepend=None, replace=None, on=os):
"""Update the PATH environment variable.
Can append, prepend, or replace the path. Each of these expects a string
or a list of strings (for multiple path elements) and can operate on remote
connections that offer an @environ@ attribute using the @on@ argument.
"""
original = on.environ['PATH']
if replace and (append or prepend):
raise ValueError("You can not combine append or prepend with replace.")
if replace:
if not isinstance(replace, list):
replace = list(replace)
on.environ['PATH'] = ':'.join(replace)
else:
if append:
if not isinstance(append, list):
append = list(append)
append.insert(0, on.environ['PATH'])
on.environ['PATH'] = ':'.join(append)
if prepend:
if not isinstance(prepend, list):
prepend = list(prepend)
prepend.append(on.environ['PATH'])
on.environ['PATH'] = ':'.join(prepend)
yield
on.environ['PATH'] = original
@contextmanager
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value
|
marrow/util | marrow/util/context/__init__.py | path | python | def path(append=None, prepend=None, replace=None, on=os):
original = on.environ['PATH']
if replace and (append or prepend):
raise ValueError("You can not combine append or prepend with replace.")
if replace:
if not isinstance(replace, list):
replace = list(replace)
on.environ['PATH'] = ':'.join(replace)
else:
if append:
if not isinstance(append, list):
append = list(append)
append.insert(0, on.environ['PATH'])
on.environ['PATH'] = ':'.join(append)
if prepend:
if not isinstance(prepend, list):
prepend = list(prepend)
prepend.append(on.environ['PATH'])
on.environ['PATH'] = ':'.join(prepend)
yield
on.environ['PATH'] = original | Update the PATH environment variable.
Can append, prepend, or replace the path. Each of these expects a string
or a list of strings (for multiple path elements) and can operate on remote
connections that offer an @environ@ attribute using the @on@ argument. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/context/__init__.py#L30-L66 | null | # encoding: utf-8
import os
from contextlib import contextmanager
__all__ = ['cd', 'path']
@contextmanager
def cd(path, on=os):
"""Change the current working directory within this context.
Preserves the previous working directory and can be applied to remote
connections that offer @getcwd@ and @chdir@ methods using the @on@
argument.
"""
original = on.getcwd()
on.chdir(path)
yield
on.chdir(original)
@contextmanager
def path(append=None, prepend=None, replace=None, on=os):
"""Update the PATH environment variable.
Can append, prepend, or replace the path. Each of these expects a string
or a list of strings (for multiple path elements) and can operate on remote
connections that offer an @environ@ attribute using the @on@ argument.
"""
original = on.environ['PATH']
if replace and (append or prepend):
raise ValueError("You can not combine append or prepend with replace.")
if replace:
if not isinstance(replace, list):
replace = list(replace)
on.environ['PATH'] = ':'.join(replace)
else:
if append:
if not isinstance(append, list):
append = list(append)
append.insert(0, on.environ['PATH'])
on.environ['PATH'] = ':'.join(append)
if prepend:
if not isinstance(prepend, list):
prepend = list(prepend)
prepend.append(on.environ['PATH'])
on.environ['PATH'] = ':'.join(prepend)
yield
on.environ['PATH'] = original
@contextmanager
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value
|
marrow/util | marrow/util/context/__init__.py | environ | python | def environ(on=os, **kw):
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value | Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/context/__init__.py#L70-L91 | null | # encoding: utf-8
import os
from contextlib import contextmanager
__all__ = ['cd', 'path']
@contextmanager
def cd(path, on=os):
"""Change the current working directory within this context.
Preserves the previous working directory and can be applied to remote
connections that offer @getcwd@ and @chdir@ methods using the @on@
argument.
"""
original = on.getcwd()
on.chdir(path)
yield
on.chdir(original)
@contextmanager
def path(append=None, prepend=None, replace=None, on=os):
"""Update the PATH environment variable.
Can append, prepend, or replace the path. Each of these expects a string
or a list of strings (for multiple path elements) and can operate on remote
connections that offer an @environ@ attribute using the @on@ argument.
"""
original = on.environ['PATH']
if replace and (append or prepend):
raise ValueError("You can not combine append or prepend with replace.")
if replace:
if not isinstance(replace, list):
replace = list(replace)
on.environ['PATH'] = ':'.join(replace)
else:
if append:
if not isinstance(append, list):
append = list(append)
append.insert(0, on.environ['PATH'])
on.environ['PATH'] = ':'.join(append)
if prepend:
if not isinstance(prepend, list):
prepend = list(prepend)
prepend.append(on.environ['PATH'])
on.environ['PATH'] = ':'.join(prepend)
yield
on.environ['PATH'] = original
@contextmanager
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value
|
marrow/util | marrow/util/bunch.py | Bunch.partial | python | def partial(cls, prefix, source):
match = prefix + "."
matches = cls([(key[len(match):], source[key]) for key in source if key.startswith(match)])
if not matches:
raise ValueError()
return matches | Strip a prefix from the keys of another dictionary, returning a Bunch containing only valid key, value pairs. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/bunch.py#L41-L50 | null | class Bunch(dict):
"""A dictionary with attribute-style access. It maps attribute access to the real dictionary."""
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, super(Bunch, self).__repr__())
def __getattr__(self, name):
try:
value = self[name]
return Bunch(value) if isinstance(value, dict) else value
except KeyError:
try:
return self.partial(name, self)
except ValueError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
def __delattr__(self, name):
try:
del self[name]
except KeyError:
raise AttributeError(name)
@classmethod
def partial(cls, prefix, source):
"""Strip a prefix from the keys of another dictionary, returning a Bunch containing only valid key, value pairs."""
match = prefix + "."
matches = cls([(key[len(match):], source[key]) for key in source if key.startswith(match)])
if not matches:
raise ValueError()
return matches
|
marrow/util | marrow/util/pipe.py | pipe | python | def pipe():
try:
from os import pipe
return pipe()
except:
pipe = Pipe()
return pipe.reader_fd, pipe.writer_fd | Return the optimum pipe implementation for the capabilities of the active system. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/pipe.py#L63-L72 | null | # encoding: utf-8
__all__ = ['Pipe', 'pipe']
class Pipe(object):
"""An OS independent asynchronous pipe."""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
a = socket.socket()
a.bind(("127.0.0.1", 0))
connect_address = a.getsockname()
a.listen(1)
try:
self.writer.connect(connect_address)
break
except socket.error:
detail = exception().exception
if detail[0] != errno.WSAEADDRINUSE:
raise
if count >= 10:
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.writer_fd = self.writer.fileno()
self.reader_fd = self.reader.fileno()
def read(self):
"""Emulate a file descriptors read method"""
try:
return self.reader.recv(1)
except socket.error:
ex = exception().exception
if ex.args[0] == errno.EWOULDBLOCK:
raise IOError
raise
def write(self, data):
"""Emulate a file descriptors write method"""
return self.writer.send(data)
def pipe():
"""Return the optimum pipe implementation for the capabilities of the active system."""
try:
from os import pipe
return pipe()
except:
pipe = Pipe()
return pipe.reader_fd, pipe.writer_fd
|
marrow/util | marrow/util/pipe.py | Pipe.read | python | def read(self):
try:
return self.reader.recv(1)
except socket.error:
ex = exception().exception
if ex.args[0] == errno.EWOULDBLOCK:
raise IOError
raise | Emulate a file descriptors read method | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/pipe.py#L48-L56 | null | class Pipe(object):
"""An OS independent asynchronous pipe."""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
a = socket.socket()
a.bind(("127.0.0.1", 0))
connect_address = a.getsockname()
a.listen(1)
try:
self.writer.connect(connect_address)
break
except socket.error:
detail = exception().exception
if detail[0] != errno.WSAEADDRINUSE:
raise
if count >= 10:
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.writer_fd = self.writer.fileno()
self.reader_fd = self.reader.fileno()
def write(self, data):
"""Emulate a file descriptors write method"""
return self.writer.send(data)
|
marrow/util | marrow/util/text.py | ellipsis | python | def ellipsis(text, length, symbol="..."):
if len(text) > length:
pos = text.rfind(" ", 0, length)
if pos < 0:
return text[:length].rstrip(".") + symbol
else:
return text[:pos].rstrip(".") + symbol
else:
return text | Present a block of text of given length.
If the length of available text exceeds the requested length, truncate and
intelligently append an ellipsis. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/text.py#L26-L40 | null | # encoding: utf-8
"""Text processing helper functions."""
import re
__all__ = ['normalize', 'ellipsis', 'wrap']
NORMALIZE_EXPRESSION = re.compile('\W+')
def normalize(name, collection=[], replacement='-'):
base = NORMALIZE_EXPRESSION.sub(replacement, name.lower())
suffix = 0
while True:
if ("%s%s" % (base.strip(replacement), ("%s%d" % (replacement, suffix)) if suffix else "")) not in collection: break
suffix += 1
return ("%s%s" % (base.strip(replacement), ("%s%d" % (replacement, suffix)) if suffix else ""))
def ellipsis(text, length, symbol="..."):
"""Present a block of text of given length.
If the length of available text exceeds the requested length, truncate and
intelligently append an ellipsis.
"""
if len(text) > length:
pos = text.rfind(" ", 0, length)
if pos < 0:
return text[:length].rstrip(".") + symbol
else:
return text[:pos].rstrip(".") + symbol
else:
return text
def wrap(text, columns=78):
from textwrap import wrap
lines = []
for iline in text.splitlines():
if not iline:
lines.append(iline)
else:
for oline in wrap(iline, columns):
lines.append(oline)
return "\n".join(lines)
def rewrap(text, columns=78):
lines = []
if isinstance(text, list):
in_paragraph = False
for line in text:
if not line:
in_paragraph = False
lines.append(line)
continue
if in_paragraph:
lines[-1] = lines[-1] + ' ' + line
continue
lines.append(line)
in_paragraph = True
text = "\n".join(lines)
lines = []
in_paragraph = True
for iline in text.splitlines():
if not iline:
lines.append(iline)
else:
for oline in wrap_(iline, columns):
lines.append(oline)
return "\n".join(lines)
|
marrow/util | marrow/util/object.py | flatten | python | def flatten(x):
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, (binary, unicode)):
for els in flatten(el):
yield els
else:
yield el | flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10] | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L15-L34 | [
"def flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n \"\"\"\n\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, (binary, unicode)):\n for els in flatten(el):\n yield els\n else:\n yield el\n"
] | # encoding: utf-8
"""Object instance and class helper functions."""
import logging
import inspect
import pkg_resources
from collections import defaultdict
from functools import partial
from marrow.util.compat import binary, unicode
def yield_property(iterable, name, default=None):
for i in iterable: yield getattr(i, name, default)
def yield_keyvalue(iterable, key, default=None):
for i in iterable: yield i[key] if key in iterable else default
class _NoDefault(object):
pass
NoDefault = _NoDefault()
def merge(s, t):
"""Merge dictionary t into s."""
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s
def load_object(target, namespace=None):
"""This helper function loads an object identified by a dotted-notation string.
For example:
# Load class Foo from example.objects
load_object('example.objects:Foo')
If a plugin namespace is provided simple name references are allowed. For example:
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load_object('routing', 'web.dispatch')
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace))
if target not in allowable:
raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, target = target.split(':') if ':' in target else (target, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
class PluginCache(defaultdict):
"""Lazily load plugins from the given namespace."""
def __init__(self, namespace):
super(PluginCache, self).__init__()
self.namespace = namespace
def __missing__(self, key):
return load_object(key, self.namespace)
class Cache(dict):
"""A least-recently-used (LRU) cache.
Discards the least recently referenced object when full.
Based on Python Cookbook contributions from multiple sources:
* http://code.activestate.com/recipes/521871/
* http://code.activestate.com/recipes/498110/
* http://code.activestate.com/recipes/252524/
* http://code.activestate.com/recipes/498245/
And Genshi's LRUCache:
http://genshi.edgewall.org/browser/trunk/genshi/util.py
Warning: If memory cleanup is diabled this dictionary will leak.
"""
class CacheElement(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key, self.value = key, value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
super(Cache, self).__init__()
self.head = self.tail = None
self.capacity = capacity
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __getitem__(self, key):
element = super(Cache, self).__getitem__(key)
self._update(element)
return element.value
def __setitem__(self, key, value):
try:
element = super(Cache, self).__getitem__(key)
element.value = value
self._update(element)
except KeyError:
# Item doesn't exist, create a new wrapper element.
element = self.CacheElement(key, value)
super(Cache, self).__setitem__(key, element)
self._insert(element)
self._restrict()
def _insert(self, element):
element.previous, element.next = None, self.head
if self.head is not None:
self.head.previous = element
else:
self.tail = element
self.head = element
def _restrict(self):
while len(self) > self.capacity:
# element = super(Cache, self).get(self.tail.key)
del self[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update(self, element):
if self.head == element:
return
previous = element.previous
previous.next = element.next
if element.next is not None:
element.next.previous = previous
else:
self.tail = previous
element.previous, element.next = None, self.head
self.head.previous = self.head = element
class LoggingFile(object):
"""A write-only file-like object that redirects to the standard Python logging module."""
def __init__(self, logger=None, level=logging.ERROR):
logger = logger if logger else logging.getLogger('logfile')
self.logger = partial(logger.log, level)
def write(self, text):
self.logger(text)
def writelines(self, lines):
for line in lines:
self.logger(line)
def close(self, *args, **kw):
"""A no-op method used for several of the file-like object methods."""
pass
def next(self, *args, **kw):
"""An error-raising exception usedbfor several of the methods."""
raise IOError("Logging files can not be read.")
flush = close
read = next
readline = next
readlines = next
class CounterMeta(type):
'''
A simple meta class which adds a ``_counter`` attribute to the instances of
the classes it is used on. This counter is simply incremented for each new
instance.
'''
counter = 0
def __call__(self, *args, **kwargs):
instance = type.__call__(self, *args, **kwargs)
instance._counter = CounterMeta.counter
CounterMeta.counter += 1
return instance
def getargspec(obj):
"""An improved inspect.getargspec.
Has a slightly different return value from the default getargspec.
Returns a tuple of:
required, optional, args, kwargs
list, dict, bool, bool
Required is a list of required named arguments.
Optional is a dictionary mapping optional arguments to defaults.
Args and kwargs are True for the respective unlimited argument type.
"""
argnames, varargs, varkw, _defaults = None, None, None, None
if inspect.isfunction(obj) or inspect.ismethod(obj):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj)
elif inspect.isclass(obj):
if inspect.ismethoddescriptor(obj.__init__):
argnames, varargs, varkw, _defaults = [], False, False, None
else:
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)
elif hasattr(obj, '__call__'):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)
else:
raise TypeError("Object not callable?")
# Need test case to prove this is even possible.
# if (argnames, varargs, varkw, defaults) is (None, None, None, None):
# raise InspectionFailed()
if argnames and argnames[0] == 'self':
del argnames[0]
if _defaults is None:
_defaults = []
defaults = dict()
else:
# Create a mapping dictionary of defaults; this is slightly more useful.
defaults = dict()
_defaults = list(_defaults)
_defaults.reverse()
argnames.reverse()
for i, default in enumerate(_defaults):
defaults[argnames[i]] = default
argnames.reverse()
# del argnames[-len(_defaults):]
return argnames, defaults, True if varargs else False, True if varkw else False
class RichComparisonMixin(object):
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
|
marrow/util | marrow/util/object.py | merge | python | def merge(s, t):
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s | Merge dictionary t into s. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L51-L65 | [
"def merge(s, t):\n \"\"\"Merge dictionary t into s.\"\"\"\n\n for k, v in t.items():\n if isinstance(v, dict):\n if k not in s:\n s[k] = v\n continue\n\n s[k] = merge(s[k], v)\n continue\n\n s[k] = v\n\n return s\n"
] | # encoding: utf-8
"""Object instance and class helper functions."""
import logging
import inspect
import pkg_resources
from collections import defaultdict
from functools import partial
from marrow.util.compat import binary, unicode
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
"""
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, (binary, unicode)):
for els in flatten(el):
yield els
else:
yield el
def yield_property(iterable, name, default=None):
for i in iterable: yield getattr(i, name, default)
def yield_keyvalue(iterable, key, default=None):
for i in iterable: yield i[key] if key in iterable else default
class _NoDefault(object):
pass
NoDefault = _NoDefault()
def load_object(target, namespace=None):
"""This helper function loads an object identified by a dotted-notation string.
For example:
# Load class Foo from example.objects
load_object('example.objects:Foo')
If a plugin namespace is provided simple name references are allowed. For example:
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load_object('routing', 'web.dispatch')
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace))
if target not in allowable:
raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, target = target.split(':') if ':' in target else (target, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
class PluginCache(defaultdict):
"""Lazily load plugins from the given namespace."""
def __init__(self, namespace):
super(PluginCache, self).__init__()
self.namespace = namespace
def __missing__(self, key):
return load_object(key, self.namespace)
class Cache(dict):
"""A least-recently-used (LRU) cache.
Discards the least recently referenced object when full.
Based on Python Cookbook contributions from multiple sources:
* http://code.activestate.com/recipes/521871/
* http://code.activestate.com/recipes/498110/
* http://code.activestate.com/recipes/252524/
* http://code.activestate.com/recipes/498245/
And Genshi's LRUCache:
http://genshi.edgewall.org/browser/trunk/genshi/util.py
Warning: If memory cleanup is diabled this dictionary will leak.
"""
class CacheElement(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key, self.value = key, value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
super(Cache, self).__init__()
self.head = self.tail = None
self.capacity = capacity
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __getitem__(self, key):
element = super(Cache, self).__getitem__(key)
self._update(element)
return element.value
def __setitem__(self, key, value):
try:
element = super(Cache, self).__getitem__(key)
element.value = value
self._update(element)
except KeyError:
# Item doesn't exist, create a new wrapper element.
element = self.CacheElement(key, value)
super(Cache, self).__setitem__(key, element)
self._insert(element)
self._restrict()
def _insert(self, element):
element.previous, element.next = None, self.head
if self.head is not None:
self.head.previous = element
else:
self.tail = element
self.head = element
def _restrict(self):
while len(self) > self.capacity:
# element = super(Cache, self).get(self.tail.key)
del self[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update(self, element):
if self.head == element:
return
previous = element.previous
previous.next = element.next
if element.next is not None:
element.next.previous = previous
else:
self.tail = previous
element.previous, element.next = None, self.head
self.head.previous = self.head = element
class LoggingFile(object):
"""A write-only file-like object that redirects to the standard Python logging module."""
def __init__(self, logger=None, level=logging.ERROR):
logger = logger if logger else logging.getLogger('logfile')
self.logger = partial(logger.log, level)
def write(self, text):
self.logger(text)
def writelines(self, lines):
for line in lines:
self.logger(line)
def close(self, *args, **kw):
"""A no-op method used for several of the file-like object methods."""
pass
def next(self, *args, **kw):
"""An error-raising exception usedbfor several of the methods."""
raise IOError("Logging files can not be read.")
flush = close
read = next
readline = next
readlines = next
class CounterMeta(type):
'''
A simple meta class which adds a ``_counter`` attribute to the instances of
the classes it is used on. This counter is simply incremented for each new
instance.
'''
counter = 0
def __call__(self, *args, **kwargs):
instance = type.__call__(self, *args, **kwargs)
instance._counter = CounterMeta.counter
CounterMeta.counter += 1
return instance
def getargspec(obj):
"""An improved inspect.getargspec.
Has a slightly different return value from the default getargspec.
Returns a tuple of:
required, optional, args, kwargs
list, dict, bool, bool
Required is a list of required named arguments.
Optional is a dictionary mapping optional arguments to defaults.
Args and kwargs are True for the respective unlimited argument type.
"""
argnames, varargs, varkw, _defaults = None, None, None, None
if inspect.isfunction(obj) or inspect.ismethod(obj):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj)
elif inspect.isclass(obj):
if inspect.ismethoddescriptor(obj.__init__):
argnames, varargs, varkw, _defaults = [], False, False, None
else:
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)
elif hasattr(obj, '__call__'):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)
else:
raise TypeError("Object not callable?")
# Need test case to prove this is even possible.
# if (argnames, varargs, varkw, defaults) is (None, None, None, None):
# raise InspectionFailed()
if argnames and argnames[0] == 'self':
del argnames[0]
if _defaults is None:
_defaults = []
defaults = dict()
else:
# Create a mapping dictionary of defaults; this is slightly more useful.
defaults = dict()
_defaults = list(_defaults)
_defaults.reverse()
argnames.reverse()
for i, default in enumerate(_defaults):
defaults[argnames[i]] = default
argnames.reverse()
# del argnames[-len(_defaults):]
return argnames, defaults, True if varargs else False, True if varkw else False
class RichComparisonMixin(object):
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
|
marrow/util | marrow/util/object.py | load_object | python | def load_object(target, namespace=None):
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace))
if target not in allowable:
raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, target = target.split(':') if ':' in target else (target, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module | This helper function loads an object identified by a dotted-notation string.
For example:
# Load class Foo from example.objects
load_object('example.objects:Foo')
If a plugin namespace is provided simple name references are allowed. For example:
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load_object('routing', 'web.dispatch')
Providing a namespace does not prevent full object lookup (dot-colon notation) from working. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L68-L95 | null | # encoding: utf-8
"""Object instance and class helper functions."""
import logging
import inspect
import pkg_resources
from collections import defaultdict
from functools import partial
from marrow.util.compat import binary, unicode
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
"""
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, (binary, unicode)):
for els in flatten(el):
yield els
else:
yield el
def yield_property(iterable, name, default=None):
for i in iterable: yield getattr(i, name, default)
def yield_keyvalue(iterable, key, default=None):
for i in iterable: yield i[key] if key in iterable else default
class _NoDefault(object):
pass
NoDefault = _NoDefault()
def merge(s, t):
"""Merge dictionary t into s."""
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s
class PluginCache(defaultdict):
"""Lazily load plugins from the given namespace."""
def __init__(self, namespace):
super(PluginCache, self).__init__()
self.namespace = namespace
def __missing__(self, key):
return load_object(key, self.namespace)
class Cache(dict):
"""A least-recently-used (LRU) cache.
Discards the least recently referenced object when full.
Based on Python Cookbook contributions from multiple sources:
* http://code.activestate.com/recipes/521871/
* http://code.activestate.com/recipes/498110/
* http://code.activestate.com/recipes/252524/
* http://code.activestate.com/recipes/498245/
And Genshi's LRUCache:
http://genshi.edgewall.org/browser/trunk/genshi/util.py
Warning: If memory cleanup is diabled this dictionary will leak.
"""
class CacheElement(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key, self.value = key, value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
super(Cache, self).__init__()
self.head = self.tail = None
self.capacity = capacity
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __getitem__(self, key):
element = super(Cache, self).__getitem__(key)
self._update(element)
return element.value
def __setitem__(self, key, value):
try:
element = super(Cache, self).__getitem__(key)
element.value = value
self._update(element)
except KeyError:
# Item doesn't exist, create a new wrapper element.
element = self.CacheElement(key, value)
super(Cache, self).__setitem__(key, element)
self._insert(element)
self._restrict()
def _insert(self, element):
element.previous, element.next = None, self.head
if self.head is not None:
self.head.previous = element
else:
self.tail = element
self.head = element
def _restrict(self):
while len(self) > self.capacity:
# element = super(Cache, self).get(self.tail.key)
del self[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update(self, element):
if self.head == element:
return
previous = element.previous
previous.next = element.next
if element.next is not None:
element.next.previous = previous
else:
self.tail = previous
element.previous, element.next = None, self.head
self.head.previous = self.head = element
class LoggingFile(object):
"""A write-only file-like object that redirects to the standard Python logging module."""
def __init__(self, logger=None, level=logging.ERROR):
logger = logger if logger else logging.getLogger('logfile')
self.logger = partial(logger.log, level)
def write(self, text):
self.logger(text)
def writelines(self, lines):
for line in lines:
self.logger(line)
def close(self, *args, **kw):
"""A no-op method used for several of the file-like object methods."""
pass
def next(self, *args, **kw):
"""An error-raising exception usedbfor several of the methods."""
raise IOError("Logging files can not be read.")
flush = close
read = next
readline = next
readlines = next
class CounterMeta(type):
'''
A simple meta class which adds a ``_counter`` attribute to the instances of
the classes it is used on. This counter is simply incremented for each new
instance.
'''
counter = 0
def __call__(self, *args, **kwargs):
instance = type.__call__(self, *args, **kwargs)
instance._counter = CounterMeta.counter
CounterMeta.counter += 1
return instance
def getargspec(obj):
"""An improved inspect.getargspec.
Has a slightly different return value from the default getargspec.
Returns a tuple of:
required, optional, args, kwargs
list, dict, bool, bool
Required is a list of required named arguments.
Optional is a dictionary mapping optional arguments to defaults.
Args and kwargs are True for the respective unlimited argument type.
"""
argnames, varargs, varkw, _defaults = None, None, None, None
if inspect.isfunction(obj) or inspect.ismethod(obj):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj)
elif inspect.isclass(obj):
if inspect.ismethoddescriptor(obj.__init__):
argnames, varargs, varkw, _defaults = [], False, False, None
else:
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)
elif hasattr(obj, '__call__'):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)
else:
raise TypeError("Object not callable?")
# Need test case to prove this is even possible.
# if (argnames, varargs, varkw, defaults) is (None, None, None, None):
# raise InspectionFailed()
if argnames and argnames[0] == 'self':
del argnames[0]
if _defaults is None:
_defaults = []
defaults = dict()
else:
# Create a mapping dictionary of defaults; this is slightly more useful.
defaults = dict()
_defaults = list(_defaults)
_defaults.reverse()
argnames.reverse()
for i, default in enumerate(_defaults):
defaults[argnames[i]] = default
argnames.reverse()
# del argnames[-len(_defaults):]
return argnames, defaults, True if varargs else False, True if varkw else False
class RichComparisonMixin(object):
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
|
marrow/util | marrow/util/object.py | getargspec | python | def getargspec(obj):
argnames, varargs, varkw, _defaults = None, None, None, None
if inspect.isfunction(obj) or inspect.ismethod(obj):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj)
elif inspect.isclass(obj):
if inspect.ismethoddescriptor(obj.__init__):
argnames, varargs, varkw, _defaults = [], False, False, None
else:
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)
elif hasattr(obj, '__call__'):
argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)
else:
raise TypeError("Object not callable?")
# Need test case to prove this is even possible.
# if (argnames, varargs, varkw, defaults) is (None, None, None, None):
# raise InspectionFailed()
if argnames and argnames[0] == 'self':
del argnames[0]
if _defaults is None:
_defaults = []
defaults = dict()
else:
# Create a mapping dictionary of defaults; this is slightly more useful.
defaults = dict()
_defaults = list(_defaults)
_defaults.reverse()
argnames.reverse()
for i, default in enumerate(_defaults):
defaults[argnames[i]] = default
argnames.reverse()
# del argnames[-len(_defaults):]
return argnames, defaults, True if varargs else False, True if varkw else False | An improved inspect.getargspec.
Has a slightly different return value from the default getargspec.
Returns a tuple of:
required, optional, args, kwargs
list, dict, bool, bool
Required is a list of required named arguments.
Optional is a dictionary mapping optional arguments to defaults.
Args and kwargs are True for the respective unlimited argument type. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L253-L308 | null | # encoding: utf-8
"""Object instance and class helper functions."""
import logging
import inspect
import pkg_resources
from collections import defaultdict
from functools import partial
from marrow.util.compat import binary, unicode
def flatten(x):
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
"""
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, (binary, unicode)):
for els in flatten(el):
yield els
else:
yield el
def yield_property(iterable, name, default=None):
for i in iterable: yield getattr(i, name, default)
def yield_keyvalue(iterable, key, default=None):
for i in iterable: yield i[key] if key in iterable else default
class _NoDefault(object):
pass
NoDefault = _NoDefault()
def merge(s, t):
"""Merge dictionary t into s."""
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s
def load_object(target, namespace=None):
"""This helper function loads an object identified by a dotted-notation string.
For example:
# Load class Foo from example.objects
load_object('example.objects:Foo')
If a plugin namespace is provided simple name references are allowed. For example:
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load_object('routing', 'web.dispatch')
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace))
if target not in allowable:
raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, target = target.split(':') if ':' in target else (target, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
class PluginCache(defaultdict):
"""Lazily load plugins from the given namespace."""
def __init__(self, namespace):
super(PluginCache, self).__init__()
self.namespace = namespace
def __missing__(self, key):
return load_object(key, self.namespace)
class Cache(dict):
"""A least-recently-used (LRU) cache.
Discards the least recently referenced object when full.
Based on Python Cookbook contributions from multiple sources:
* http://code.activestate.com/recipes/521871/
* http://code.activestate.com/recipes/498110/
* http://code.activestate.com/recipes/252524/
* http://code.activestate.com/recipes/498245/
And Genshi's LRUCache:
http://genshi.edgewall.org/browser/trunk/genshi/util.py
Warning: If memory cleanup is diabled this dictionary will leak.
"""
class CacheElement(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key, self.value = key, value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
super(Cache, self).__init__()
self.head = self.tail = None
self.capacity = capacity
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __getitem__(self, key):
element = super(Cache, self).__getitem__(key)
self._update(element)
return element.value
def __setitem__(self, key, value):
try:
element = super(Cache, self).__getitem__(key)
element.value = value
self._update(element)
except KeyError:
# Item doesn't exist, create a new wrapper element.
element = self.CacheElement(key, value)
super(Cache, self).__setitem__(key, element)
self._insert(element)
self._restrict()
def _insert(self, element):
element.previous, element.next = None, self.head
if self.head is not None:
self.head.previous = element
else:
self.tail = element
self.head = element
def _restrict(self):
while len(self) > self.capacity:
# element = super(Cache, self).get(self.tail.key)
del self[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update(self, element):
if self.head == element:
return
previous = element.previous
previous.next = element.next
if element.next is not None:
element.next.previous = previous
else:
self.tail = previous
element.previous, element.next = None, self.head
self.head.previous = self.head = element
class LoggingFile(object):
"""A write-only file-like object that redirects to the standard Python logging module."""
def __init__(self, logger=None, level=logging.ERROR):
logger = logger if logger else logging.getLogger('logfile')
self.logger = partial(logger.log, level)
def write(self, text):
self.logger(text)
def writelines(self, lines):
for line in lines:
self.logger(line)
def close(self, *args, **kw):
"""A no-op method used for several of the file-like object methods."""
pass
def next(self, *args, **kw):
"""An error-raising exception usedbfor several of the methods."""
raise IOError("Logging files can not be read.")
flush = close
read = next
readline = next
readlines = next
class CounterMeta(type):
'''
A simple meta class which adds a ``_counter`` attribute to the instances of
the classes it is used on. This counter is simply incremented for each new
instance.
'''
counter = 0
def __call__(self, *args, **kwargs):
instance = type.__call__(self, *args, **kwargs)
instance._counter = CounterMeta.counter
CounterMeta.counter += 1
return instance
class RichComparisonMixin(object):
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
|
marrow/util | marrow/util/convert.py | boolean | python | def boolean(input):
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input)) | Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/convert.py#L16-L60 | null | # encoding: utf-8
"""Useful datatype converters."""
import re
import collections
import inspect
from marrow.util.compat import binary, unicode
__all__ = ['boolean', 'array', 'integer', 'number', 'KeywordProcessor', 'tags', 'terms']
def boolean(input):
"""Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool
"""
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
def array(input, separator=',', strip=True, empty=False):
"""Convert the given input to a list.
Intelligently handles list and non-string values, returning
as-is and passing to the list builtin respectively.
The default optional keyword arguments allow for lists in the form::
"foo,bar, baz , diz" -> ['foo', 'bar', 'baz', 'diz']
For a far more advanced method of converting a string to a list of
values see :class:`KeywordProcessor`.
:param input: the value to convert to a list
:type input: any
:param separator: The character (or string) to use to split the
input. May be None to split on any whitespace.
:type separator: basestring or None
:param strip: If True, the values found by splitting will be stripped
of extraneous whitespace.
:type strip: bool
:param empty: If True, allow empty list items.
:type empty: bool
:returns: converted values as a list
:rtype: list
"""
if input is None:
return []
if isinstance(input, list):
if not empty:
return [i for i in input if i]
return input
if not isinstance(input, (binary, unicode)):
if not empty:
return [i for i in list(input) if i]
return list(input)
if not strip:
if not empty:
return [i for i in input.split(separator) if i]
return input.split(separator)
if not empty:
return [i for i in [i.strip() for i in input.split(separator)] if i]
return [i.strip() for i in input.split(separator)]
def integer(input):
"""Convert the given input to an integer value.
:param input: the value to convert to an integer
:type input: any
:returns: converted integer value
:rtype: int
"""
try:
return int(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to an integer value.".format(input))
def number(input):
"""Convert the given input to a floating point or integer value.
In cases of ambiguity, integers will be prefered to floating point.
:param input: the value to convert to a number
:type input: any
:returns: converted integer value
:rtype: float or int
"""
try:
return int(input)
except (TypeError, ValueError):
pass
try:
return float(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to a number.".format(input))
class KeywordProcessor(object):
"""Process user-supplied keywords, tags, or search terms.
This tries to be as flexible as possible while remaining efficient.
The vast majority of the work is done in the regular expression,
and the primary goal of this class is to generate the complex regular
expression.
Two example converters covering the most common cases are created for
you automatically as :data:`tags` and :data:`terms`.
"""
def __init__(self, separators=' \t', quotes="\"'", groups=[], group=False, normalize=None, sort=False, result=list):
"""Configure the processor.
:param separators: A list of acceptable separator characters. The first will be used for joins.
:type separator: list or string
:param quotes: Pass a list or tuple of allowable quotes. E.g. ["\"", "'"] or None to disable.
:param groups: Pass a string, list, or tuple of allowable prefixes. E.g. '+-' or None to disable.
:param group: Pass in the type you want to group by, e.g. list, tuple, or dict.
:param normalize: Pass a function which will normalize the results. E.g. lambda s: s.lower().strip(' \"')
:param sort: Sort the resulting list (or lists) alphabeticlly.
:param result: The return type. One of set, tuple, list.
If groups are defined, and group is not, the result will be a list/tuple/set of tuples, e.g. [('+', "foo"), ...]
"""
self.separators = separators = list(separators)
self.quotes = quotes = list(quotes) if quotes else []
self.pattern = ''.join((
('[\s%s]*' % (''.join(separators), )), # Trap possible leading space or separators.
'(',
('[%s]%s' % (''.join([i for i in list(groups) if i is not None]), '?' if None in groups else '')) if groups else '', # Pass groups=('+','-') to handle optional leading + or -.
''.join([(r'%s[^%s]+%s|' % (i, i, i)) for i in quotes]) if quotes else '', # Match any amount of text (that isn't a quote) inside quotes.
('[^%s]+' % (''.join(separators), )), # Match any amount of text that isn't whitespace.
')',
('[%s]*' % (''.join(separators), )), # Match possible separator character.
))
self.regex = re.compile(self.pattern)
self.groups = list(groups)
self.group = dict if group is True else group
self.normalize = normalize
self.sort = sort
self.result = result
def __call__(self, value):
if isinstance(value, (binary, unicode)):
return self.split(value)
return self.join(value)
def split(self, value):
if not isinstance(value, (binary, unicode)): raise TypeError("Invalid type for argument 'value'.")
matches = self.regex.findall(value)
if hasattr(self.normalize, '__call__'): matches = [self.normalize(i) for i in matches]
if self.sort: matches.sort()
if not self.groups: return self.result(matches)
groups = dict([(i, list()) for i in self.groups])
if None not in groups: groups[None] = list() # To prevent errors.
for i in matches:
if i[0] in self.groups:
groups[i[0]].append(i[1:])
else:
groups[None].append(i)
if self.group is dict: return groups
if self.group is False or self.group is None:
results = []
for group in self.groups:
results.extend([(group, match) for match in groups[group]])
return self.result(results)
return self.group([[match for match in groups[group]] for group in self.groups])
def join(self, values):
def sanatize(keyword):
if not self.quotes:
return keyword
for sep in self.separators:
if sep in keyword:
return self.quotes[0] + keyword + self.quotes[0]
return keyword
if self.group is dict:
if not isinstance(values, dict):
raise ValueError("Dictionary grouped values must be passed as a dictionary.") # pragma: no cover
return self.separators[0].join([(prefix + sanatize(keyword)) for prefix, keywords in values for keyword in values[prefix]])
if not isinstance(values, (list, tuple, set)):
raise ValueError("Ungrouped values must be passed as a list, tuple, or set.")
return self.separators[0].join([sanatize(keyword) for keyword in values])
tags = KeywordProcessor(' \t,', normalize=lambda s: s.lower().strip('"'), sort=True, result=set)
tags.__doc__ = 'A lowercase-normalized ungrouped tagset processor, returning only unique tags.'
terms = KeywordProcessor(groups=[None, '+', '-'], group=tuple)
terms.__doc__ = 'A search keyword processor which retains quotes and groups into a dictionary of lists.'
|
marrow/util | marrow/util/convert.py | array | python | def array(input, separator=',', strip=True, empty=False):
if input is None:
return []
if isinstance(input, list):
if not empty:
return [i for i in input if i]
return input
if not isinstance(input, (binary, unicode)):
if not empty:
return [i for i in list(input) if i]
return list(input)
if not strip:
if not empty:
return [i for i in input.split(separator) if i]
return input.split(separator)
if not empty:
return [i for i in [i.strip() for i in input.split(separator)] if i]
return [i.strip() for i in input.split(separator)] | Convert the given input to a list.
Intelligently handles list and non-string values, returning
as-is and passing to the list builtin respectively.
The default optional keyword arguments allow for lists in the form::
"foo,bar, baz , diz" -> ['foo', 'bar', 'baz', 'diz']
For a far more advanced method of converting a string to a list of
values see :class:`KeywordProcessor`.
:param input: the value to convert to a list
:type input: any
:param separator: The character (or string) to use to split the
input. May be None to split on any whitespace.
:type separator: basestring or None
:param strip: If True, the values found by splitting will be stripped
of extraneous whitespace.
:type strip: bool
:param empty: If True, allow empty list items.
:type empty: bool
:returns: converted values as a list
:rtype: list | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/convert.py#L63-L118 | null | # encoding: utf-8
"""Useful datatype converters."""
import re
import collections
import inspect
from marrow.util.compat import binary, unicode
__all__ = ['boolean', 'array', 'integer', 'number', 'KeywordProcessor', 'tags', 'terms']
def boolean(input):
"""Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool
"""
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
def array(input, separator=',', strip=True, empty=False):
"""Convert the given input to a list.
Intelligently handles list and non-string values, returning
as-is and passing to the list builtin respectively.
The default optional keyword arguments allow for lists in the form::
"foo,bar, baz , diz" -> ['foo', 'bar', 'baz', 'diz']
For a far more advanced method of converting a string to a list of
values see :class:`KeywordProcessor`.
:param input: the value to convert to a list
:type input: any
:param separator: The character (or string) to use to split the
input. May be None to split on any whitespace.
:type separator: basestring or None
:param strip: If True, the values found by splitting will be stripped
of extraneous whitespace.
:type strip: bool
:param empty: If True, allow empty list items.
:type empty: bool
:returns: converted values as a list
:rtype: list
"""
if input is None:
return []
if isinstance(input, list):
if not empty:
return [i for i in input if i]
return input
if not isinstance(input, (binary, unicode)):
if not empty:
return [i for i in list(input) if i]
return list(input)
if not strip:
if not empty:
return [i for i in input.split(separator) if i]
return input.split(separator)
if not empty:
return [i for i in [i.strip() for i in input.split(separator)] if i]
return [i.strip() for i in input.split(separator)]
def integer(input):
"""Convert the given input to an integer value.
:param input: the value to convert to an integer
:type input: any
:returns: converted integer value
:rtype: int
"""
try:
return int(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to an integer value.".format(input))
def number(input):
"""Convert the given input to a floating point or integer value.
In cases of ambiguity, integers will be prefered to floating point.
:param input: the value to convert to a number
:type input: any
:returns: converted integer value
:rtype: float or int
"""
try:
return int(input)
except (TypeError, ValueError):
pass
try:
return float(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to a number.".format(input))
class KeywordProcessor(object):
"""Process user-supplied keywords, tags, or search terms.
This tries to be as flexible as possible while remaining efficient.
The vast majority of the work is done in the regular expression,
and the primary goal of this class is to generate the complex regular
expression.
Two example converters covering the most common cases are created for
you automatically as :data:`tags` and :data:`terms`.
"""
def __init__(self, separators=' \t', quotes="\"'", groups=[], group=False, normalize=None, sort=False, result=list):
"""Configure the processor.
:param separators: A list of acceptable separator characters. The first will be used for joins.
:type separator: list or string
:param quotes: Pass a list or tuple of allowable quotes. E.g. ["\"", "'"] or None to disable.
:param groups: Pass a string, list, or tuple of allowable prefixes. E.g. '+-' or None to disable.
:param group: Pass in the type you want to group by, e.g. list, tuple, or dict.
:param normalize: Pass a function which will normalize the results. E.g. lambda s: s.lower().strip(' \"')
:param sort: Sort the resulting list (or lists) alphabeticlly.
:param result: The return type. One of set, tuple, list.
If groups are defined, and group is not, the result will be a list/tuple/set of tuples, e.g. [('+', "foo"), ...]
"""
self.separators = separators = list(separators)
self.quotes = quotes = list(quotes) if quotes else []
self.pattern = ''.join((
('[\s%s]*' % (''.join(separators), )), # Trap possible leading space or separators.
'(',
('[%s]%s' % (''.join([i for i in list(groups) if i is not None]), '?' if None in groups else '')) if groups else '', # Pass groups=('+','-') to handle optional leading + or -.
''.join([(r'%s[^%s]+%s|' % (i, i, i)) for i in quotes]) if quotes else '', # Match any amount of text (that isn't a quote) inside quotes.
('[^%s]+' % (''.join(separators), )), # Match any amount of text that isn't whitespace.
')',
('[%s]*' % (''.join(separators), )), # Match possible separator character.
))
self.regex = re.compile(self.pattern)
self.groups = list(groups)
self.group = dict if group is True else group
self.normalize = normalize
self.sort = sort
self.result = result
def __call__(self, value):
if isinstance(value, (binary, unicode)):
return self.split(value)
return self.join(value)
def split(self, value):
if not isinstance(value, (binary, unicode)): raise TypeError("Invalid type for argument 'value'.")
matches = self.regex.findall(value)
if hasattr(self.normalize, '__call__'): matches = [self.normalize(i) for i in matches]
if self.sort: matches.sort()
if not self.groups: return self.result(matches)
groups = dict([(i, list()) for i in self.groups])
if None not in groups: groups[None] = list() # To prevent errors.
for i in matches:
if i[0] in self.groups:
groups[i[0]].append(i[1:])
else:
groups[None].append(i)
if self.group is dict: return groups
if self.group is False or self.group is None:
results = []
for group in self.groups:
results.extend([(group, match) for match in groups[group]])
return self.result(results)
return self.group([[match for match in groups[group]] for group in self.groups])
def join(self, values):
def sanatize(keyword):
if not self.quotes:
return keyword
for sep in self.separators:
if sep in keyword:
return self.quotes[0] + keyword + self.quotes[0]
return keyword
if self.group is dict:
if not isinstance(values, dict):
raise ValueError("Dictionary grouped values must be passed as a dictionary.") # pragma: no cover
return self.separators[0].join([(prefix + sanatize(keyword)) for prefix, keywords in values for keyword in values[prefix]])
if not isinstance(values, (list, tuple, set)):
raise ValueError("Ungrouped values must be passed as a list, tuple, or set.")
return self.separators[0].join([sanatize(keyword) for keyword in values])
tags = KeywordProcessor(' \t,', normalize=lambda s: s.lower().strip('"'), sort=True, result=set)
tags.__doc__ = 'A lowercase-normalized ungrouped tagset processor, returning only unique tags.'
terms = KeywordProcessor(groups=[None, '+', '-'], group=tuple)
terms.__doc__ = 'A search keyword processor which retains quotes and groups into a dictionary of lists.'
|
marrow/util | marrow/util/convert.py | number | python | def number(input):
try:
return int(input)
except (TypeError, ValueError):
pass
try:
return float(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to a number.".format(input)) | Convert the given input to a floating point or integer value.
In cases of ambiguity, integers will be prefered to floating point.
:param input: the value to convert to a number
:type input: any
:returns: converted integer value
:rtype: float or int | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/convert.py#L137-L157 | null | # encoding: utf-8
"""Useful datatype converters."""
import re
import collections
import inspect
from marrow.util.compat import binary, unicode
__all__ = ['boolean', 'array', 'integer', 'number', 'KeywordProcessor', 'tags', 'terms']
def boolean(input):
"""Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool
"""
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
def array(input, separator=',', strip=True, empty=False):
"""Convert the given input to a list.
Intelligently handles list and non-string values, returning
as-is and passing to the list builtin respectively.
The default optional keyword arguments allow for lists in the form::
"foo,bar, baz , diz" -> ['foo', 'bar', 'baz', 'diz']
For a far more advanced method of converting a string to a list of
values see :class:`KeywordProcessor`.
:param input: the value to convert to a list
:type input: any
:param separator: The character (or string) to use to split the
input. May be None to split on any whitespace.
:type separator: basestring or None
:param strip: If True, the values found by splitting will be stripped
of extraneous whitespace.
:type strip: bool
:param empty: If True, allow empty list items.
:type empty: bool
:returns: converted values as a list
:rtype: list
"""
if input is None:
return []
if isinstance(input, list):
if not empty:
return [i for i in input if i]
return input
if not isinstance(input, (binary, unicode)):
if not empty:
return [i for i in list(input) if i]
return list(input)
if not strip:
if not empty:
return [i for i in input.split(separator) if i]
return input.split(separator)
if not empty:
return [i for i in [i.strip() for i in input.split(separator)] if i]
return [i.strip() for i in input.split(separator)]
def integer(input):
"""Convert the given input to an integer value.
:param input: the value to convert to an integer
:type input: any
:returns: converted integer value
:rtype: int
"""
try:
return int(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to an integer value.".format(input))
def number(input):
"""Convert the given input to a floating point or integer value.
In cases of ambiguity, integers will be prefered to floating point.
:param input: the value to convert to a number
:type input: any
:returns: converted integer value
:rtype: float or int
"""
try:
return int(input)
except (TypeError, ValueError):
pass
try:
return float(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to a number.".format(input))
class KeywordProcessor(object):
"""Process user-supplied keywords, tags, or search terms.
This tries to be as flexible as possible while remaining efficient.
The vast majority of the work is done in the regular expression,
and the primary goal of this class is to generate the complex regular
expression.
Two example converters covering the most common cases are created for
you automatically as :data:`tags` and :data:`terms`.
"""
def __init__(self, separators=' \t', quotes="\"'", groups=[], group=False, normalize=None, sort=False, result=list):
"""Configure the processor.
:param separators: A list of acceptable separator characters. The first will be used for joins.
:type separator: list or string
:param quotes: Pass a list or tuple of allowable quotes. E.g. ["\"", "'"] or None to disable.
:param groups: Pass a string, list, or tuple of allowable prefixes. E.g. '+-' or None to disable.
:param group: Pass in the type you want to group by, e.g. list, tuple, or dict.
:param normalize: Pass a function which will normalize the results. E.g. lambda s: s.lower().strip(' \"')
:param sort: Sort the resulting list (or lists) alphabeticlly.
:param result: The return type. One of set, tuple, list.
If groups are defined, and group is not, the result will be a list/tuple/set of tuples, e.g. [('+', "foo"), ...]
"""
self.separators = separators = list(separators)
self.quotes = quotes = list(quotes) if quotes else []
self.pattern = ''.join((
('[\s%s]*' % (''.join(separators), )), # Trap possible leading space or separators.
'(',
('[%s]%s' % (''.join([i for i in list(groups) if i is not None]), '?' if None in groups else '')) if groups else '', # Pass groups=('+','-') to handle optional leading + or -.
''.join([(r'%s[^%s]+%s|' % (i, i, i)) for i in quotes]) if quotes else '', # Match any amount of text (that isn't a quote) inside quotes.
('[^%s]+' % (''.join(separators), )), # Match any amount of text that isn't whitespace.
')',
('[%s]*' % (''.join(separators), )), # Match possible separator character.
))
self.regex = re.compile(self.pattern)
self.groups = list(groups)
self.group = dict if group is True else group
self.normalize = normalize
self.sort = sort
self.result = result
def __call__(self, value):
if isinstance(value, (binary, unicode)):
return self.split(value)
return self.join(value)
def split(self, value):
if not isinstance(value, (binary, unicode)): raise TypeError("Invalid type for argument 'value'.")
matches = self.regex.findall(value)
if hasattr(self.normalize, '__call__'): matches = [self.normalize(i) for i in matches]
if self.sort: matches.sort()
if not self.groups: return self.result(matches)
groups = dict([(i, list()) for i in self.groups])
if None not in groups: groups[None] = list() # To prevent errors.
for i in matches:
if i[0] in self.groups:
groups[i[0]].append(i[1:])
else:
groups[None].append(i)
if self.group is dict: return groups
if self.group is False or self.group is None:
results = []
for group in self.groups:
results.extend([(group, match) for match in groups[group]])
return self.result(results)
return self.group([[match for match in groups[group]] for group in self.groups])
def join(self, values):
def sanatize(keyword):
if not self.quotes:
return keyword
for sep in self.separators:
if sep in keyword:
return self.quotes[0] + keyword + self.quotes[0]
return keyword
if self.group is dict:
if not isinstance(values, dict):
raise ValueError("Dictionary grouped values must be passed as a dictionary.") # pragma: no cover
return self.separators[0].join([(prefix + sanatize(keyword)) for prefix, keywords in values for keyword in values[prefix]])
if not isinstance(values, (list, tuple, set)):
raise ValueError("Ungrouped values must be passed as a list, tuple, or set.")
return self.separators[0].join([sanatize(keyword) for keyword in values])
tags = KeywordProcessor(' \t,', normalize=lambda s: s.lower().strip('"'), sort=True, result=set)
tags.__doc__ = 'A lowercase-normalized ungrouped tagset processor, returning only unique tags.'
terms = KeywordProcessor(groups=[None, '+', '-'], group=tuple)
terms.__doc__ = 'A search keyword processor which retains quotes and groups into a dictionary of lists.'
|
marrow/util | marrow/util/tuple.py | NamedTuple._replace | python | def _replace(self, **kwds):
'Return a new NamedTuple object replacing specified fields with new values'
result = self._make(map(kwds.pop, self._fields, self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result | Return a new NamedTuple object replacing specified fields with new values | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/tuple.py#L63-L70 | null | class NamedTuple(tuple):
"""A tuple with attribute access.
When creating instances, later values can be omitted and default to None.
"""
__slots__ = ()
_fields = None # OVERRIDE THIS IN A SUBCLASS
def __new__(cls, *args, **kw):
if (len(args) + len(kw)) > len(cls._fields):
raise TypeError('Expected no more than %d arguments, got %d' % (len(cls._fields), len(args) + len(kw)))
values = list(args) + ([None] * (len(cls._fields) - len(args)))
try:
for i in kw:
values[cls._fields.index(i)] = kw[i]
except ValueError:
raise TypeError('Unknown attribute name %r' % (i, ))
return tuple.__new__(cls, tuple(values))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
result = new(cls, iterable)
if len(result) != len(cls._fields):
raise TypeError('Expected %d arguments, got %d' % (len(cls._fields), len(result)))
return result
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ", ".join([("%s=%r" % (i, j)) for i, j in zip(self._fields, self)]))
def as_dict(self):
return dict(zip(self._fields, self))
@classmethod
def from_dict(cls, d, new=tuple.__new__, len=len):
values = [None] * len(cls._fields)
try:
for i in d:
values[cls._fields.index(i)] = d[i]
except ValueError:
raise TypeError('Unknown attribute name %r' % (i, ))
return cls(*values)
def _replace(self, **kwds):
'Return a new NamedTuple object replacing specified fields with new values'
result = self._make(map(kwds.pop, self._fields, self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getattr__(self, name):
if name not in self._fields:
raise AttributeError('Unknown field name: %r' % name)
return self[self._fields.index(name)]
def __getitem__(self, name):
if type(name) is int:
return tuple.__getitem__(self, name)
return tuple.__getitem__(self, self._fields.index(name))
def keys(self):
return self._fields
# The following are pseudo set operations.
def __or__(self, other):
"""Combine tuples, with values from self overriding ones from other."""
if type(self) != type(other):
raise TypeError("Can not merge dissimilar types.")
data = other.as_dict()
for i in self._fields:
if self[i] is None:
continue
data[i] = self[i]
return self.__class__(**data)
# The following operations only work on fully numeric NamedTuple instances.
def __add__(self, other):
if type(self) != type(other):
raise TypeError("Can not add dissimilar types.")
v = []
for n in self._fields:
v.append(((self[n] or 0) + (other[n] or 0)) or None)
return self.__class__(*v)
def __neg__(self):
data = self.as_dict()
for i in data:
if data[i] is None:
continue
data[i] = -data[i]
return self.__class__(**data)
|
marrow/util | marrow/util/compat.py | exception | python | def exception(maxTBlevel=None):
try:
from marrow.util.bunch import Bunch
cls, exc, trbk = sys.exc_info()
excName = cls.__name__
excArgs = getattr(exc, 'args', None)
excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
return Bunch(
name=excName,
cls=cls,
exception=exc,
trace=trbk,
formatted=excTb,
args=excArgs
)
finally:
del cls, exc, trbk | Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L55-L90 | null | # encoding: utf-8
"""Compatability features.
Python 2.5 is the minimum version supported by Marrow, and great effort is
being made to support Python 3.x.
"""
from __future__ import with_statement
import sys
import traceback
__all__ = ['formatdate', 'unquote', 'range', 'execfile', 'exception', 'binary',
'unicode', 'bytestring', 'native', 'unicodestr', 'uvalues', 'IO',
'parse_qsl']
if sys.version_info < (3, 0):
from email.utils import formatdate # DEPRECATE
from urllib import unquote_plus as unquote
from urlparse import parse_qsl
basestring = basestring
binary = bytes = str
unicode = unicode
range = xrange
execfile = execfile
else: # pragma: no cover
from email.utils import formatdate # DEPRECATE
from urllib.parse import unquote_plus as unquote_
from cgi import parse_qsl
basestring = str
binary = bytes = bytes
unicode = str
range = range
def execfile(filename, globals_=None, locals_=None):
if globals_ is None:
globals_ = globals()
if locals_ is None:
locals_ = globals_
exec(open(filename).read(), globals_, locals_)
def unquote(t):
"""Python 3 requires unquote to be passed unicode, but unicode
characters may be encoded using quoted bytes!
"""
return unquote_(t.decode('iso-8859-1')).encode('iso-8859-1')
def bytestring(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a bytestring."""
if isinstance(s, bytes):
return s
try:
return s.encode(encoding)
except UnicodeError:
return s.encode(fallback)
def native(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a native string."""
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback)
def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a string to unicode if it isn't already."""
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback)
def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a]
# In-memory binary stream representation for Python 2.5 or 2.6+.
if sys.version_info >= (2, 6):
from io import BytesIO as IO
else: # pragma: no cover
try:
from cStringIO import cStringIO as IO
except ImportError:
from StringIO import StringIO as IO
|
marrow/util | marrow/util/compat.py | native | python | def native(s, encoding='utf-8', fallback='iso-8859-1'):
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback) | Convert a given string into a native string. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L106-L115 | [
"def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):\n \"\"\"Convert a string to unicode if it isn't already.\"\"\"\n\n if isinstance(s, unicode):\n return s\n\n try:\n return s.decode(encoding)\n except UnicodeError:\n return s.decode(fallback)\n"
] | # encoding: utf-8
"""Compatability features.
Python 2.5 is the minimum version supported by Marrow, and great effort is
being made to support Python 3.x.
"""
from __future__ import with_statement
import sys
import traceback
__all__ = ['formatdate', 'unquote', 'range', 'execfile', 'exception', 'binary',
'unicode', 'bytestring', 'native', 'unicodestr', 'uvalues', 'IO',
'parse_qsl']
if sys.version_info < (3, 0):
from email.utils import formatdate # DEPRECATE
from urllib import unquote_plus as unquote
from urlparse import parse_qsl
basestring = basestring
binary = bytes = str
unicode = unicode
range = xrange
execfile = execfile
else: # pragma: no cover
from email.utils import formatdate # DEPRECATE
from urllib.parse import unquote_plus as unquote_
from cgi import parse_qsl
basestring = str
binary = bytes = bytes
unicode = str
range = range
def execfile(filename, globals_=None, locals_=None):
if globals_ is None:
globals_ = globals()
if locals_ is None:
locals_ = globals_
exec(open(filename).read(), globals_, locals_)
def unquote(t):
"""Python 3 requires unquote to be passed unicode, but unicode
characters may be encoded using quoted bytes!
"""
return unquote_(t.decode('iso-8859-1')).encode('iso-8859-1')
def exception(maxTBlevel=None):
"""Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x.
"""
try:
from marrow.util.bunch import Bunch
cls, exc, trbk = sys.exc_info()
excName = cls.__name__
excArgs = getattr(exc, 'args', None)
excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
return Bunch(
name=excName,
cls=cls,
exception=exc,
trace=trbk,
formatted=excTb,
args=excArgs
)
finally:
del cls, exc, trbk
def bytestring(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a bytestring."""
if isinstance(s, bytes):
return s
try:
return s.encode(encoding)
except UnicodeError:
return s.encode(fallback)
def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a string to unicode if it isn't already."""
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback)
def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a]
# In-memory binary stream representation for Python 2.5 or 2.6+.
if sys.version_info >= (2, 6):
from io import BytesIO as IO
else: # pragma: no cover
try:
from cStringIO import cStringIO as IO
except ImportError:
from StringIO import StringIO as IO
|
marrow/util | marrow/util/compat.py | unicodestr | python | def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback) | Convert a string to unicode if it isn't already. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L118-L127 | null | # encoding: utf-8
"""Compatability features.
Python 2.5 is the minimum version supported by Marrow, and great effort is
being made to support Python 3.x.
"""
from __future__ import with_statement
import sys
import traceback
__all__ = ['formatdate', 'unquote', 'range', 'execfile', 'exception', 'binary',
'unicode', 'bytestring', 'native', 'unicodestr', 'uvalues', 'IO',
'parse_qsl']
if sys.version_info < (3, 0):
from email.utils import formatdate # DEPRECATE
from urllib import unquote_plus as unquote
from urlparse import parse_qsl
basestring = basestring
binary = bytes = str
unicode = unicode
range = xrange
execfile = execfile
else: # pragma: no cover
from email.utils import formatdate # DEPRECATE
from urllib.parse import unquote_plus as unquote_
from cgi import parse_qsl
basestring = str
binary = bytes = bytes
unicode = str
range = range
def execfile(filename, globals_=None, locals_=None):
if globals_ is None:
globals_ = globals()
if locals_ is None:
locals_ = globals_
exec(open(filename).read(), globals_, locals_)
def unquote(t):
"""Python 3 requires unquote to be passed unicode, but unicode
characters may be encoded using quoted bytes!
"""
return unquote_(t.decode('iso-8859-1')).encode('iso-8859-1')
def exception(maxTBlevel=None):
    """Retrieve useful information about an exception.
    Returns a bunch (attribute-access dict) with the following information:
    * name: exception class name
    * cls: the exception class
    * exception: the exception instance
    * trace: the traceback instance
    * formatted: formatted traceback
    * args: arguments to the exception instance
    This functionality allows you to trap an exception in a method agnostic to
    differences between Python 2.x and 3.x.

    :param maxTBlevel: maximum number of traceback levels to format
        (passed straight through to ``traceback.format_exception``);
        ``None`` formats the whole traceback.
    """
    try:
        # Imported lazily here rather than at module level — presumably to
        # avoid a circular import with marrow.util.bunch; confirm before moving.
        from marrow.util.bunch import Bunch
        # Capture the exception currently being handled by the caller.
        cls, exc, trbk = sys.exc_info()
        excName = cls.__name__
        excArgs = getattr(exc, 'args', None)
        excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
        return Bunch(
            name=excName,
            cls=cls,
            exception=exc,
            trace=trbk,
            formatted=excTb,
            args=excArgs
        )
    finally:
        # Drop the local traceback references (classic Python 2 idiom to
        # avoid keeping frames alive via a reference cycle).
        del cls, exc, trbk
def bytestring(s, encoding='utf-8', fallback='iso-8859-1'):
    """Convert the given string into a bytestring.

    Bytes pass through untouched; text is encoded with *encoding*,
    falling back to *fallback* on failure.
    """
    if not isinstance(s, bytes):
        try:
            s = s.encode(encoding)
        except UnicodeError:
            s = s.encode(fallback)
    return s
def native(s, encoding='utf-8', fallback='iso-8859-1'):
    """Convert the given string into the interpreter's native ``str`` type."""
    if isinstance(s, str):
        return s
    # On Python 3 ``str is unicode`` (per the compat shims above), so the
    # native type is text; on Python 2 it is bytes.
    converter = unicodestr if str is unicode else bytestring
    return converter(s, encoding, fallback)
def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a]
# In-memory binary stream representation for Python 2.5 or 2.6+.
if sys.version_info >= (2, 6):
from io import BytesIO as IO
else: # pragma: no cover
try:
from cStringIO import cStringIO as IO
except ImportError:
from StringIO import StringIO as IO
|
marrow/util | marrow/util/compat.py | uvalues | python | def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a] | Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback. | train | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L130-L141 | null | # encoding: utf-8
"""Compatability features.
Python 2.5 is the minimum version supported by Marrow, and great effort is
being made to support Python 3.x.
"""
from __future__ import with_statement
import sys
import traceback
__all__ = ['formatdate', 'unquote', 'range', 'execfile', 'exception', 'binary',
'unicode', 'bytestring', 'native', 'unicodestr', 'uvalues', 'IO',
'parse_qsl']
if sys.version_info < (3, 0):
from email.utils import formatdate # DEPRECATE
from urllib import unquote_plus as unquote
from urlparse import parse_qsl
basestring = basestring
binary = bytes = str
unicode = unicode
range = xrange
execfile = execfile
else: # pragma: no cover
from email.utils import formatdate # DEPRECATE
from urllib.parse import unquote_plus as unquote_
from cgi import parse_qsl
basestring = str
binary = bytes = bytes
unicode = str
range = range
def execfile(filename, globals_=None, locals_=None):
if globals_ is None:
globals_ = globals()
if locals_ is None:
locals_ = globals_
exec(open(filename).read(), globals_, locals_)
def unquote(t):
"""Python 3 requires unquote to be passed unicode, but unicode
characters may be encoded using quoted bytes!
"""
return unquote_(t.decode('iso-8859-1')).encode('iso-8859-1')
def exception(maxTBlevel=None):
"""Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x.
"""
try:
from marrow.util.bunch import Bunch
cls, exc, trbk = sys.exc_info()
excName = cls.__name__
excArgs = getattr(exc, 'args', None)
excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
return Bunch(
name=excName,
cls=cls,
exception=exc,
trace=trbk,
formatted=excTb,
args=excArgs
)
finally:
del cls, exc, trbk
def bytestring(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a bytestring."""
if isinstance(s, bytes):
return s
try:
return s.encode(encoding)
except UnicodeError:
return s.encode(fallback)
def native(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a native string."""
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback)
def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a string to unicode if it isn't already."""
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback)
# In-memory binary stream representation for Python 2.5 or 2.6+.
if sys.version_info >= (2, 6):
from io import BytesIO as IO
else: # pragma: no cover
try:
from cStringIO import cStringIO as IO
except ImportError:
from StringIO import StringIO as IO
|
def _get_all_descendants(self):
    """Traverse and collect every descendant node of this node.

    :raises: CircularDependency if self is contained in its own descendants
    :return: a set containing all descendant nodes
    """
    descendants = set()
    # Start from the direct children...
    descendants = descendants.union(self.child_nodes)
    # ...then merge in each child's entire subtree (recursive walk).
    for child in self.child_nodes:
        descendants = descendants.union(child._get_all_descendants())
    # If this node is reachable from itself, the dependency graph has a cycle.
    if self in descendants:
        log.error('circular dependancy found in %s. Descendants: %s ', self, descendants)
        raise CircularDependency('Node %s contains itself in it\'s list of dependencies.' % (self.job.name,))
    return descendants
"""
"""
def __init__(self, job,
parents=None,
children=None,
pre_script=None,
pre_script_args=None,
post_script=None,
post_script_args=None,
variables=None, # VARS JobName macroname="string" [macroname="string"... ]
priority=None, # PRIORITY JobName PriorityValue
category=None, # CATEGORY JobName CategoryName
retry=None, # JobName NumberOfRetries [UNLESS-EXIT value]
retry_unless_exit_value=None,
pre_skip=None, # JobName non-zero-exit-code
abort_dag_on=None, # JobName AbortExitValue [RETURN DAGReturnValue]
abort_dag_on_return_value=None,
dir=None,
noop=None,
done=None,
):
"""
Node constructor
Args:
job:
parents:
children:
...
"""
self.job = job
self._parent_nodes = parents or set()
self._link_parent_nodes()
self._child_nodes = children or set()
self._link_child_nodes()
self.pre_script = pre_script
self.pre_script_args = pre_script_args
self.post_script = post_script
self.post_script_args = post_script_args
self.vars = variables or dict()
self.priority = priority
self.category = category
self.retry = retry
self.retry_unless_exit_value = retry_unless_exit_value
self.pre_skip = pre_skip
self.abort_dag_on = abort_dag_on
self.abort_dag_on_return_value = abort_dag_on_return_value
self.dir = dir
self.noop = noop
self.done = done
def __str__(self):
"""
Returns:
A string representing the node as it should be represented in the dag file.
"""
result = '%s %s %s\n' % (self.type, self.job.name, self.job.job_file)
if self.dir:
result += ' DIR %s' % (self.dir,)
if self.noop:
result += ' NOOP'
if self.done:
result += ' DONE'
return result
def __repr__(self):
"""
:return:
"""
return '<Node: %s parents(%s) children(%s)>' % (self.job.name, self._get_parent_names(), self._get_child_names())
@property
def type(self):
return 'JOB'
@property
def job(self):
"""
:return:
"""
return self._job
@job.setter
def job(self, job):
"""
:param job:
:return:
"""
if isinstance(job, HTCondorObjectBase):
self._job = job
else:
raise TypeError('%s is not of type Job or Workflow' % (str(job),))
@property
def parent_nodes(self):
"""
:return:
"""
return self._parent_nodes
@property
def child_nodes(self):
"""
:return:
"""
return self._child_nodes
# @property
# def pre_script(self):
# """
#
# :return:
# """
# return self.pre_script
#
# @pre_script.setter
# def pre_script(self, script):
# """
#
# :param script:
# :return:
# """
# self.pre_script = script
#
# @property
# def pre_script_args(self):
# return self.pre_script_args
#
# @pre_script_args.setter
# def pre_script_args(self, args):
# self.pre_script_args = args
#
# @property
# def post_script(self):
# """
#
# :return:
# """
# return self.post_script
#
# @post_script.setter
# def post_script(self, script):
# """
#
# :param script:
# :return:
# """
# self.post_script = script
#
# @property
# def post_script_args(self):
# return self.post_script_args
#
# @post_script_args.setter
# def post_script_args(self, args):
# self.post_script_args = args
#
# @property
# def vars(self):
# """
#
# """
# return self.vars
#
# @vars.setter
# def vars(self, vars):
# """
# vars setter
#
# Args:
# vars ():
# """
# self.vars = vars
#
# @property
# def priority(self):
# """
#
# """
# return self.priority
#
# @priority.setter
# def priority(self, priority):
# """
# priority setter
#
# Args:
# priority ():
# """
# self.priority = priority
#
# @property
# def category(self):
# """
#
# """
# return self.category
#
# @category.setter
# def category(self, category):
# """
# category setter
#
# Args:
# category ():
# """
# self.category = category
#
# @property
# def retry(self):
# """
# An integer indicating the number of times to retry running a node.
# """
# return self.retry
#
# @retry.setter
# def retry(self, retry):
# """
# retry setter
#
# Args:
# retry ():
# """
# self.retry = retry
#
# @property
# def retry_unless_exit_value(self):
# """
# An integer indicating the exit value for which not to retry a job
# """
# return self.retry_unless_exit_value
#
# @retry.setter
# def retry_unless_exit_value(self, retry_unless_exit_value):
# """
# retry_unless_exit_value setter
#
# Args:
# retry_unless_exit_value ():
# """
# self.retry_unless_exit_value = retry_unless_exit_value
#
# @property
# def pre_skip(self):
# """
#
# """
# return self.pre_skip
#
# @pre_skip.setter
# def pre_skip(self, pre_skip):
# """
# pre_skip setter
#
# Args:
# pre_skip ():
# """
# self.pre_skip = pre_skip
#
# @property
# def abort_dag_on(self):
# """
#
# """
# return self.abort_dag_on
#
# @abort_dag_on.setter
# def abort_dag_on(self, abort_dag_on):
# """
# abort_dag_on setter
#
# Args:
# abort_dag_on ():
# """
# self.abort_dag_on = abort_dag_on
#
# @property
# def abort_dag_on_return_value(self):
# """
#
# """
# return self.abort_dag_on
#
# @abort_dag_on_return_value.setter
# def abort_dag_on_return_value(self, abort_dag_on_return_value):
# """
# abort_dag_on_return_value setter
#
# Args:
# abort_dag_on_return_value ():
# """
# self.abort_dag_on_return_value = abort_dag_on_return_value
#
# @property
# def dir(self):
# """
#
# """
# return self._dir
#
# @dir.setter
# def dir(self, dir):
# """
# dir setter
#
# Args:
# dir ():
# """
# self.dir = dir
#
# @property
# def noop(self):
# """
#
# """
# return self.noop
#
# @noop.setter
# def noop(self, noop):
# """
# noop setter
#
# Args:
# noop ():
# """
# self.noop = noop
#
# @property
# def done(self):
# """
#
# """
# return self.done
#
# @done.setter
# def done(self, done):
# """
# done setter
#
# Args:
# done ():
# """
# self.done = done
def add_parent(self, parent):
"""
:param parent:
:return:
"""
assert isinstance(parent, Node)
self.parent_nodes.add(parent)
if self not in parent.child_nodes:
parent.add_child(self)
def remove_parent(self, parent):
"""
:param parent:
:return:
"""
assert isinstance(parent, Node)
self.parent_nodes.discard(parent)
if self in parent.child_nodes:
parent.remove_child(self)
def add_child(self, child):
"""
:param child:
:return:
"""
assert isinstance(child, Node)
self.child_nodes.add(child)
if self not in child.parent_nodes:
child.add_parent(self)
def remove_child(self, child):
"""
:param child:
:return:
"""
assert isinstance(child, Node)
self.child_nodes.discard(child)
if self in child.parent_nodes:
child.remove_parent(self)
def _add(self):
"""
:return:
"""
pass
def _remove(self):
"""
:return:
"""
pass
def get_all_family_nodes(self):
"""
:return:
"""
ancestors = self._get_all_ancestors()
descendants = self._get_all_descendants()
family_nodes = ancestors.union(descendants)
return family_nodes
def _get_all_ancestors(self):
"""
:return:
"""
ancestors = set()
ancestors = ancestors.union(self.parent_nodes)
for parent in self.parent_nodes:
ancestors = ancestors.union(parent._get_all_ancestors())
if self in ancestors:
log.error('circular dependancy found in %s. Ancestors: %s ', self, ancestors)
raise CircularDependency('Node %s contains itself in it\'s list of dependencies.' % (self.job.name,))
return ancestors
def _link_parent_nodes(self):
"""
:return:
"""
for parent in self.parent_nodes:
if self not in parent.child_nodes:
parent.add_child(self)
def _link_child_nodes(self):
"""
:return:
"""
for child in self.child_nodes:
if self not in child.parent_nodes:
child.add_parent(self)
@classmethod
def all_list_functions(cls):
return [cls.__str__.__name__,
cls.list_vars.__name__,
cls.list_relations.__name__,
cls.list_scripts.__name__,
cls.list_pre_skip.__name__,
cls.list_category.__name__,
cls.list_priority.__name__,
cls.list_options.__name__,
]
def list_relations(self):
"""
:return:
"""
result = ''
if len(self.child_nodes):
result += 'PARENT %s CHILD %s\n' % (self.job.name, self._get_child_names())
return result
def list_scripts(self):
result = ''
if self.pre_script:
result += 'SCRIPT PRE %s %s %s\n' % (self.job.name, self.pre_script, self.pre_script_args or '')
if self.post_script:
result += 'SCRIPT POST %s %s %s\n' % (self.job.name, self.post_script, self.post_script_args or '')
return result
def list_options(self):
result = ''
if self.retry:
result += 'RETRY %s %d\n' % (self.job.name, self.retry)
return result
def list_option(self, option):
result = ''
value = getattr(self, option)
if value:
result += '%s %s %s\n' % (option.upper(), self.job.name, str(value))
return result
def list_vars(self):
result = ''
if self.vars:
result = 'VARS %s' % (self.job.name,)
for key, value in self.vars.items():
result += ' %s="%s"' % (key, value)
result += '\n'
return result
def list_priority(self):
return self.list_option('priority')
def list_category(self):
return self.list_option('category')
#TODO retry=None, # JobName NumberOfRetries [UNLESS-EXIT value]
def list_pre_skip(self):
return self.list_option('pre_skip')
#TODO abort_dag_on=None, # JobName AbortExitValue [RETURN DAGReturnValue]
def _get_child_names(self):
"""
:return:
"""
return self._get_names(self.child_nodes)
def _get_parent_names(self):
"""
:return:
"""
return self._get_names(self.parent_nodes)
def _get_names(self, nodes):
"""
:return:
"""
names = []
for node in nodes:
names.append(node.job.name)
return ' '.join(names)
|
def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
    """Define the remote scheduler that this job/workflow is submitted to.

    Args:
        host (str): the hostname or ip address of the remote scheduler
        username (str, optional): the username used to connect to the remote scheduler. Default is 'root'.
        password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
        private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
        private_key_pass (str, optional): the passphrase for the private_key. Default is None.

    Note:
        Returns None; the RemoteClient is stored on ``self._remote``
        (the original docstring's "Returns" section was inaccurate).
    """
    self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
    # Unique per-scheduler token, used by other methods as the name of the
    # working directory on the remote machine.
    self._remote_id = uuid.uuid4().hex
"""
"""
NULL_CLUSTER_ID = 0
def __init__(self,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
"""
"""
object.__setattr__(self, '_cluster_id', self.NULL_CLUSTER_ID)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_input_files', remote_input_files or None)
object.__setattr__(self, '_cwd', working_directory)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_id', None)
if host:
self.set_scheduler(host=host, username=username, password=password,
private_key=private_key, private_key_pass=private_key_pass)
@property
def cluster_id(self):
"""
The id assigned to the job (called a cluster in HTConodr) when the job is submitted.
"""
return self._cluster_id
@property
def num_jobs(self):
return 1
@property
def scheduler(self):
"""
The remote scheduler where the job/workflow will be submitted
"""
return self._remote
@property
def remote_input_files(self):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
"""
return self._remote_input_files
@remote_input_files.setter
def remote_input_files(self, files):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
Args:
files (list or tuple of strings): A list or tuple of file paths to all input files and the executable that
are required to be copied to the remote server when submitting the job remotely.
Note:
File paths defined for remote_input_files should be relative to the job's working directory on the
client machine. They are copied into the working directory on the remote. Input file paths defined for
the submit description file should be relative to the initial directory on the remote server.
"""
self._remote_input_files = list(files) if files else None
def set_cwd(fn):
"""
Decorator to set the specified working directory to execute the function, and then restore the previous cwd.
"""
def wrapped(self, *args, **kwargs):
log.info('Calling function: %s with args=%s', fn, args if args else [])
cwd = os.getcwd()
log.info('Saved cwd: %s', cwd)
os.chdir(self._cwd)
log.info('Changing working directory to: %s', self._cwd)
try:
return fn(self, *args, **kwargs)
finally:
os.chdir(cwd)
log.info('Restored working directory to: %s', cwd)
return wrapped
def submit(self, args):
"""
"""
out, err = self._execute(args)
if err:
if re.match('WARNING|Renaming', err):
log.warning(err)
else:
raise HTCondorError(err)
log.info(out)
try:
self._cluster_id = int(re.search('(?<=cluster |\*\* Proc )(\d*)', out).group(1))
except:
self._cluster_id = -1
return self.cluster_id
def remove(self, options=[], sub_job_num=None):
"""Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None.
"""
args = ['condor_rm']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
args.append(job_id)
out, err = self._execute(args)
return out,err
def sync_remote_output(self):
"""Sync the initial directory containing the output and log files with the remote server.
"""
self._copy_output_from_remote()
def close_remote(self):
"""Cleans up and closes connection to remote server if defined.
"""
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote
@set_cwd
def _execute(self, args, shell=False, run_in_job_dir=True):
out = None
err = None
if self._remote:
log.info('Executing remote command %s', ' '.join(args))
cmd = ' '.join(args)
try:
if run_in_job_dir:
cmd = 'cd %s && %s' % (self._remote_id, cmd)
out = '\n'.join(self._remote.execute(cmd))
except RuntimeError as e:
err = str(e)
except SSHException as e:
err = str(e)
else:
log.info('Executing local command %s', ' '.join(args))
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
out, err = process.communicate()
out = out.decode() if isinstance(out, bytes) else out
err = err.decode() if isinstance(err, bytes) else err
log.info('Execute results - out: %s, err: %s', out, err)
return out, err
@set_cwd
def _copy_input_files_to_remote(self):
self._remote.put(self.remote_input_files, self._remote_id)
@set_cwd
def _copy_output_from_remote(self):
self._remote.get(os.path.join(self._remote_id, self.initial_dir))
@set_cwd
def _open(self, file_name, mode='w'):
if self._remote:
return self._remote.remote_file(os.path.join(self._remote_id, file_name), mode)
else:
return open(file_name, mode)
@set_cwd
def _make_dir(self, dir_name):
try:
log.info('making directory %s', dir_name)
if self._remote:
self._remote.makedirs(os.path.join(self._remote_id, dir_name))
else:
os.makedirs(dir_name)
except OSError:
log.warn('Unable to create directory %s. It may already exist.', dir_name)
|
def set_cwd(fn):
    """Decorator: execute *fn* with ``self._cwd`` as the working directory,
    restoring the previous working directory afterwards (even on error).
    """
    from functools import wraps  # function-scope import keeps module top level unchanged

    @wraps(fn)  # preserve fn's name/docstring on the wrapper for debugging
    def wrapped(self, *args, **kwargs):
        log.info('Calling function: %s with args=%s', fn, args if args else [])
        cwd = os.getcwd()
        log.info('Saved cwd: %s', cwd)
        os.chdir(self._cwd)
        log.info('Changing working directory to: %s', self._cwd)
        try:
            return fn(self, *args, **kwargs)
        finally:
            # Restore the caller's cwd no matter how fn exits.
            os.chdir(cwd)
            log.info('Restored working directory to: %s', cwd)
    return wrapped
"""
"""
NULL_CLUSTER_ID = 0
def __init__(self,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
"""
"""
object.__setattr__(self, '_cluster_id', self.NULL_CLUSTER_ID)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_input_files', remote_input_files or None)
object.__setattr__(self, '_cwd', working_directory)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_id', None)
if host:
self.set_scheduler(host=host, username=username, password=password,
private_key=private_key, private_key_pass=private_key_pass)
@property
def cluster_id(self):
"""
The id assigned to the job (called a cluster in HTConodr) when the job is submitted.
"""
return self._cluster_id
@property
def num_jobs(self):
return 1
@property
def scheduler(self):
"""
The remote scheduler where the job/workflow will be submitted
"""
return self._remote
def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
"""
Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler.
"""
self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
self._remote_id = uuid.uuid4().hex
@property
def remote_input_files(self):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
"""
return self._remote_input_files
@remote_input_files.setter
def remote_input_files(self, files):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
Args:
files (list or tuple of strings): A list or tuple of file paths to all input files and the executable that
are required to be copied to the remote server when submitting the job remotely.
Note:
File paths defined for remote_input_files should be relative to the job's working directory on the
client machine. They are copied into the working directory on the remote. Input file paths defined for
the submit description file should be relative to the initial directory on the remote server.
"""
self._remote_input_files = list(files) if files else None
def submit(self, args):
"""
"""
out, err = self._execute(args)
if err:
if re.match('WARNING|Renaming', err):
log.warning(err)
else:
raise HTCondorError(err)
log.info(out)
try:
self._cluster_id = int(re.search('(?<=cluster |\*\* Proc )(\d*)', out).group(1))
except:
self._cluster_id = -1
return self.cluster_id
def remove(self, options=[], sub_job_num=None):
"""Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None.
"""
args = ['condor_rm']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
args.append(job_id)
out, err = self._execute(args)
return out,err
def sync_remote_output(self):
"""Sync the initial directory containing the output and log files with the remote server.
"""
self._copy_output_from_remote()
def close_remote(self):
"""Cleans up and closes connection to remote server if defined.
"""
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote
@set_cwd
def _execute(self, args, shell=False, run_in_job_dir=True):
out = None
err = None
if self._remote:
log.info('Executing remote command %s', ' '.join(args))
cmd = ' '.join(args)
try:
if run_in_job_dir:
cmd = 'cd %s && %s' % (self._remote_id, cmd)
out = '\n'.join(self._remote.execute(cmd))
except RuntimeError as e:
err = str(e)
except SSHException as e:
err = str(e)
else:
log.info('Executing local command %s', ' '.join(args))
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
out, err = process.communicate()
out = out.decode() if isinstance(out, bytes) else out
err = err.decode() if isinstance(err, bytes) else err
log.info('Execute results - out: %s, err: %s', out, err)
return out, err
@set_cwd
def _copy_input_files_to_remote(self):
self._remote.put(self.remote_input_files, self._remote_id)
@set_cwd
def _copy_output_from_remote(self):
self._remote.get(os.path.join(self._remote_id, self.initial_dir))
@set_cwd
def _open(self, file_name, mode='w'):
if self._remote:
return self._remote.remote_file(os.path.join(self._remote_id, file_name), mode)
else:
return open(file_name, mode)
@set_cwd
def _make_dir(self, dir_name):
try:
log.info('making directory %s', dir_name)
if self._remote:
self._remote.makedirs(os.path.join(self._remote_id, dir_name))
else:
os.makedirs(dir_name)
except OSError:
log.warn('Unable to create directory %s. It may already exist.', dir_name)
|
def remove(self, options=None, sub_job_num=None):
    """Remove a job from the job queue, or from being executed.

    Args:
        options (list of str, optional): A list of command line options for the condor_rm command. For
            details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
            Defaults to None (no extra options).
        sub_job_num (int, optional): The number of the sub_job to remove rather than the whole cluster.
            Defaults to None.

    Returns:
        tuple: the (stdout, stderr) output of the condor_rm command.
    """
    args = ['condor_rm']
    # `options or []` also tolerates an explicit None argument; the old
    # mutable-default `options=[]` is avoided as a matter of hygiene.
    args.extend(options or [])
    # Explicit None check: sub-job 0 is a valid HTCondor process id
    # (e.g. "123.0"); the old truthiness test would have removed the
    # whole cluster instead.
    if sub_job_num is not None:
        job_id = '%s.%s' % (self.cluster_id, sub_job_num)
    else:
        job_id = str(self.cluster_id)
    args.append(job_id)
    out, err = self._execute(args)
    return out, err
"def wrapped(self, *args, **kwargs):\n log.info('Calling function: %s with args=%s', fn, args if args else [])\n cwd = os.getcwd()\n log.info('Saved cwd: %s', cwd)\n os.chdir(self._cwd)\n log.info('Changing working directory to: %s', self._cwd)\n try:\n return fn(self, *args, **kwargs)\n finally:\n os.chdir(cwd)\n log.info('Restored working directory to: %s', cwd)\n"
] | class HTCondorObjectBase(object):
"""
"""
NULL_CLUSTER_ID = 0
def __init__(self,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
"""
"""
object.__setattr__(self, '_cluster_id', self.NULL_CLUSTER_ID)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_input_files', remote_input_files or None)
object.__setattr__(self, '_cwd', working_directory)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_id', None)
if host:
self.set_scheduler(host=host, username=username, password=password,
private_key=private_key, private_key_pass=private_key_pass)
@property
def cluster_id(self):
"""
The id assigned to the job (called a cluster in HTConodr) when the job is submitted.
"""
return self._cluster_id
@property
def num_jobs(self):
return 1
@property
def scheduler(self):
"""
The remote scheduler where the job/workflow will be submitted
"""
return self._remote
def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
"""
Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler.
"""
self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
self._remote_id = uuid.uuid4().hex
@property
def remote_input_files(self):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
"""
return self._remote_input_files
@remote_input_files.setter
def remote_input_files(self, files):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
Args:
files (list or tuple of strings): A list or tuple of file paths to all input files and the executable that
are required to be copied to the remote server when submitting the job remotely.
Note:
File paths defined for remote_input_files should be relative to the job's working directory on the
client machine. They are copied into the working directory on the remote. Input file paths defined for
the submit description file should be relative to the initial directory on the remote server.
"""
self._remote_input_files = list(files) if files else None
def set_cwd(fn):
    """Decorator: execute *fn* from within ``self._cwd``, then restore the previous cwd.

    Applied (as ``@set_cwd``) to methods that touch job files so relative
    paths resolve against the job's working directory instead of the
    caller's current directory.

    NOTE(review): defined inside the class body and used on unbound methods,
    so it is a plain function, not a method.
    """
    def wrapped(self, *args, **kwargs):
        log.info('Calling function: %s with args=%s', fn, args if args else [])
        # Remember the caller's cwd so it can be restored even if fn raises.
        cwd = os.getcwd()
        log.info('Saved cwd: %s', cwd)
        os.chdir(self._cwd)
        log.info('Changing working directory to: %s', self._cwd)
        try:
            return fn(self, *args, **kwargs)
        finally:
            # Always restore, whether fn returned normally or raised.
            os.chdir(cwd)
            log.info('Restored working directory to: %s', cwd)
    return wrapped
def submit(self, args):
    """Run the assembled HTCondor submit command and record the cluster id.

    Args:
        args (list of str): The full submit command (e.g. ``['condor_submit', ...]``)
            passed through to ``self._execute``.

    Returns:
        int: The cluster id parsed from the submit output, or -1 if the
            output could not be parsed.

    Raises:
        HTCondorError: If the command produced error output other than a
            benign warning/renaming notice.
    """
    out, err = self._execute(args)
    if err:
        # HTCondor emits some harmless notices on stderr; only everything
        # else is treated as a fatal submission error.
        if re.match('WARNING|Renaming', err):
            log.warning(err)
        else:
            raise HTCondorError(err)
    log.info(out)
    try:
        # condor_submit prints '... submitted to cluster N.'; condor_submit_dag
        # prints '** Proc N.0' — the lookbehind covers both forms.
        self._cluster_id = int(re.search(r'(?<=cluster |\*\* Proc )(\d*)', out).group(1))
    except (AttributeError, ValueError):
        # AttributeError: pattern not found (re.search returned None);
        # ValueError: matched text was not an integer. The previous bare
        # 'except:' also swallowed KeyboardInterrupt/SystemExit.
        self._cluster_id = -1
    return self.cluster_id
def sync_remote_output(self):
"""Sync the initial directory containing the output and log files with the remote server.
"""
self._copy_output_from_remote()
def close_remote(self):
"""Cleans up and closes connection to remote server if defined.
"""
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote
@set_cwd
def _execute(self, args, shell=False, run_in_job_dir=True):
    """Run a command locally, or on the remote scheduler when one is set.

    Args:
        args (list of str): Command and arguments. Joined with spaces for
            remote execution; passed directly to ``subprocess.Popen`` locally.
        shell (bool, optional): Forwarded to ``subprocess.Popen``; only
            meaningful for local execution. Defaults to False.
        run_in_job_dir (bool, optional): For remote execution, prefix the
            command with ``cd <remote_id> &&`` so it runs in the remote job
            directory. Defaults to True.

    Returns:
        tuple: ``(out, err)`` as ``str`` (or ``None``); ``err`` holds the
        text of a remote RuntimeError/SSHException or the local stderr.
    """
    out = None
    err = None
    if self._remote:
        log.info('Executing remote command %s', ' '.join(args))
        cmd = ' '.join(args)
        try:
            if run_in_job_dir:
                cmd = 'cd %s && %s' % (self._remote_id, cmd)
            out = '\n'.join(self._remote.execute(cmd))
        except RuntimeError as e:
            # Remote failures are reported via the err string rather than
            # raised, matching the local subprocess contract below.
            err = str(e)
        except SSHException as e:
            err = str(e)
    else:
        log.info('Executing local command %s', ' '.join(args))
        process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
        out, err = process.communicate()
    # On Python 3 communicate() returns bytes; normalize both to str.
    out = out.decode() if isinstance(out, bytes) else out
    err = err.decode() if isinstance(err, bytes) else err
    log.info('Execute results - out: %s, err: %s', out, err)
    return out, err
@set_cwd
def _copy_input_files_to_remote(self):
self._remote.put(self.remote_input_files, self._remote_id)
@set_cwd
def _copy_output_from_remote(self):
self._remote.get(os.path.join(self._remote_id, self.initial_dir))
@set_cwd
def _open(self, file_name, mode='w'):
if self._remote:
return self._remote.remote_file(os.path.join(self._remote_id, file_name), mode)
else:
return open(file_name, mode)
@set_cwd
def _make_dir(self, dir_name):
    """Create *dir_name* (with parents), locally or on the remote scheduler.

    Args:
        dir_name (str): Directory path, relative to the job working directory.

    Failures are logged as warnings rather than raised, since the directory
    commonly already exists.
    """
    try:
        log.info('making directory %s', dir_name)
        if self._remote:
            self._remote.makedirs(os.path.join(self._remote_id, dir_name))
        else:
            os.makedirs(dir_name)
    except OSError:
        # Logger.warn is a deprecated alias; use warning() instead.
        log.warning('Unable to create directory %s. It may already exist.', dir_name)
|
tethysplatform/condorpy | condorpy/htcondor_object_base.py | HTCondorObjectBase.close_remote | python | def close_remote(self):
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote | Cleans up and closes connection to remote server if defined. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/htcondor_object_base.py#L167-L181 | [
"def remove(self, options=[], sub_job_num=None):\n \"\"\"Removes a job from the job queue, or from being executed.\n\n Args:\n options (list of str, optional): A list of command line options for the condor_rm command. For\n details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.\n Defaults to an empty list.\n job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None.\n\n \"\"\"\n args = ['condor_rm']\n args.extend(options)\n job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)\n args.append(job_id)\n out, err = self._execute(args)\n return out,err\n"
] | class HTCondorObjectBase(object):
"""
"""
NULL_CLUSTER_ID = 0
def __init__(self,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
"""
"""
object.__setattr__(self, '_cluster_id', self.NULL_CLUSTER_ID)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_input_files', remote_input_files or None)
object.__setattr__(self, '_cwd', working_directory)
object.__setattr__(self, '_remote', None)
object.__setattr__(self, '_remote_id', None)
if host:
self.set_scheduler(host=host, username=username, password=password,
private_key=private_key, private_key_pass=private_key_pass)
@property
def cluster_id(self):
"""
The id assigned to the job (called a cluster in HTCondor) when the job is submitted.
"""
return self._cluster_id
@property
def num_jobs(self):
return 1
@property
def scheduler(self):
"""
The remote scheduler where the job/workflow will be submitted
"""
return self._remote
def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
"""
Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler.
"""
self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
self._remote_id = uuid.uuid4().hex
@property
def remote_input_files(self):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
"""
return self._remote_input_files
@remote_input_files.setter
def remote_input_files(self, files):
"""A list of paths to files or directories to be copied to a remote server for remote job submission.
Args:
files (list or tuple of strings): A list or tuple of file paths to all input files and the executable that
are required to be copied to the remote server when submitting the job remotely.
Note:
File paths defined for remote_input_files should be relative to the job's working directory on the
client machine. They are copied into the working directory on the remote. Input file paths defined for
the submit description file should be relative to the initial directory on the remote server.
"""
self._remote_input_files = list(files) if files else None
def set_cwd(fn):
"""
Decorator to set the specified working directory to execute the function, and then restore the previous cwd.
"""
def wrapped(self, *args, **kwargs):
log.info('Calling function: %s with args=%s', fn, args if args else [])
cwd = os.getcwd()
log.info('Saved cwd: %s', cwd)
os.chdir(self._cwd)
log.info('Changing working directory to: %s', self._cwd)
try:
return fn(self, *args, **kwargs)
finally:
os.chdir(cwd)
log.info('Restored working directory to: %s', cwd)
return wrapped
def submit(self, args):
"""
"""
out, err = self._execute(args)
if err:
if re.match('WARNING|Renaming', err):
log.warning(err)
else:
raise HTCondorError(err)
log.info(out)
try:
self._cluster_id = int(re.search('(?<=cluster |\*\* Proc )(\d*)', out).group(1))
except:
self._cluster_id = -1
return self.cluster_id
def remove(self, options=[], sub_job_num=None):
"""Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
sub_job_num (int, optional): The number of the sub job to remove rather than the whole cluster. Defaults to None.
"""
args = ['condor_rm']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
args.append(job_id)
out, err = self._execute(args)
return out,err
def sync_remote_output(self):
"""Sync the initial directory containing the output and log files with the remote server.
"""
self._copy_output_from_remote()
@set_cwd
def _execute(self, args, shell=False, run_in_job_dir=True):
out = None
err = None
if self._remote:
log.info('Executing remote command %s', ' '.join(args))
cmd = ' '.join(args)
try:
if run_in_job_dir:
cmd = 'cd %s && %s' % (self._remote_id, cmd)
out = '\n'.join(self._remote.execute(cmd))
except RuntimeError as e:
err = str(e)
except SSHException as e:
err = str(e)
else:
log.info('Executing local command %s', ' '.join(args))
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
out, err = process.communicate()
out = out.decode() if isinstance(out, bytes) else out
err = err.decode() if isinstance(err, bytes) else err
log.info('Execute results - out: %s, err: %s', out, err)
return out, err
@set_cwd
def _copy_input_files_to_remote(self):
self._remote.put(self.remote_input_files, self._remote_id)
@set_cwd
def _copy_output_from_remote(self):
self._remote.get(os.path.join(self._remote_id, self.initial_dir))
@set_cwd
def _open(self, file_name, mode='w'):
if self._remote:
return self._remote.remote_file(os.path.join(self._remote_id, file_name), mode)
else:
return open(file_name, mode)
@set_cwd
def _make_dir(self, dir_name):
try:
log.info('making directory %s', dir_name)
if self._remote:
self._remote.makedirs(os.path.join(self._remote_id, dir_name))
else:
os.makedirs(dir_name)
except OSError:
log.warn('Unable to create directory %s. It may already exist.', dir_name)
|
tethysplatform/condorpy | condorpy/workflow.py | Workflow._update_status | python | def _update_status(self, sub_job_num=None):
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '').split('\n')
status_code = 0
for status_code_str in out:
try:
status_code = int(status_code_str.strip())
except:
pass
log.info('Job %s status: %d', job_id, status_code)
key = CONDOR_JOB_STATUSES[status_code]
return key | Gets the workflow status.
Return:
str: The current status of the workflow. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L145-L178 | [
"def wrapped(self, *args, **kwargs):\n log.info('Calling function: %s with args=%s', fn, args if args else [])\n cwd = os.getcwd()\n log.info('Saved cwd: %s', cwd)\n os.chdir(self._cwd)\n log.info('Changing working directory to: %s', self._cwd)\n try:\n return fn(self, *args, **kwargs)\n finally:\n os.chdir(cwd)\n log.info('Restored working directory to: %s', cwd)\n"
] | class Workflow(HTCondorObjectBase):
"""
"""
def __init__(self,
name,
config,
max_jobs,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
self._name = name
self._config = config
self._max_jobs = max_jobs
self._dag_file = ""
self._node_set = set()
super(Workflow, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
def __str__(self):
"""
"""
result = []
if self.config:
result.append('CONFIG {0}\n'.format(self.config))
self.complete_node_set()
list_functions = Node.all_list_functions()
options = ['']*len(list_functions)
for node in self._node_set:
for i, list_function_name in enumerate(list_functions):
list_function = getattr(node, list_function_name)
options[i] += list_function()
result.extend(options)
if self.max_jobs:
max_jobs_list = ''
for category, max_jobs in self.max_jobs.items():
max_jobs_list += 'MAXJOBS {0} {1}\n'.format(category, str(max_jobs))
result.append(max_jobs_list)
return '\n'.join(result)
def __repr__(self):
"""
"""
return '<DAG: %s>' % (self.name,)
@property
def name(self):
"""
"""
return self._name
@property
def num_jobs(self):
return len(self._node_set)
@property
def config(self):
"""
"""
return self._config
@config.setter
def config(self, config):
if os.path.exists(config):
self._config = config
@property
def max_jobs(self):
return self._max_jobs
def add_max_jobs_throttle(self, category, max_jobs):
"""
:param category:
:param max_jobs:
:return:
"""
self.max_jobs[category] = max_jobs
@property
def node_set(self):
"""
"""
if self.cluster_id != self.NULL_CLUSTER_ID:
self.update_node_ids()
return self._node_set
@property
def dag_file(self):
"""
"""
return '%s.dag' % (self.name,)
@property
def initial_dir(self):
"""
"""
return ''
@property
def status(self):
"""
Returns status of workflow as a whole (DAG status).
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def statuses(self):
"""
Get status of workflow nodes.
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_statuses()
def _update_statuses(self, sub_job_num=None):
"""
Update statuses of jobs nodes in workflow.
"""
# initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for node in self.node_set:
job = node.job
try:
job_status = job.status
status_dict[job_status] += 1
except (KeyError, HTCondorError):
status_dict['Unexpanded'] += 1
return status_dict
def update_node_ids(self, sub_job_num=None):
"""
Associate Jobs with respective cluster ids.
"""
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e))
def add_node(self, node):
"""
"""
assert isinstance(node, Node)
self._node_set.add(node)
def add_job(self, job):
"""
:param job:
:return:
"""
node = Node(job)
self.add_node(node)
return node
def submit(self, options=[]):
"""
ensures that all relatives of nodes in node_set are also added to the set before submitting
"""
self.complete_node_set()
self._write_job_file()
args = ['condor_submit_dag']
args.extend(options)
args.append(self.dag_file)
log.info('Submitting workflow %s with options: %s', self.name, args)
return super(Workflow, self).submit(args)
def wait(self, options=[]):
"""
:return:
"""
args = ['condor_wait']
args.extend(options)
args.append('%s.dagman.log' % (self.dag_file))
return self._execute(args)
def complete_node_set(self):
"""
"""
complete_node_set = set()
for node in self.node_set:
complete_node_set.add(node)
complete_node_set = complete_node_set.union(node.get_all_family_nodes())
self._node_set = complete_node_set
def _write_job_file(self):
"""
"""
log.debug('writing dag file "%s" in "%s".', self.dag_file, self._cwd)
self._make_dir(self.initial_dir)
dag_file = self._open(self.dag_file, 'w')
dag_file.write(self.__str__())
dag_file.close()
for node in self._node_set:
node.job._remote = self._remote
node.job._remote_id = self._remote_id
node.job._cwd = self._cwd
node.job._write_job_file()
|
tethysplatform/condorpy | condorpy/workflow.py | Workflow._update_statuses | python | def _update_statuses(self, sub_job_num=None):
# initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for node in self.node_set:
job = node.job
try:
job_status = job.status
status_dict[job_status] += 1
except (KeyError, HTCondorError):
status_dict['Unexpanded'] += 1
return status_dict | Update statuses of jobs nodes in workflow. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L180-L198 | null | class Workflow(HTCondorObjectBase):
"""
"""
def __init__(self,
name,
config,
max_jobs,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
self._name = name
self._config = config
self._max_jobs = max_jobs
self._dag_file = ""
self._node_set = set()
super(Workflow, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
def __str__(self):
"""
"""
result = []
if self.config:
result.append('CONFIG {0}\n'.format(self.config))
self.complete_node_set()
list_functions = Node.all_list_functions()
options = ['']*len(list_functions)
for node in self._node_set:
for i, list_function_name in enumerate(list_functions):
list_function = getattr(node, list_function_name)
options[i] += list_function()
result.extend(options)
if self.max_jobs:
max_jobs_list = ''
for category, max_jobs in self.max_jobs.items():
max_jobs_list += 'MAXJOBS {0} {1}\n'.format(category, str(max_jobs))
result.append(max_jobs_list)
return '\n'.join(result)
def __repr__(self):
"""
"""
return '<DAG: %s>' % (self.name,)
@property
def name(self):
"""
"""
return self._name
@property
def num_jobs(self):
return len(self._node_set)
@property
def config(self):
"""
"""
return self._config
@config.setter
def config(self, config):
if os.path.exists(config):
self._config = config
@property
def max_jobs(self):
return self._max_jobs
def add_max_jobs_throttle(self, category, max_jobs):
"""
:param category:
:param max_jobs:
:return:
"""
self.max_jobs[category] = max_jobs
@property
def node_set(self):
"""
"""
if self.cluster_id != self.NULL_CLUSTER_ID:
self.update_node_ids()
return self._node_set
@property
def dag_file(self):
"""
"""
return '%s.dag' % (self.name,)
@property
def initial_dir(self):
"""
"""
return ''
@property
def status(self):
"""
Returns status of workflow as a whole (DAG status).
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def statuses(self):
"""
Get status of workflow nodes.
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_statuses()
def _update_status(self, sub_job_num=None):
"""Gets the workflow status.
Return:
str: The current status of the workflow.
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '').split('\n')
status_code = 0
for status_code_str in out:
try:
status_code = int(status_code_str.strip())
except:
pass
log.info('Job %s status: %d', job_id, status_code)
key = CONDOR_JOB_STATUSES[status_code]
return key
def update_node_ids(self, sub_job_num=None):
"""
Associate Jobs with respective cluster ids.
"""
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e))
def add_node(self, node):
"""
"""
assert isinstance(node, Node)
self._node_set.add(node)
def add_job(self, job):
"""
:param job:
:return:
"""
node = Node(job)
self.add_node(node)
return node
def submit(self, options=[]):
"""
ensures that all relatives of nodes in node_set are also added to the set before submitting
"""
self.complete_node_set()
self._write_job_file()
args = ['condor_submit_dag']
args.extend(options)
args.append(self.dag_file)
log.info('Submitting workflow %s with options: %s', self.name, args)
return super(Workflow, self).submit(args)
def wait(self, options=[]):
"""
:return:
"""
args = ['condor_wait']
args.extend(options)
args.append('%s.dagman.log' % (self.dag_file))
return self._execute(args)
def complete_node_set(self):
"""
"""
complete_node_set = set()
for node in self.node_set:
complete_node_set.add(node)
complete_node_set = complete_node_set.union(node.get_all_family_nodes())
self._node_set = complete_node_set
def _write_job_file(self):
"""
"""
log.debug('writing dag file "%s" in "%s".', self.dag_file, self._cwd)
self._make_dir(self.initial_dir)
dag_file = self._open(self.dag_file, 'w')
dag_file.write(self.__str__())
dag_file.close()
for node in self._node_set:
node.job._remote = self._remote
node.job._remote_id = self._remote_id
node.job._cwd = self._cwd
node.job._write_job_file()
|
tethysplatform/condorpy | condorpy/workflow.py | Workflow.update_node_ids | python | def update_node_ids(self, sub_job_num=None):
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e)) | Associate Jobs with respective cluster ids. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L200-L269 | [
"def wrapped(self, *args, **kwargs):\n log.info('Calling function: %s with args=%s', fn, args if args else [])\n cwd = os.getcwd()\n log.info('Saved cwd: %s', cwd)\n os.chdir(self._cwd)\n log.info('Changing working directory to: %s', self._cwd)\n try:\n return fn(self, *args, **kwargs)\n finally:\n os.chdir(cwd)\n log.info('Restored working directory to: %s', cwd)\n"
] | class Workflow(HTCondorObjectBase):
"""
"""
def __init__(self,
name,
config,
max_jobs,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
self._name = name
self._config = config
self._max_jobs = max_jobs
self._dag_file = ""
self._node_set = set()
super(Workflow, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
def __str__(self):
"""
"""
result = []
if self.config:
result.append('CONFIG {0}\n'.format(self.config))
self.complete_node_set()
list_functions = Node.all_list_functions()
options = ['']*len(list_functions)
for node in self._node_set:
for i, list_function_name in enumerate(list_functions):
list_function = getattr(node, list_function_name)
options[i] += list_function()
result.extend(options)
if self.max_jobs:
max_jobs_list = ''
for category, max_jobs in self.max_jobs.items():
max_jobs_list += 'MAXJOBS {0} {1}\n'.format(category, str(max_jobs))
result.append(max_jobs_list)
return '\n'.join(result)
def __repr__(self):
"""
"""
return '<DAG: %s>' % (self.name,)
@property
def name(self):
"""
"""
return self._name
@property
def num_jobs(self):
return len(self._node_set)
@property
def config(self):
"""
"""
return self._config
@config.setter
def config(self, config):
if os.path.exists(config):
self._config = config
@property
def max_jobs(self):
return self._max_jobs
def add_max_jobs_throttle(self, category, max_jobs):
"""
:param category:
:param max_jobs:
:return:
"""
self.max_jobs[category] = max_jobs
@property
def node_set(self):
"""
"""
if self.cluster_id != self.NULL_CLUSTER_ID:
self.update_node_ids()
return self._node_set
@property
def dag_file(self):
"""
"""
return '%s.dag' % (self.name,)
@property
def initial_dir(self):
"""
"""
return ''
@property
def status(self):
"""
Returns status of workflow as a whole (DAG status).
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def statuses(self):
"""
Get status of workflow nodes.
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_statuses()
def _update_status(self, sub_job_num=None):
"""Gets the workflow status.
Return:
str: The current status of the workflow.
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '').split('\n')
status_code = 0
for status_code_str in out:
try:
status_code = int(status_code_str.strip())
except:
pass
log.info('Job %s status: %d', job_id, status_code)
key = CONDOR_JOB_STATUSES[status_code]
return key
def _update_statuses(self, sub_job_num=None):
    """Tally the status of every job node in the workflow.

    Returns a dict mapping each status name from CONDOR_JOB_STATUSES to
    the number of nodes currently in that state.

    NOTE(review): ``sub_job_num`` is accepted but never used -- apparently
    kept for signature symmetry with _update_status; confirm before removing.
    """
    # initialize status dictionary
    status_dict = dict()
    for val in CONDOR_JOB_STATUSES.values():
        status_dict[val] = 0
    for node in self.node_set:
        job = node.job
        try:
            job_status = job.status
            status_dict[job_status] += 1
        except (KeyError, HTCondorError):
            # Jobs whose status cannot be determined are counted as
            # not-yet-submitted.
            status_dict['Unexpanded'] += 1
    return status_dict
def add_node(self, node):
    """Add a Node to the workflow's node set.

    :param node: a condorpy Node instance.

    NOTE(review): validation uses ``assert`` (stripped under ``-O``);
    kept as-is because callers may catch AssertionError.
    """
    assert isinstance(node, Node)
    self._node_set.add(node)
def add_job(self, job):
    """Wrap *job* in a Node, register it with the workflow, and return the node.

    :param job: the Job to add
    :return: the newly created Node
    """
    wrapped = Node(job)
    self.add_node(wrapped)
    return wrapped
def submit(self, options=None):
    """Write the DAG file and submit it with condor_submit_dag.

    Ensures that all relatives of nodes in node_set are also added to the
    set before submitting.

    :param options: optional list of extra command line options for
        condor_submit_dag. Defaults to no extra options.
    :return: the cluster id assigned by the scheduler.
    """
    # Avoid the shared mutable-default-argument pitfall (was options=[]).
    options = list(options) if options else []
    self.complete_node_set()
    self._write_job_file()
    args = ['condor_submit_dag']
    args.extend(options)
    args.append(self.dag_file)
    log.info('Submitting workflow %s with options: %s', self.name, args)
    return super(Workflow, self).submit(args)
def wait(self, options=None):
    """Block until the workflow finishes by watching the dagman log.

    :param options: optional list of extra options for condor_wait.
    :return: (stdout, stderr) tuple from the condor_wait command.
    """
    # Avoid the shared mutable-default-argument pitfall (was options=[]).
    options = list(options) if options else []
    args = ['condor_wait']
    args.extend(options)
    args.append('%s.dagman.log' % (self.dag_file))
    return self._execute(args)
def complete_node_set(self):
    """Expand the node set to the transitive closure of every node's relatives."""
    closure = set()
    for member in self.node_set:
        closure.add(member)
        closure.update(member.get_all_family_nodes())
    self._node_set = closure
def _write_job_file(self):
    """Write the .dag file and each node's .job submit file to disk.

    Each node's job inherits this workflow's remote-execution context and
    working directory before its own submit file is written.
    """
    log.debug('writing dag file "%s" in "%s".', self.dag_file, self._cwd)
    self._make_dir(self.initial_dir)
    dag_file = self._open(self.dag_file, 'w')
    dag_file.write(self.__str__())
    dag_file.close()
    for node in self._node_set:
        # Propagate remote/session context so each job writes its submit
        # file to the same place (local dir or remote host).
        node.job._remote = self._remote
        node.job._remote_id = self._remote_id
        node.job._cwd = self._cwd
        node.job._write_job_file()
|
tethysplatform/condorpy | condorpy/workflow.py | Workflow.submit | python | def submit(self, options=[]):
self.complete_node_set()
self._write_job_file()
args = ['condor_submit_dag']
args.extend(options)
args.append(self.dag_file)
log.info('Submitting workflow %s with options: %s', self.name, args)
return super(Workflow, self).submit(args) | ensures that all relatives of nodes in node_set are also added to the set before submitting | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L287-L299 | [
"def submit(self, args):\n \"\"\"\n\n\n \"\"\"\n out, err = self._execute(args)\n if err:\n if re.match('WARNING|Renaming', err):\n log.warning(err)\n else:\n raise HTCondorError(err)\n log.info(out)\n try:\n self._cluster_id = int(re.search('(?<=cluster |\\*\\* Proc )(\\d*)', out).group(1))\n except:\n self._cluster_id = -1\n return self.cluster_id\n",
"def complete_node_set(self):\n \"\"\"\n \"\"\"\n complete_node_set = set()\n for node in self.node_set:\n complete_node_set.add(node)\n complete_node_set = complete_node_set.union(node.get_all_family_nodes())\n\n self._node_set = complete_node_set\n",
"def _write_job_file(self):\n \"\"\"\n \"\"\"\n log.debug('writing dag file \"%s\" in \"%s\".', self.dag_file, self._cwd)\n self._make_dir(self.initial_dir)\n dag_file = self._open(self.dag_file, 'w')\n dag_file.write(self.__str__())\n dag_file.close()\n for node in self._node_set:\n node.job._remote = self._remote\n node.job._remote_id = self._remote_id\n node.job._cwd = self._cwd\n node.job._write_job_file()\n"
] | class Workflow(HTCondorObjectBase):
"""
"""
def __init__(self,
name,
config,
max_jobs,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.'):
self._name = name
self._config = config
self._max_jobs = max_jobs
self._dag_file = ""
self._node_set = set()
super(Workflow, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
def __str__(self):
"""
"""
result = []
if self.config:
result.append('CONFIG {0}\n'.format(self.config))
self.complete_node_set()
list_functions = Node.all_list_functions()
options = ['']*len(list_functions)
for node in self._node_set:
for i, list_function_name in enumerate(list_functions):
list_function = getattr(node, list_function_name)
options[i] += list_function()
result.extend(options)
if self.max_jobs:
max_jobs_list = ''
for category, max_jobs in self.max_jobs.items():
max_jobs_list += 'MAXJOBS {0} {1}\n'.format(category, str(max_jobs))
result.append(max_jobs_list)
return '\n'.join(result)
def __repr__(self):
"""
"""
return '<DAG: %s>' % (self.name,)
@property
def name(self):
"""
"""
return self._name
@property
def num_jobs(self):
return len(self._node_set)
@property
def config(self):
"""
"""
return self._config
@config.setter
def config(self, config):
if os.path.exists(config):
self._config = config
@property
def max_jobs(self):
return self._max_jobs
def add_max_jobs_throttle(self, category, max_jobs):
"""
:param category:
:param max_jobs:
:return:
"""
self.max_jobs[category] = max_jobs
@property
def node_set(self):
"""
"""
if self.cluster_id != self.NULL_CLUSTER_ID:
self.update_node_ids()
return self._node_set
@property
def dag_file(self):
"""
"""
return '%s.dag' % (self.name,)
@property
def initial_dir(self):
"""
"""
return ''
@property
def status(self):
"""
Returns status of workflow as a whole (DAG status).
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def statuses(self):
"""
Get status of workflow nodes.
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_statuses()
def _update_status(self, sub_job_num=None):
"""Gets the workflow status.
Return:
str: The current status of the workflow.
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '').split('\n')
status_code = 0
for status_code_str in out:
try:
status_code = int(status_code_str.strip())
except:
pass
log.info('Job %s status: %d', job_id, status_code)
key = CONDOR_JOB_STATUSES[status_code]
return key
def _update_statuses(self, sub_job_num=None):
"""
Update statuses of jobs nodes in workflow.
"""
# initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for node in self.node_set:
job = node.job
try:
job_status = job.status
status_dict[job_status] += 1
except (KeyError, HTCondorError):
status_dict['Unexpanded'] += 1
return status_dict
def update_node_ids(self, sub_job_num=None):
"""
Associate Jobs with respective cluster ids.
"""
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e))
def add_node(self, node):
"""
"""
assert isinstance(node, Node)
self._node_set.add(node)
def add_job(self, job):
"""
:param job:
:return:
"""
node = Node(job)
self.add_node(node)
return node
def wait(self, options=[]):
"""
:return:
"""
args = ['condor_wait']
args.extend(options)
args.append('%s.dagman.log' % (self.dag_file))
return self._execute(args)
def complete_node_set(self):
"""
"""
complete_node_set = set()
for node in self.node_set:
complete_node_set.add(node)
complete_node_set = complete_node_set.union(node.get_all_family_nodes())
self._node_set = complete_node_set
def _write_job_file(self):
"""
"""
log.debug('writing dag file "%s" in "%s".', self.dag_file, self._cwd)
self._make_dir(self.initial_dir)
dag_file = self._open(self.dag_file, 'w')
dag_file.write(self.__str__())
dag_file.close()
for node in self._node_set:
node.job._remote = self._remote
node.job._remote_id = self._remote_id
node.job._cwd = self._cwd
node.job._write_job_file()
|
tethysplatform/condorpy | condorpy/job.py | Job.status | python | def status(self):
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status | The status | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L147-L160 | null | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.job_file | python | def job_file(self):
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file | The path to the submit description file representing this job. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L173-L180 | null | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.log_file | python | def log_file(self):
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log')) | The path to the log file for this job. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L183-L191 | [
"def get(self, attr, value=None, resolve=True):\n \"\"\"Get the value of an attribute from submit description file.\n\n Args:\n attr (str): The name of the attribute whose value should be returned.\n value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.\n resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If\n False then return the raw value of 'attr'. Defaults to True.\n\n Returns:\n str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.\n \"\"\"\n try:\n if resolve:\n value = self._resolve_attribute(attr)\n else:\n value = self.attributes[attr]\n except KeyError:\n pass\n return value\n",
"def set(self, attr, value):\n \"\"\"Set the value of an attribute in the submit description file.\n\n The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).\n The Python values will be reformatted into strings based on the standards described in\n the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html\n\n Args:\n attr (str): The name of the attribute to set.\n value (str): The value to assign to 'attr'.\n\n \"\"\"\n\n def escape_new_syntax(value, double_quote_escape='\"'):\n value = str(value)\n value = value.replace(\"'\", \"''\")\n value = value.replace('\"', '%s\"' % double_quote_escape)\n if ' ' in value or '\\t' in value:\n value = \"'%s'\" % value\n return value\n\n def escape_new_syntax_pre_post_script(value):\n return escape_new_syntax(value, '\\\\')\n\n def escape_remap(value):\n value = value.replace('=', '\\=')\n value = value.replace(';', '\\;')\n return value\n\n def join_function_template(join_string, escape_func):\n return lambda value: join_string.join([escape_func(i) for i in value])\n\n def quote_join_function_template(join_string, escape_func):\n return lambda value: join_function_template(join_string, escape_func)(value)\n\n join_functions = {'rempas': quote_join_function_template('; ', escape_remap),\n 'arguments': quote_join_function_template(' ', escape_new_syntax),\n 'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)\n }\n\n if value is False:\n value = 'false'\n elif value is True:\n value = 'true'\n elif isinstance(value, list) or isinstance(value, tuple):\n join_function = join_function_template(', ', str)\n for key in list(join_functions.keys()):\n if attr.endswith(key):\n join_function = join_functions[key]\n value = join_function(value)\n\n self.attributes[attr] = value\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.initial_dir | python | def initial_dir(self):
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir | The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L194-L210 | [
"def get(self, attr, value=None, resolve=True):\n \"\"\"Get the value of an attribute from submit description file.\n\n Args:\n attr (str): The name of the attribute whose value should be returned.\n value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.\n resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If\n False then return the raw value of 'attr'. Defaults to True.\n\n Returns:\n str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.\n \"\"\"\n try:\n if resolve:\n value = self._resolve_attribute(attr)\n else:\n value = self.attributes[attr]\n except KeyError:\n pass\n return value\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.submit | python | def submit(self, queue=None, options=[]):
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args) | Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L212-L236 | [
"def submit(self, args):\n \"\"\"\n\n\n \"\"\"\n out, err = self._execute(args)\n if err:\n if re.match('WARNING|Renaming', err):\n log.warning(err)\n else:\n raise HTCondorError(err)\n log.info(out)\n try:\n self._cluster_id = int(re.search('(?<=cluster |\\*\\* Proc )(\\d*)', out).group(1))\n except:\n self._cluster_id = -1\n return self.cluster_id\n",
"def _write_job_file(self):\n self._make_job_dirs()\n job_file = self._open(self.job_file, 'w')\n job_file.write(self.__str__())\n job_file.close()\n if self._remote:\n self._copy_input_files_to_remote()\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.wait | python | def wait(self, options=[], sub_job_num=None):
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err | Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L247-L265 | [
"def wrapped(self, *args, **kwargs):\n log.info('Calling function: %s with args=%s', fn, args if args else [])\n cwd = os.getcwd()\n log.info('Saved cwd: %s', cwd)\n os.chdir(self._cwd)\n log.info('Changing working directory to: %s', self._cwd)\n try:\n return fn(self, *args, **kwargs)\n finally:\n os.chdir(cwd)\n log.info('Restored working directory to: %s', cwd)\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.get | python | def get(self, attr, value=None, resolve=True):
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value | Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L267-L286 | [
"def _resolve_attribute(self, attribute):\n \"\"\"Recursively replaces references to other attributes with their value.\n\n Args:\n attribute (str): The name of the attribute to resolve.\n\n Returns:\n str: The resolved value of 'attribute'.\n\n \"\"\"\n value = self.attributes[attribute]\n if not value:\n return None\n resolved_value = re.sub('\\$\\((.*?)\\)',self._resolve_attribute_match, value)\n return resolved_value\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job.set | python | def set(self, attr, value):
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value | Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L288-L339 | [
"def join_function_template(join_string, escape_func):\n return lambda value: join_string.join([escape_func(i) for i in value])\n",
"def quote_join_function_template(join_string, escape_func):\n return lambda value: join_function_template(join_string, escape_func)(value)\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job._update_status | python | def _update_status(self, sub_job_num=None):
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict | Gets the job status.
Return:
str: The current status of the job | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L350-L394 | [
"def wrapped(self, *args, **kwargs):\n log.info('Calling function: %s with args=%s', fn, args if args else [])\n cwd = os.getcwd()\n log.info('Saved cwd: %s', cwd)\n os.chdir(self._cwd)\n log.info('Changing working directory to: %s', self._cwd)\n try:\n return fn(self, *args, **kwargs)\n finally:\n os.chdir(cwd)\n log.info('Restored working directory to: %s', cwd)\n"
] | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
    """Interface for CLI edit command.

    Note:
        This method is not implemented.

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
    """Wait for the job, or a single sub-job, to complete by running ``condor_wait``.

    Args:
        options (list of str, optional): A list of command line options for the condor_wait command.
            For details on valid options see:
            http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
            Defaults to an empty list. (The mutable default is safe: the list is only read.)
        sub_job_num (int, optional): The process number of a single sub-job to wait for.
            If None, waits for every sub-job in the cluster. Defaults to None.

    Returns:
        tuple: The (stdout, stderr) output of the condor_wait command.
    """
    args = ['condor_wait']
    args.extend(options)
    # '<cluster>.<proc>' targets one sub-job; a bare '<cluster>' waits on all of them.
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    # Remote paths are interpreted on the remote host, so don't absolutize them locally.
    if self._remote:
        abs_log_file = self.log_file
    else:
        abs_log_file = os.path.abspath(self.log_file)
    args.extend([abs_log_file, job_id])
    out, err = self._execute(args)
    return out, err
def get(self, attr, value=None, resolve=True):
    """Look up an attribute of the submit description file.

    Args:
        attr (str): The name of the attribute whose value should be returned.
        value (str, optional): A fallback returned when 'attr' is not defined. Defaults to None.
        resolve (bool, optional): When True, $(name) references inside the stored value are
            resolved to the referenced attributes' values; when False the raw stored value
            is returned. Defaults to True.

    Returns:
        str: The (optionally resolved) value of 'attr' if it exists, otherwise 'value'.
    """
    try:
        if resolve:
            return self._resolve_attribute(attr)
        return self.attributes[attr]
    except KeyError:
        return value
def set(self, attr, value):
    """Set the value of an attribute in the submit description file.

    The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
    The Python values will be reformatted into strings based on the standards described in
    the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html

    Args:
        attr (str): The name of the attribute to set.
        value: The value to assign to 'attr'. Booleans become 'true'/'false'; lists and
            tuples are joined into a single string, with attribute-name-specific escaping
            for '*remaps' and '*arguments'/'*Arguments' attributes.
    """
    def escape_new_syntax(value, double_quote_escape='"'):
        # Escape per HTCondor "new" argument syntax: single quotes are doubled,
        # double quotes are escaped, and whitespace-containing values are quoted.
        value = str(value)
        value = value.replace("'", "''")
        value = value.replace('"', '%s"' % double_quote_escape)
        if ' ' in value or '\t' in value:
            value = "'%s'" % value
        return value

    def escape_new_syntax_pre_post_script(value):
        # PRE/POST script arguments escape double quotes with a backslash instead.
        return escape_new_syntax(value, '\\')

    def escape_remap(value):
        # '=' and ';' are delimiters within remap lists and must be escaped.
        value = value.replace('=', '\\=')
        value = value.replace(';', '\\;')
        return value

    def join_function_template(join_string, escape_func):
        return lambda value: join_string.join([escape_func(i) for i in value])

    def quote_join_function_template(join_string, escape_func):
        return lambda value: join_function_template(join_string, escape_func)(value)

    # Attribute-name suffixes that need special list formatting.
    # BUGFIX: the first key was previously misspelled 'rempas', so attributes
    # such as 'transfer_output_remaps' never received remap escaping.
    join_functions = {'remaps': quote_join_function_template('; ', escape_remap),
                      'arguments': quote_join_function_template(' ', escape_new_syntax),
                      'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
                      }
    if value is False:
        value = 'false'
    elif value is True:
        value = 'true'
    elif isinstance(value, list) or isinstance(value, tuple):
        join_function = join_function_template(', ', str)
        for key in list(join_functions.keys()):
            if attr.endswith(key):
                join_function = join_functions[key]
        value = join_function(value)
    self.attributes[attr] = value
def delete(self, attr):
    """Remove the attribute named 'attr' from the submit description file.

    Args:
        attr (str): The name of the attribute to delete.

    Raises:
        KeyError: If 'attr' is not present.
    """
    del self.attributes[attr]
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
    """Write the submit description file to disk.

    Creates the job directories first, writes the rendered attributes (``__str__``),
    and, when a remote scheduler is configured, copies the input files to the
    remote host afterwards.
    """
    self._make_job_dirs()
    job_file = self._open(self.job_file, 'w')
    job_file.write(self.__str__())
    job_file.close()
    if self._remote:
        self._copy_input_files_to_remote()
def _make_job_dirs(self):
    """Create the initial directory and, if a 'logdir' attribute is set, the log directory under it."""
    self._make_dir(self.initial_dir)
    log_dir = self.get('logdir')
    if log_dir:
        self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
    """Replace ``$(name)`` references in the value of 'attribute' with the referenced values.

    Note:
        Despite the original description, this is a single ``re.sub`` pass, NOT a
        recursive resolution -- references contained in substituted values are not
        resolved again.

    Args:
        attribute (str): The name of the attribute to resolve.

    Returns:
        str: The resolved value of 'attribute', or None if the raw value is falsy.

    Raises:
        KeyError: If 'attribute' is not present in ``self.attributes``.
    """
    value = self.attributes[attribute]
    if not value:
        return None
    # Each $(name) token is handed to _resolve_attribute_match for replacement.
    resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
    return resolved_value
def _resolve_attribute_match(self, match):
    """Replace a single ``$(name)`` reference with the value of the named attribute.

    Used as the replacement callable for ``re.sub`` in ``_resolve_attribute``.

    Args:
        match (re.match object): A match whose group(1) is the referenced attribute name
            and whose group(0) is the full ``$(name)`` text.

    Returns:
        str: The cluster id for ``$(cluster)``; otherwise the attribute's value if it is
        defined, or the original ``$(name)`` text if it is not.
    """
    if match.group(1) == 'cluster':
        return str(self.cluster_id)
    return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job._resolve_attribute | python | def _resolve_attribute(self, attribute):
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value | Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L417-L431 | null | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
    """The overall status of the job as a single string.

    Returns 'Unexpanded' if the job has not been submitted yet (null cluster id).
    If every sub-job is in the same state, that state's name is returned;
    otherwise 'Various'.
    """
    if self.cluster_id == self.NULL_CLUSTER_ID:
        return "Unexpanded"
    status_dict = self.statuses
    # Determine the job status: a unanimous state wins, otherwise 'Various'.
    status = "Various"
    for key, val in status_dict.items():
        if val == self.num_jobs:
            status = key
    return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
    """Query HTCondor for the current status of the job (or one sub-job).

    Runs ``condor_q`` and ``condor_history`` and parses the concatenated
    JobStatus digits printed by their '-format' output.

    Args:
        sub_job_num (int, optional): The process number of a single sub-job to
            query. If None, statuses for all sub-jobs in the cluster are collected.

    Returns:
        dict: A mapping of status name (from CONDOR_JOB_STATUSES) to the number of
        sub-jobs currently in that status.

    Raises:
        HTCondorError: If the command reports an error, the job is not found, or
            fewer statuses than num_jobs are parsed.
    """
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    # Print only the JobStatus integer (no separators) for each matching job.
    format = ['-format', '"%d"', 'JobStatus']
    # condor_q covers queued/running jobs; condor_history covers completed ones.
    cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
    args = [cmd]
    out, err = self._execute(args, shell=True, run_in_job_dir=False)
    if err:
        log.error('Error while updating status for job %s: %s', job_id, err)
        raise HTCondorError(err)
    if not out:
        log.error('Error while updating status for job %s: Job not found.', job_id)
        raise HTCondorError('Job not found.')
    out = out.replace('\"', '')
    log.info('Job %s status: %s', job_id, out)
    if not sub_job_num:
        # Both condor_q and condor_history may report the same job, so keep at
        # most num_jobs status digits.
        if len(out) >= self.num_jobs:
            out = out[:self.num_jobs]
        else:
            msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
            log.error(msg)
            raise HTCondorError(msg)
    # Initialize the histogram with every known status name at zero.
    status_dict = dict()
    for val in CONDOR_JOB_STATUSES.values():
        status_dict[val] = 0
    for status_code_str in out:
        # Non-numeric characters fall back to status code 0 -- presumably the
        # 'unknown/unexpanded' entry of CONDOR_JOB_STATUSES; TODO confirm mapping.
        status_code = 0
        try:
            status_code = int(status_code_str)
        except ValueError:
            pass
        key = CONDOR_JOB_STATUSES[status_code]
        status_dict[key] += 1
    return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute_match(self, match):
"""Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
"""
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
tethysplatform/condorpy | condorpy/job.py | Job._resolve_attribute_match | python | def _resolve_attribute_match(self, match):
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0)) | Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute. | train | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L433-L443 | null | class Job(HTCondorObjectBase):
"""Represents a HTCondor job and the submit description file.
This class provides an object model representation for a computing job on HTCondor. It offers a wrapper for the
command line interface of HTCondor for interacting with the job. Jobs may be submitted to a local installation
of HTCondor or to a remote instance through ssh. An instance of this class will have certain pre-defined attributes
which are defined below. Any other attribute may be defined and is equivalent to calling the set method. This is
for ease in assigning attributes to the submit description file. For more information on valid attributes of the
submit description file see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
For more information about HTCondor see: http://research.cs.wisc.edu/htcondor/
Args:
name (str): A name to represent the job. This will be used to name the submit description file.
attributes (dict, optional): A dictionary of attributes that will be assigned to the submit description
file. Defaults to None.
executable (str, optional): The path to the executable file for the job. This path can be absolute or
relative to the current working directory when the code is executed. When submitting to a remote
scheduler the executable will be copied into the current working directory of the remote machine. This
is a shortcut to setting the 'Executable' attribute in the submit description file. Defaults to None.
arguments (str, optional): A space or comma delimited list of arguments for the executable. This is a
shortcut to setting the 'Arguments' attribute in the submit description file. Defaults to None.
num_jobs (int, optional): The number of sub-jobs that will be queued by this job. This is the argument
of the 'Queue' attribute in the submit description file. It can also be set when calling the submit
method. Defaults to 1.
host (str, optional): The host name of a remote server running an HTCondor scheduler daemon. Defaults to
None.
username (str, optional): The username for logging in to 'host'. Defaults to None.
password (str, optional): The password for 'username' when logging in to 'host'. Defaults to None.
private_key (str, optional): The path to a private key file providing passwordless ssh on 'host'.
Defaults to None.
private_key_pass (str, optional): The passphrase for the 'private_key' if required.
remote_input_files (list, optional): A list of files to be copied to a remote server for remote job submission.
working_directory (str, optional): The file path where execute calls should be run from.
"""
def __init__(self,
name,
attributes=None,
num_jobs=1,
host=None,
username=None,
password=None,
private_key=None,
private_key_pass=None,
remote_input_files=None,
working_directory='.',
**kwargs):
object.__setattr__(self, '_name', name)
if attributes:
assert isinstance(attributes, dict)
object.__setattr__(self, '_attributes', OrderedDict())
object.__setattr__(self, '_num_jobs', int(num_jobs))
object.__setattr__(self, '_job_file', '')
super(Job, self).__init__(host, username, password, private_key, private_key_pass, remote_input_files, working_directory)
attributes = attributes or OrderedDict()
attributes['job_name'] = name
attributes.update(kwargs)
for attr, value in list(attributes.items()):
self.set(attr, value)
def __str__(self):
return '\n'.join(self._list_attributes()) + '\n\nqueue %d\n' % (self.num_jobs)
def __repr__(self):
return '<Job: name=%s, num_jobs=%d, cluster_id=%s>' % (self.name, self.num_jobs, self.cluster_id)
def __copy__(self):
copy = Job(self.name)
copy.__dict__.update(self.__dict__)
return copy
def __deepcopy__(self, memo):
from copy import deepcopy
copy = self.__copy__()
copy._attributes = deepcopy(self.attributes, memo)
return copy
def __getattr__(self, item):
"""
A shortcut for the 'get' method.
Args:
item (str): The name of the attribute to get.
Returns:
The value assigned to 'item' if defined. Otherwise None.
"""
return self.get(item)
def __setattr__(self, key, value):
"""
A shortcut for the 'set' method.
Args:
key (str): The name of the attribute to set.
value (str): The value to assign to 'key'.
"""
if key in self.__dict__ or '_' + key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.set(key, value)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def attributes(self):
return self._attributes
@property
def num_jobs(self):
return self._num_jobs
@num_jobs.setter
def num_jobs(self, num_jobs):
self._num_jobs = int(num_jobs)
@property
def status(self):
"""The status
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
status_dict = self.statuses
# determine job status
status = "Various"
for key, val in status_dict.items():
if val == self.num_jobs:
status = key
return status
@property
def statuses(self):
"""
Return dictionary of all process statuses
"""
if self.cluster_id == self.NULL_CLUSTER_ID:
return "Unexpanded"
return self._update_status()
@property
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file
@property
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log'))
@property
def initial_dir(self):
"""The initial directory defined for the job.
All input files, and log files are relative to this directory. Output files will be copied into this
directory by default. This directory will be created if it doesn't already exist when the job is submitted.
Note:
The executable file is defined relative to the current working directory, NOT to the initial directory.
The initial directory is created in the current working directory.
"""
initial_dir = self.get('initialdir')
if not initial_dir:
initial_dir = os.curdir #TODO does this conflict with the working directory?
if self._remote and os.path.isabs(initial_dir):
raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
return initial_dir
def submit(self, queue=None, options=[]):
"""Submits the job either locally or to a remote server if it is defined.
Args:
queue (int, optional): The number of sub-jobs to run. This argmuent will set the num_jobs attribute of
this object. Defaults to None, meaning the value of num_jobs will be used.
options (list of str, optional): A list of command line options for the condor_submit command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html.
Defaults to an empty list.
"""
if not self.executable:
log.error('Job %s was submitted with no executable', self.name)
raise NoExecutable('You cannot submit a job without an executable')
self._num_jobs = queue or self.num_jobs
self._write_job_file()
args = ['condor_submit']
args.extend(options)
args.append(self.job_file)
log.info('Submitting job %s with options: %s', self.name, args)
return super(Job, self).submit(args)
def edit(self):
"""Interface for CLI edit command.
Note:
This method is not implemented.
"""
raise NotImplementedError("This method is not yet implemented")
def wait(self, options=[], sub_job_num=None):
"""Wait for the job, or a sub-job to complete.
Args:
options (list of str, optional): A list of command line options for the condor_wait command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html.
Defaults to an empty list.
job_num (int, optional): The number
"""
args = ['condor_wait']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
if self._remote:
abs_log_file = self.log_file
else:
abs_log_file = os.path.abspath(self.log_file)
args.extend([abs_log_file, job_id])
out, err = self._execute(args)
return out, err
def get(self, attr, value=None, resolve=True):
"""Get the value of an attribute from submit description file.
Args:
attr (str): The name of the attribute whose value should be returned.
value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.
resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If
False then return the raw value of 'attr'. Defaults to True.
Returns:
str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.
"""
try:
if resolve:
value = self._resolve_attribute(attr)
else:
value = self.attributes[attr]
except KeyError:
pass
return value
def set(self, attr, value):
"""Set the value of an attribute in the submit description file.
The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean).
The Python values will be reformatted into strings based on the standards described in
the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html
Args:
attr (str): The name of the attribute to set.
value (str): The value to assign to 'attr'.
"""
def escape_new_syntax(value, double_quote_escape='"'):
value = str(value)
value = value.replace("'", "''")
value = value.replace('"', '%s"' % double_quote_escape)
if ' ' in value or '\t' in value:
value = "'%s'" % value
return value
def escape_new_syntax_pre_post_script(value):
return escape_new_syntax(value, '\\')
def escape_remap(value):
value = value.replace('=', '\=')
value = value.replace(';', '\;')
return value
def join_function_template(join_string, escape_func):
return lambda value: join_string.join([escape_func(i) for i in value])
def quote_join_function_template(join_string, escape_func):
return lambda value: join_function_template(join_string, escape_func)(value)
join_functions = {'rempas': quote_join_function_template('; ', escape_remap),
'arguments': quote_join_function_template(' ', escape_new_syntax),
'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)
}
if value is False:
value = 'false'
elif value is True:
value = 'true'
elif isinstance(value, list) or isinstance(value, tuple):
join_function = join_function_template(', ', str)
for key in list(join_functions.keys()):
if attr.endswith(key):
join_function = join_functions[key]
value = join_function(value)
self.attributes[attr] = value
def delete(self, attr):
"""Delete an attribute from the submit description file
Args:
attr (str): The name of the attribute to delete.
"""
self.attributes.pop(attr)
def _update_status(self, sub_job_num=None):
"""Gets the job status.
Return:
str: The current status of the job
"""
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
format = ['-format', '"%d"', 'JobStatus']
cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
args = [cmd]
out, err = self._execute(args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while updating status for job %s: %s', job_id, err)
raise HTCondorError(err)
if not out:
log.error('Error while updating status for job %s: Job not found.', job_id)
raise HTCondorError('Job not found.')
out = out.replace('\"', '')
log.info('Job %s status: %s', job_id, out)
if not sub_job_num:
if len(out) >= self.num_jobs:
out = out[:self.num_jobs]
else:
msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
log.error(msg)
raise HTCondorError(msg)
#initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for status_code_str in out:
status_code = 0
try:
status_code = int(status_code_str)
except ValueError:
pass
key = CONDOR_JOB_STATUSES[status_code]
status_dict[key] += 1
return status_dict
def _list_attributes(self):
attribute_list = []
for k, v in list(self.attributes.items()):
if v:
attribute_list.append(k + ' = ' + str(v))
return attribute_list
def _write_job_file(self):
self._make_job_dirs()
job_file = self._open(self.job_file, 'w')
job_file.write(self.__str__())
job_file.close()
if self._remote:
self._copy_input_files_to_remote()
def _make_job_dirs(self):
self._make_dir(self.initial_dir)
log_dir = self.get('logdir')
if log_dir:
self._make_dir(os.path.join(self.initial_dir, log_dir))
def _resolve_attribute(self, attribute):
"""Recursively replaces references to other attributes with their value.
Args:
attribute (str): The name of the attribute to resolve.
Returns:
str: The resolved value of 'attribute'.
"""
value = self.attributes[attribute]
if not value:
return None
resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
return resolved_value
|
tylerbutler/engineer | engineer/util.py | ensure_exists | python | def ensure_exists(p, assume_dirs=False):
if path(p).ext and not assume_dirs:
path(p).dirname().makedirs_p()
else:
path(p).makedirs_p()
return p | Ensures a given path *p* exists.
If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/util.py#L214-L225 | null | # coding=utf-8
import collections
import errno
import filecmp
import hashlib
import itertools
import logging
import posixpath
import re
from itertools import chain, islice
import urlparse
# noinspection PyUnresolvedReferences
import translitcodec
# noinspection PyPackageRequirements
from path import path
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
_punctuation_regex = re.compile(r'[\t :!"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, length_limit=0, delimiter=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punctuation_regex.split(unicode(text).lower()):
word = word.encode('translit/long')
if word:
result.append(word)
slug = unicode(delimiter.join(result))
if length_limit > 0:
return slug[0:length_limit]
return slug
def get_class(class_string):
"""Given a string representing a path to a class, instantiates that class."""
parts = class_string.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def get_class_string(obj):
if isinstance(obj, basestring):
return obj
mod = obj.__module__
cls = getattr(obj, '__name__', obj.__class__.__name__)
return '.'.join((mod, cls))
def count_iterable(i):
# noinspection PyUnusedLocal
return sum(1 for e in i)
def expand_url(home, url):
join = urlparse.urljoin(home, url)
url2 = urlparse.urlparse(join)
the_path = posixpath.normpath(url2[2])
return urlparse.urlunparse(
(url2.scheme, url2.netloc, the_path, url2.params, url2.query, url2.fragment)
)
def urljoin(url1, *url2):
# This method is necessary because sometimes urlparse.urljoin simply doesn't work correctly
# when joining URL fragments.
return posixpath.join(url1, *url2)
def checksum(the_file):
with open(the_file) as f:
_checksum = hashlib.sha256(f.read()).hexdigest()
return _checksum
def chunk(seq, chunksize, process=iter):
it = iter(seq)
while True:
yield process(chain([it.next()], islice(it, chunksize - 1)))
def expand_path(path_list, root_path=None):
"""
Given a list of paths, returns a list of all parent paths (including the original paths).
If provided, ``root_path`` is used as the outermost path when expanding parent paths. If path_list contains
paths to files, only the directories where those files exist will be returned. This function only returns paths
to directories.
Example:
expand_path(['/tmp/foo/bar', '/tmp/foo/baz/file.txt' '/tmp/bar'], root_path='/tmp')
This call would return the following:
['/tmp/foo/bar/',
'/tmp/foo',
'/tmp/foo/baz/',
'/tmp/bar/']
If the ``root_path`` argument were ommitted in the above example,
"""
to_return = set()
path_list = wrap_list(path_list)
# expand ignore list to include all directories as individual entries
for p in path_list:
p = path(p)
if p.isdir():
to_return.add(p)
head, tail = p.splitpath()
while head and tail:
if root_path is not None and head == root_path:
break
to_return.add(head)
head, tail = head.splitpath()
return list(to_return)
def mirror_folder(source, target, delete_orphans=True, recurse=True, ignore_list=None, _level=0):
"""Mirrors a folder *source* into a target folder *target*."""
logger = logging.getLogger('engineer.util.mirror_folder')
def expand_tree(p):
tree = []
for node in path(p).walk():
tree.append(node)
return tree
report = {
'deleted': set([]),
'overwritten': set([]),
'new': set([])
}
d1 = source
d2 = target
logger.debug("Mirroring %s ==> %s" % (d1, d2))
if not d2.exists():
d2.makedirs()
compare = filecmp.dircmp(d1, d2)
# Expand the ignore list to be full paths
if ignore_list is None:
ignore_list = []
else:
ignore_list = [path(d2 / i).normpath() for i in ignore_list]
ignore_files = [f for f in ignore_list if f.isfile()]
ignore_list.extend(expand_path(ignore_files, root_path=d2))
# Delete orphan files/folders in the target folder
if delete_orphans:
for item in compare.right_only:
fullpath = path(d2 / item).normpath()
if fullpath in ignore_list:
logger.debug(
"%s ==> Ignored - path is in ignore list" % fullpath)
continue
if fullpath.isdir() and recurse:
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
if len(fullpath.listdir()) > 0:
report['deleted'].update(expand_tree(fullpath))
# noinspection PyArgumentList
fullpath.rmtree()
elif fullpath.isfile():
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
fullpath.remove()
# Copy new files and folders from the source to the target
for item in compare.left_only:
fullpath = d1 / item
if fullpath.isdir() and recurse:
logger.debug(
"Copying new directory %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copytree(d2 / item)
report['new'].add(d2 / item)
report['new'].update(expand_tree(d2 / item))
elif fullpath.isfile():
logger.debug("Copying new file %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copy2(d2)
report['new'].add(d2 / item)
# Copy modified files in the source to the target, overwriting the target file
for item in compare.diff_files:
logger.debug(
"Overwriting existing file %s ==> %s" % ((d1 / item), (d2 / item)))
(d1 / item).copy2(d2)
report['overwritten'].add(d2 / item)
# Recurse into subfolders that exist in both the source and target
if recurse:
for item in compare.common_dirs:
rpt = mirror_folder(d1 / item, d2 / item, delete_orphans, _level=_level + 1)
report['new'].update(rpt['new'])
report['overwritten'].update(rpt['overwritten'])
report['deleted'].update(rpt['deleted'])
return report
def wrap_list(item):
"""
Returns an object as a list.
If the object is a list, it is returned directly. If it is a tuple or set, it
is returned as a list. If it is another object, it is wrapped in a list and
returned.
"""
if item is None:
return []
elif isinstance(item, list):
return item
elif isinstance(item, (tuple, set)):
return list(item)
else:
return [item]
class Borg(object):
"""
A class that shares state among all instances of the class.
There seem to be a lot of differing opinions about whether this design
pattern is A Good Idea (tm) or not. It definitely seems better than
Singletons since it enforces *behavior*, not *structure*,
but it's also possible there's a better way to do it in Python with
judicious use of globals.
"""
_state = {}
def __new__(cls, *p, **k):
self = object.__new__(cls)
self.__dict__ = cls._state
return self
def relpath(the_path):
from engineer.conf import settings
return '/' + settings.OUTPUT_CACHE_DIR.relpathto(the_path)
def _min_css(css_string):
from cssmin import cssmin
return cssmin(css_string)
def _min_js(js_string):
import lpjsmin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
the_input = StringIO(js_string)
output = StringIO()
lpjsmin.minify_stream(the_input, output)
to_return = output.getvalue()
output.close()
the_input.close()
return to_return
def compress(item, compression_type):
if compression_type == 'css':
return _min_css(item)
elif compression_type == 'js':
return _min_js(item)
else:
raise ValueError("Unexpected compression_type: %s" % compression_type)
# setonce class from Ian Bicking: http://blog.ianbicking.org/easy-readonly-attributes.html
_setonce_count = itertools.count()
# noinspection PyPep8Naming
class setonce(object):
"""
Allows an attribute to be set once (typically in __init__), but
be read-only afterwards.
Example::
>>> class A(object):
... x = setonce()
>>> a = A()
>>> a.x
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute '_setonce_attr_0'
>>> a.x = 10
>>> a.x
10
>>> a.x = 20
Traceback (most recent call last):
...
AttributeError: Attribute already set
>>> del a.x
>>> a.x = 20
>>> a.x
20
You can also force a set to occur::
>>> A.x.set(a, 30)
>>> a.x
30
"""
def __init__(self, doc=None):
self._count = _setonce_count.next()
self._name = '_setonce_attr_%s' % self._count
self.__doc__ = doc
# noinspection PyUnusedLocal
def __get__(self, obj, obj_type=None):
if obj is None:
return self
return getattr(obj, self._name)
def __set__(self, obj, value):
try:
getattr(obj, self._name)
except AttributeError:
setattr(obj, self._name, value)
else:
raise AttributeError("Attribute already set")
def set(self, obj, value):
setattr(obj, self._name, value)
def __delete__(self, obj):
delattr(obj, self._name)
def update_additive(dict1, dict2):
"""
A utility method to update a dict or other mapping type with the contents of another dict.
This method updates the contents of ``dict1``, overwriting any existing key/value pairs in ``dict1`` with the
corresponding key/value pair in ``dict2``. If the value in ``dict2`` is a mapping type itself, then
``update_additive`` is called recursively. This ensures that nested maps are updated rather than simply
overwritten.
This method should be functionally equivalent to ``dict.update()`` except in the case of values that are
themselves nested maps. If you know that ``dict1`` does not have nested maps,
or you want to overwrite all values with the exact content of then you should simply use ``dict.update()``.
"""
for key, value in dict2.items():
if key not in dict1:
dict1[key] = value
else: # key in dict1
if isinstance(dict1[key], collections.Mapping):
assert isinstance(value, collections.Mapping)
update_additive(dict1[key], value)
else: # value is not a mapping type
assert not isinstance(value, collections.Mapping)
dict1[key] = value
def flatten_dict(d, parent_key='', separator='_'):
"""
Flattens any nested dict-like object into a non-nested form. The resulting dict will have keys of the form
``k1_nestedk2_nestedk3`` for nested keys. You can change the separator by passing in a value to
``separator``.
Example::
>>> import collections
>>> d = { 'a': 1,
... 'b': { 'a': 2,
... 'b': 3 },
... 'c': { 'a': 4,
... 'b': { 'a': 5,
... 'b': 6 },
... 'c': { 'a': 7 }
... }
... }
>>> flatten(d)
{'a': 1, 'b_a': 2, 'b_b': 3, 'c_a': 4, 'c_b_a': 5, 'c_b_b': 6, 'c_c_a': 7}
"""
items = []
for k, v in d.iteritems():
new_key = parent_key + separator + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def flatten_list(l):
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in flatten_list(el):
yield sub
else:
yield el
def has_files(the_path):
"""Given a path, returns whether the path has any files in it or any subfolders. Works recursively."""
the_path = path(the_path)
try:
for _ in the_path.walkfiles():
return True
return False
except OSError as ex:
if ex.errno == errno.ENOENT:
# ignore
return False
else:
raise
def diff_dir(dir_cmp, left_path=True):
"""
A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works
recursively.
:param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.
:param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right.
"""
for name in dir_cmp.diff_files:
if left_path:
path_root = dir_cmp.left
else:
path_root = dir_cmp.right
yield path.joinpath(path_root, name)
for sub in dir_cmp.subdirs.values():
# Need to iterate over the recursive call to make sure the individual values are yielded up the stack
for the_dir in diff_dir(sub, left_path):
yield the_dir
def make_precompiled_reference(a_path):
if not a_path.endswith('.less'):
return a_path
else:
return '.'.join(a_path.split('.')[0:-1]) + '_precompiled.css'
|
tylerbutler/engineer | engineer/util.py | wrap_list | python | def wrap_list(item):
if item is None:
return []
elif isinstance(item, list):
return item
elif isinstance(item, (tuple, set)):
return list(item)
else:
return [item] | Returns an object as a list.
If the object is a list, it is returned directly. If it is a tuple or set, it
is returned as a list. If it is another object, it is wrapped in a list and
returned. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/util.py#L228-L243 | null | # coding=utf-8
import collections
import errno
import filecmp
import hashlib
import itertools
import logging
import posixpath
import re
from itertools import chain, islice
import urlparse
# noinspection PyUnresolvedReferences
import translitcodec
# noinspection PyPackageRequirements
from path import path
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
_punctuation_regex = re.compile(r'[\t :!"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, length_limit=0, delimiter=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punctuation_regex.split(unicode(text).lower()):
word = word.encode('translit/long')
if word:
result.append(word)
slug = unicode(delimiter.join(result))
if length_limit > 0:
return slug[0:length_limit]
return slug
def get_class(class_string):
"""Given a string representing a path to a class, instantiates that class."""
parts = class_string.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def get_class_string(obj):
if isinstance(obj, basestring):
return obj
mod = obj.__module__
cls = getattr(obj, '__name__', obj.__class__.__name__)
return '.'.join((mod, cls))
def count_iterable(i):
# noinspection PyUnusedLocal
return sum(1 for e in i)
def expand_url(home, url):
join = urlparse.urljoin(home, url)
url2 = urlparse.urlparse(join)
the_path = posixpath.normpath(url2[2])
return urlparse.urlunparse(
(url2.scheme, url2.netloc, the_path, url2.params, url2.query, url2.fragment)
)
def urljoin(url1, *url2):
# This method is necessary because sometimes urlparse.urljoin simply doesn't work correctly
# when joining URL fragments.
return posixpath.join(url1, *url2)
def checksum(the_file):
with open(the_file) as f:
_checksum = hashlib.sha256(f.read()).hexdigest()
return _checksum
def chunk(seq, chunksize, process=iter):
it = iter(seq)
while True:
yield process(chain([it.next()], islice(it, chunksize - 1)))
def expand_path(path_list, root_path=None):
"""
Given a list of paths, returns a list of all parent paths (including the original paths).
If provided, ``root_path`` is used as the outermost path when expanding parent paths. If path_list contains
paths to files, only the directories where those files exist will be returned. This function only returns paths
to directories.
Example:
expand_path(['/tmp/foo/bar', '/tmp/foo/baz/file.txt' '/tmp/bar'], root_path='/tmp')
This call would return the following:
['/tmp/foo/bar/',
'/tmp/foo',
'/tmp/foo/baz/',
'/tmp/bar/']
If the ``root_path`` argument were ommitted in the above example,
"""
to_return = set()
path_list = wrap_list(path_list)
# expand ignore list to include all directories as individual entries
for p in path_list:
p = path(p)
if p.isdir():
to_return.add(p)
head, tail = p.splitpath()
while head and tail:
if root_path is not None and head == root_path:
break
to_return.add(head)
head, tail = head.splitpath()
return list(to_return)
def mirror_folder(source, target, delete_orphans=True, recurse=True, ignore_list=None, _level=0):
"""Mirrors a folder *source* into a target folder *target*."""
logger = logging.getLogger('engineer.util.mirror_folder')
def expand_tree(p):
tree = []
for node in path(p).walk():
tree.append(node)
return tree
report = {
'deleted': set([]),
'overwritten': set([]),
'new': set([])
}
d1 = source
d2 = target
logger.debug("Mirroring %s ==> %s" % (d1, d2))
if not d2.exists():
d2.makedirs()
compare = filecmp.dircmp(d1, d2)
# Expand the ignore list to be full paths
if ignore_list is None:
ignore_list = []
else:
ignore_list = [path(d2 / i).normpath() for i in ignore_list]
ignore_files = [f for f in ignore_list if f.isfile()]
ignore_list.extend(expand_path(ignore_files, root_path=d2))
# Delete orphan files/folders in the target folder
if delete_orphans:
for item in compare.right_only:
fullpath = path(d2 / item).normpath()
if fullpath in ignore_list:
logger.debug(
"%s ==> Ignored - path is in ignore list" % fullpath)
continue
if fullpath.isdir() and recurse:
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
if len(fullpath.listdir()) > 0:
report['deleted'].update(expand_tree(fullpath))
# noinspection PyArgumentList
fullpath.rmtree()
elif fullpath.isfile():
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
fullpath.remove()
# Copy new files and folders from the source to the target
for item in compare.left_only:
fullpath = d1 / item
if fullpath.isdir() and recurse:
logger.debug(
"Copying new directory %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copytree(d2 / item)
report['new'].add(d2 / item)
report['new'].update(expand_tree(d2 / item))
elif fullpath.isfile():
logger.debug("Copying new file %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copy2(d2)
report['new'].add(d2 / item)
# Copy modified files in the source to the target, overwriting the target file
for item in compare.diff_files:
logger.debug(
"Overwriting existing file %s ==> %s" % ((d1 / item), (d2 / item)))
(d1 / item).copy2(d2)
report['overwritten'].add(d2 / item)
# Recurse into subfolders that exist in both the source and target
if recurse:
for item in compare.common_dirs:
rpt = mirror_folder(d1 / item, d2 / item, delete_orphans, _level=_level + 1)
report['new'].update(rpt['new'])
report['overwritten'].update(rpt['overwritten'])
report['deleted'].update(rpt['deleted'])
return report
def ensure_exists(p, assume_dirs=False):
"""
Ensures a given path *p* exists.
If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files.
"""
if path(p).ext and not assume_dirs:
path(p).dirname().makedirs_p()
else:
path(p).makedirs_p()
return p
class Borg(object):
"""
A class that shares state among all instances of the class.
There seem to be a lot of differing opinions about whether this design
pattern is A Good Idea (tm) or not. It definitely seems better than
Singletons since it enforces *behavior*, not *structure*,
but it's also possible there's a better way to do it in Python with
judicious use of globals.
"""
_state = {}
def __new__(cls, *p, **k):
self = object.__new__(cls)
self.__dict__ = cls._state
return self
def relpath(the_path):
from engineer.conf import settings
return '/' + settings.OUTPUT_CACHE_DIR.relpathto(the_path)
def _min_css(css_string):
from cssmin import cssmin
return cssmin(css_string)
def _min_js(js_string):
import lpjsmin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
the_input = StringIO(js_string)
output = StringIO()
lpjsmin.minify_stream(the_input, output)
to_return = output.getvalue()
output.close()
the_input.close()
return to_return
def compress(item, compression_type):
if compression_type == 'css':
return _min_css(item)
elif compression_type == 'js':
return _min_js(item)
else:
raise ValueError("Unexpected compression_type: %s" % compression_type)
# setonce class from Ian Bicking: http://blog.ianbicking.org/easy-readonly-attributes.html
_setonce_count = itertools.count()
# noinspection PyPep8Naming
class setonce(object):
"""
Allows an attribute to be set once (typically in __init__), but
be read-only afterwards.
Example::
>>> class A(object):
... x = setonce()
>>> a = A()
>>> a.x
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute '_setonce_attr_0'
>>> a.x = 10
>>> a.x
10
>>> a.x = 20
Traceback (most recent call last):
...
AttributeError: Attribute already set
>>> del a.x
>>> a.x = 20
>>> a.x
20
You can also force a set to occur::
>>> A.x.set(a, 30)
>>> a.x
30
"""
def __init__(self, doc=None):
self._count = _setonce_count.next()
self._name = '_setonce_attr_%s' % self._count
self.__doc__ = doc
# noinspection PyUnusedLocal
def __get__(self, obj, obj_type=None):
if obj is None:
return self
return getattr(obj, self._name)
def __set__(self, obj, value):
try:
getattr(obj, self._name)
except AttributeError:
setattr(obj, self._name, value)
else:
raise AttributeError("Attribute already set")
def set(self, obj, value):
setattr(obj, self._name, value)
def __delete__(self, obj):
delattr(obj, self._name)
def update_additive(dict1, dict2):
"""
A utility method to update a dict or other mapping type with the contents of another dict.
This method updates the contents of ``dict1``, overwriting any existing key/value pairs in ``dict1`` with the
corresponding key/value pair in ``dict2``. If the value in ``dict2`` is a mapping type itself, then
``update_additive`` is called recursively. This ensures that nested maps are updated rather than simply
overwritten.
This method should be functionally equivalent to ``dict.update()`` except in the case of values that are
themselves nested maps. If you know that ``dict1`` does not have nested maps,
or you want to overwrite all values with the exact content of then you should simply use ``dict.update()``.
"""
for key, value in dict2.items():
if key not in dict1:
dict1[key] = value
else: # key in dict1
if isinstance(dict1[key], collections.Mapping):
assert isinstance(value, collections.Mapping)
update_additive(dict1[key], value)
else: # value is not a mapping type
assert not isinstance(value, collections.Mapping)
dict1[key] = value
def flatten_dict(d, parent_key='', separator='_'):
"""
Flattens any nested dict-like object into a non-nested form. The resulting dict will have keys of the form
``k1_nestedk2_nestedk3`` for nested keys. You can change the separator by passing in a value to
``separator``.
Example::
>>> import collections
>>> d = { 'a': 1,
... 'b': { 'a': 2,
... 'b': 3 },
... 'c': { 'a': 4,
... 'b': { 'a': 5,
... 'b': 6 },
... 'c': { 'a': 7 }
... }
... }
>>> flatten(d)
{'a': 1, 'b_a': 2, 'b_b': 3, 'c_a': 4, 'c_b_a': 5, 'c_b_b': 6, 'c_c_a': 7}
"""
items = []
for k, v in d.iteritems():
new_key = parent_key + separator + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def flatten_list(l):
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in flatten_list(el):
yield sub
else:
yield el
def has_files(the_path):
"""Given a path, returns whether the path has any files in it or any subfolders. Works recursively."""
the_path = path(the_path)
try:
for _ in the_path.walkfiles():
return True
return False
except OSError as ex:
if ex.errno == errno.ENOENT:
# ignore
return False
else:
raise
def diff_dir(dir_cmp, left_path=True):
"""
A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works
recursively.
:param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.
:param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right.
"""
for name in dir_cmp.diff_files:
if left_path:
path_root = dir_cmp.left
else:
path_root = dir_cmp.right
yield path.joinpath(path_root, name)
for sub in dir_cmp.subdirs.values():
# Need to iterate over the recursive call to make sure the individual values are yielded up the stack
for the_dir in diff_dir(sub, left_path):
yield the_dir
def make_precompiled_reference(a_path):
if not a_path.endswith('.less'):
return a_path
else:
return '.'.join(a_path.split('.')[0:-1]) + '_precompiled.css'
|
tylerbutler/engineer | engineer/util.py | update_additive | python | def update_additive(dict1, dict2):
for key, value in dict2.items():
if key not in dict1:
dict1[key] = value
else: # key in dict1
if isinstance(dict1[key], collections.Mapping):
assert isinstance(value, collections.Mapping)
update_additive(dict1[key], value)
else: # value is not a mapping type
assert not isinstance(value, collections.Mapping)
dict1[key] = value | A utility method to update a dict or other mapping type with the contents of another dict.
This method updates the contents of ``dict1``, overwriting any existing key/value pairs in ``dict1`` with the
corresponding key/value pair in ``dict2``. If the value in ``dict2`` is a mapping type itself, then
``update_additive`` is called recursively. This ensures that nested maps are updated rather than simply
overwritten.
This method should be functionally equivalent to ``dict.update()`` except in the case of values that are
themselves nested maps. If you know that ``dict1`` does not have nested maps,
or you want to overwrite all values with the exact content of then you should simply use ``dict.update()``. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/util.py#L366-L388 | null | # coding=utf-8
import collections
import errno
import filecmp
import hashlib
import itertools
import logging
import posixpath
import re
from itertools import chain, islice
import urlparse
# noinspection PyUnresolvedReferences
import translitcodec
# noinspection PyPackageRequirements
from path import path
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
_punctuation_regex = re.compile(r'[\t :!"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, length_limit=0, delimiter=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punctuation_regex.split(unicode(text).lower()):
word = word.encode('translit/long')
if word:
result.append(word)
slug = unicode(delimiter.join(result))
if length_limit > 0:
return slug[0:length_limit]
return slug
def get_class(class_string):
"""Given a string representing a path to a class, instantiates that class."""
parts = class_string.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def get_class_string(obj):
if isinstance(obj, basestring):
return obj
mod = obj.__module__
cls = getattr(obj, '__name__', obj.__class__.__name__)
return '.'.join((mod, cls))
def count_iterable(i):
# noinspection PyUnusedLocal
return sum(1 for e in i)
def expand_url(home, url):
join = urlparse.urljoin(home, url)
url2 = urlparse.urlparse(join)
the_path = posixpath.normpath(url2[2])
return urlparse.urlunparse(
(url2.scheme, url2.netloc, the_path, url2.params, url2.query, url2.fragment)
)
def urljoin(url1, *url2):
# This method is necessary because sometimes urlparse.urljoin simply doesn't work correctly
# when joining URL fragments.
return posixpath.join(url1, *url2)
def checksum(the_file):
with open(the_file) as f:
_checksum = hashlib.sha256(f.read()).hexdigest()
return _checksum
def chunk(seq, chunksize, process=iter):
it = iter(seq)
while True:
yield process(chain([it.next()], islice(it, chunksize - 1)))
def expand_path(path_list, root_path=None):
"""
Given a list of paths, returns a list of all parent paths (including the original paths).
If provided, ``root_path`` is used as the outermost path when expanding parent paths. If path_list contains
paths to files, only the directories where those files exist will be returned. This function only returns paths
to directories.
Example:
expand_path(['/tmp/foo/bar', '/tmp/foo/baz/file.txt' '/tmp/bar'], root_path='/tmp')
This call would return the following:
['/tmp/foo/bar/',
'/tmp/foo',
'/tmp/foo/baz/',
'/tmp/bar/']
If the ``root_path`` argument were ommitted in the above example,
"""
to_return = set()
path_list = wrap_list(path_list)
# expand ignore list to include all directories as individual entries
for p in path_list:
p = path(p)
if p.isdir():
to_return.add(p)
head, tail = p.splitpath()
while head and tail:
if root_path is not None and head == root_path:
break
to_return.add(head)
head, tail = head.splitpath()
return list(to_return)
def mirror_folder(source, target, delete_orphans=True, recurse=True, ignore_list=None, _level=0):
"""Mirrors a folder *source* into a target folder *target*."""
logger = logging.getLogger('engineer.util.mirror_folder')
def expand_tree(p):
tree = []
for node in path(p).walk():
tree.append(node)
return tree
report = {
'deleted': set([]),
'overwritten': set([]),
'new': set([])
}
d1 = source
d2 = target
logger.debug("Mirroring %s ==> %s" % (d1, d2))
if not d2.exists():
d2.makedirs()
compare = filecmp.dircmp(d1, d2)
# Expand the ignore list to be full paths
if ignore_list is None:
ignore_list = []
else:
ignore_list = [path(d2 / i).normpath() for i in ignore_list]
ignore_files = [f for f in ignore_list if f.isfile()]
ignore_list.extend(expand_path(ignore_files, root_path=d2))
# Delete orphan files/folders in the target folder
if delete_orphans:
for item in compare.right_only:
fullpath = path(d2 / item).normpath()
if fullpath in ignore_list:
logger.debug(
"%s ==> Ignored - path is in ignore list" % fullpath)
continue
if fullpath.isdir() and recurse:
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
if len(fullpath.listdir()) > 0:
report['deleted'].update(expand_tree(fullpath))
# noinspection PyArgumentList
fullpath.rmtree()
elif fullpath.isfile():
logger.debug(
"%s ==> Deleted - doesn't exist in source" % fullpath)
report['deleted'].add(fullpath)
fullpath.remove()
# Copy new files and folders from the source to the target
for item in compare.left_only:
fullpath = d1 / item
if fullpath.isdir() and recurse:
logger.debug(
"Copying new directory %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copytree(d2 / item)
report['new'].add(d2 / item)
report['new'].update(expand_tree(d2 / item))
elif fullpath.isfile():
logger.debug("Copying new file %s ==> %s" % (fullpath, (d2 / item)))
fullpath.copy2(d2)
report['new'].add(d2 / item)
# Copy modified files in the source to the target, overwriting the target file
for item in compare.diff_files:
logger.debug(
"Overwriting existing file %s ==> %s" % ((d1 / item), (d2 / item)))
(d1 / item).copy2(d2)
report['overwritten'].add(d2 / item)
# Recurse into subfolders that exist in both the source and target
if recurse:
for item in compare.common_dirs:
rpt = mirror_folder(d1 / item, d2 / item, delete_orphans, _level=_level + 1)
report['new'].update(rpt['new'])
report['overwritten'].update(rpt['overwritten'])
report['deleted'].update(rpt['deleted'])
return report
def ensure_exists(p, assume_dirs=False):
"""
Ensures a given path *p* exists.
If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files.
"""
if path(p).ext and not assume_dirs:
path(p).dirname().makedirs_p()
else:
path(p).makedirs_p()
return p
def wrap_list(item):
"""
Returns an object as a list.
If the object is a list, it is returned directly. If it is a tuple or set, it
is returned as a list. If it is another object, it is wrapped in a list and
returned.
"""
if item is None:
return []
elif isinstance(item, list):
return item
elif isinstance(item, (tuple, set)):
return list(item)
else:
return [item]
class Borg(object):
"""
A class that shares state among all instances of the class.
There seem to be a lot of differing opinions about whether this design
pattern is A Good Idea (tm) or not. It definitely seems better than
Singletons since it enforces *behavior*, not *structure*,
but it's also possible there's a better way to do it in Python with
judicious use of globals.
"""
_state = {}
def __new__(cls, *p, **k):
self = object.__new__(cls)
self.__dict__ = cls._state
return self
def relpath(the_path):
from engineer.conf import settings
return '/' + settings.OUTPUT_CACHE_DIR.relpathto(the_path)
def _min_css(css_string):
from cssmin import cssmin
return cssmin(css_string)
def _min_js(js_string):
import lpjsmin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
the_input = StringIO(js_string)
output = StringIO()
lpjsmin.minify_stream(the_input, output)
to_return = output.getvalue()
output.close()
the_input.close()
return to_return
def compress(item, compression_type):
if compression_type == 'css':
return _min_css(item)
elif compression_type == 'js':
return _min_js(item)
else:
raise ValueError("Unexpected compression_type: %s" % compression_type)
# setonce class from Ian Bicking: http://blog.ianbicking.org/easy-readonly-attributes.html
_setonce_count = itertools.count()  # global counter so each descriptor gets a unique storage slot


# noinspection PyPep8Naming
class setonce(object):
    """
    Allows an attribute to be set once (typically in __init__), but
    be read-only afterwards.

    Example::

        >>> class A(object):
        ...     x = setonce()
        >>> a = A()
        >>> a.x
        Traceback (most recent call last):
            ...
        AttributeError: 'A' object has no attribute '_setonce_attr_0'
        >>> a.x = 10
        >>> a.x
        10
        >>> a.x = 20
        Traceback (most recent call last):
            ...
        AttributeError: Attribute already set
        >>> del a.x
        >>> a.x = 20
        >>> a.x
        20

    You can also force a set to occur::

        >>> A.x.set(a, 30)
        >>> a.x
        30
    """

    def __init__(self, doc=None):
        # Use the next() builtin (Python 2.6+/3.x) rather than the
        # Python 2-only .next() method, which raises AttributeError on Python 3.
        self._count = next(_setonce_count)
        self._name = '_setonce_attr_%s' % self._count
        self.__doc__ = doc

    # noinspection PyUnusedLocal
    def __get__(self, obj, obj_type=None):
        if obj is None:
            # Accessed on the class itself: return the descriptor so .set() is reachable.
            return self
        return getattr(obj, self._name)

    def __set__(self, obj, value):
        # EAFP: the attribute slot existing means the value was already set once.
        try:
            getattr(obj, self._name)
        except AttributeError:
            setattr(obj, self._name, value)
        else:
            raise AttributeError("Attribute already set")

    def set(self, obj, value):
        """Force-set the value on *obj*, bypassing the write-once check."""
        setattr(obj, self._name, value)

    def __delete__(self, obj):
        delattr(obj, self._name)
def flatten_dict(d, parent_key='', separator='_'):
    """
    Flattens any nested dict-like object into a non-nested form. The resulting dict will have keys of the form
    ``k1_nestedk2_nestedk3`` for nested keys. You can change the separator by passing in a value to
    ``separator``.

    Example::

        >>> d = { 'a': 1,
        ...       'b': { 'a': 2,
        ...              'b': 3 },
        ...       'c': { 'a': 4,
        ...              'b': { 'a': 5,
        ...                     'b': 6 },
        ...              'c': { 'a': 7 }
        ...            }
        ...     }
        >>> flatten_dict(d) == {'a': 1, 'b_a': 2, 'b_b': 3, 'c_a': 4, 'c_b_a': 5, 'c_b_b': 6, 'c_c_a': 7}
        True
    """
    # collections.MutableMapping moved to collections.abc in Python 3 and was
    # removed from collections in 3.10; fall back for Python 2.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # Python 2
        MutableMapping = collections.MutableMapping
    items = []
    # d.items() works on both Python 2 and 3 (iteritems() is Python 2 only).
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        if isinstance(v, MutableMapping):
            # Bug fix: propagate the caller's separator into the recursive call;
            # previously nested levels silently reverted to the default '_'.
            items.extend(flatten_dict(v, new_key, separator).items())
        else:
            items.append((new_key, v))
    return dict(items)
def flatten_list(l):
    """Recursively flatten nested iterables, yielding each leaf item in order.

    Strings are treated as leaves (they are not exploded into characters).
    """
    # Python 2/3 compatibility: Iterable moved to collections.abc (and was
    # removed from collections in 3.10); basestring does not exist on Python 3.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        Iterable = collections.Iterable
    try:
        string_types = basestring  # noqa: F821 (Python 2)
    except NameError:  # Python 3: str/bytes both count as atomic leaves
        string_types = (str, bytes)
    for el in l:
        if isinstance(el, Iterable) and not isinstance(el, string_types):
            for sub in flatten_list(el):
                yield sub
        else:
            yield el
def has_files(the_path):
    """Given a path, returns whether the path has any files in it or any subfolders. Works recursively."""
    target = path(the_path)
    try:
        for _ in target.walkfiles():
            return True
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            # A nonexistent path trivially contains no files.
            return False
        raise
    return False
def diff_dir(dir_cmp, left_path=True):
    """
    A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works
    recursively.

    :param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.
    :param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right.
    """
    base_dir = dir_cmp.left if left_path else dir_cmp.right
    for filename in dir_cmp.diff_files:
        yield path.joinpath(base_dir, filename)
    # Recurse into common subdirectories, re-yielding each nested difference
    # so callers see a single flat stream of differing paths.
    for sub_cmp in dir_cmp.subdirs.values():
        for differing in diff_dir(sub_cmp, left_path):
            yield differing
def make_precompiled_reference(a_path):
    """Map a ``.less`` path to its ``_precompiled.css`` counterpart; other paths pass through unchanged."""
    if a_path.endswith('.less'):
        return a_path[:-len('.less')] + '_precompiled.css'
    return a_path
|
tylerbutler/engineer | engineer/util.py | diff_dir | python | def diff_dir(dir_cmp, left_path=True):
for name in dir_cmp.diff_files:
if left_path:
path_root = dir_cmp.left
else:
path_root = dir_cmp.right
yield path.joinpath(path_root, name)
for sub in dir_cmp.subdirs.values():
# Need to iterate over the recursive call to make sure the individual values are yielded up the stack
for the_dir in diff_dir(sub, left_path):
yield the_dir | A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works
recursively.
:param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.
:param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/util.py#L446-L463 | [
"def diff_dir(dir_cmp, left_path=True):\n \"\"\"\n A generator that, given a ``filecmp.dircmp`` object, yields the paths to all files that are different. Works\n recursively.\n\n :param dir_cmp: A ``filecmp.dircmp`` object representing the comparison.\n :param left_path: If ``True``, paths will be relative to dircmp.left. Else paths will be relative to dircmp.right.\n \"\"\"\n for name in dir_cmp.diff_files:\n if left_path:\n path_root = dir_cmp.left\n else:\n path_root = dir_cmp.right\n yield path.joinpath(path_root, name)\n for sub in dir_cmp.subdirs.values():\n # Need to iterate over the recursive call to make sure the individual values are yielded up the stack\n for the_dir in diff_dir(sub, left_path):\n yield the_dir\n"
] | # coding=utf-8
import collections
import errno
import filecmp
import hashlib
import itertools
import logging
import posixpath
import re
from itertools import chain, islice
import urlparse
# noinspection PyUnresolvedReferences
import translitcodec
# noinspection PyPackageRequirements
from path import path
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
_punctuation_regex = re.compile(r'[\t :!"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, length_limit=0, delimiter=u'-'):
    """Generates an ASCII-only slug from *text*, optionally truncated to *length_limit* characters."""
    words = []
    # Split on punctuation/whitespace, transliterate each chunk to ASCII,
    # and drop chunks that transliterate to nothing.
    for word in _punctuation_regex.split(unicode(text).lower()):
        transliterated = word.encode('translit/long')
        if transliterated:
            words.append(transliterated)
    slug = unicode(delimiter.join(words))
    return slug[0:length_limit] if length_limit > 0 else slug
def get_class(class_string):
    """Given a string representing a path to a class, instantiates that class."""
    pieces = class_string.split('.')
    # __import__ returns the TOP-LEVEL package, so walk the remaining
    # attribute chain (submodules and the final name) manually.
    obj = __import__('.'.join(pieces[:-1]))
    for attribute in pieces[1:]:
        obj = getattr(obj, attribute)
    return obj
def get_class_string(obj):
    """Return the dotted ``module.ClassName`` string for *obj* (a class or an instance).

    If *obj* is already a string it is returned unchanged.
    """
    if isinstance(obj, basestring):
        return obj
    # Classes carry __name__ directly; instances fall back to their class's name.
    name = getattr(obj, '__name__', obj.__class__.__name__)
    return '%s.%s' % (obj.__module__, name)
def count_iterable(i):
    """Count the items produced by iterable *i* (consumes the iterable)."""
    total = 0
    for _ in i:
        total += 1
    return total
def expand_url(home, url):
    """Join *url* onto *home*, normalizing the path component of the result."""
    joined = urlparse.urlparse(urlparse.urljoin(home, url))
    clean_path = posixpath.normpath(joined[2])
    return urlparse.urlunparse(
        (joined.scheme, joined.netloc, clean_path, joined.params, joined.query, joined.fragment)
    )
def urljoin(url1, *url2):
    """Join one or more URL fragments using POSIX path semantics.

    Exists because ``urlparse.urljoin`` mishandles bare URL fragments:
    it can discard earlier path segments, whereas this always appends.
    """
    segments = (url1,) + url2
    return posixpath.join(*segments)
def checksum(the_file):
    """Return the SHA-256 hex digest of *the_file*'s contents.

    The file is read in binary mode: hashing text-mode content raises
    TypeError on Python 3 (sha256 needs bytes) and, on Windows, newline
    translation would silently alter the digest.
    """
    with open(the_file, 'rb') as f:
        _checksum = hashlib.sha256(f.read()).hexdigest()
    return _checksum
def chunk(seq, chunksize, process=iter):
    """Lazily split *seq* into chunks of at most *chunksize* items.

    Each chunk is passed through *process* (default ``iter``) before being
    yielded. Consume each chunk fully before advancing to the next, since
    all chunks share the underlying iterator.
    """
    it = iter(seq)
    while True:
        try:
            head = next(it)
        except StopIteration:
            # PEP 479: letting StopIteration escape a generator raises
            # RuntimeError on Python 3.7+, and the Python 2-only it.next()
            # does not exist on Python 3 — terminate explicitly instead.
            return
        yield process(chain([head], islice(it, chunksize - 1)))
def expand_path(path_list, root_path=None):
    """
    Given a list of paths, returns a list of all parent paths (including the original paths).

    If provided, ``root_path`` is used as the outermost path when expanding parent paths: expansion stops
    before adding ``root_path`` itself. If path_list contains paths to files, only the directories where
    those files exist will be returned. This function only returns paths to directories, and the order of
    the returned list is unspecified (it is built from a set).

    Example:

        expand_path(['/tmp/foo/bar', '/tmp/foo/baz/file.txt', '/tmp/bar'], root_path='/tmp')

    This call would return the following:

        ['/tmp/foo/bar',
         '/tmp/foo',
         '/tmp/foo/baz',
         '/tmp/bar']

    If the ``root_path`` argument were omitted in the above example, the result would also include
    ``/tmp`` and ``/``, since expansion continues all the way up to the filesystem root.
    """
    to_return = set()
    path_list = wrap_list(path_list)
    # expand ignore list to include all directories as individual entries
    for p in path_list:
        p = path(p)
        if p.isdir():
            # Directories are added themselves; files only contribute their parent chain below.
            to_return.add(p)
        head, tail = p.splitpath()
        # Walk upward, adding each ancestor directory until root_path (exclusive)
        # or until splitpath() yields an empty head/tail at the filesystem root.
        while head and tail:
            if root_path is not None and head == root_path:
                break
            to_return.add(head)
            head, tail = head.splitpath()
    return list(to_return)
def mirror_folder(source, target, delete_orphans=True, recurse=True, ignore_list=None, _level=0):
    """Mirrors a folder *source* into a target folder *target*.

    :param source: directory whose contents are treated as authoritative.
    :param target: directory made to match *source* (created if missing).
    :param delete_orphans: when True, files/folders present only in *target* are removed.
    :param recurse: when True, subdirectories are mirrored as well.
    :param ignore_list: paths (relative to *target*) that are exempt from the deletion pass.
    :param _level: internal recursion-depth counter; do not pass explicitly.
    :returns: dict with ``'new'``, ``'overwritten'`` and ``'deleted'`` sets of affected paths.
    """
    logger = logging.getLogger('engineer.util.mirror_folder')

    def expand_tree(p):
        # Collect every node (file and directory) under p, for reporting.
        tree = []
        for node in path(p).walk():
            tree.append(node)
        return tree

    report = {
        'deleted': set([]),
        'overwritten': set([]),
        'new': set([])
    }
    d1 = source
    d2 = target
    logger.debug("Mirroring %s ==> %s" % (d1, d2))
    if not d2.exists():
        d2.makedirs()
    compare = filecmp.dircmp(d1, d2)
    # Expand the ignore list to be full paths
    if ignore_list is None:
        ignore_list = []
    else:
        ignore_list = [path(d2 / i).normpath() for i in ignore_list]
        # Ignored *files* also protect their parent directories from deletion.
        ignore_files = [f for f in ignore_list if f.isfile()]
        ignore_list.extend(expand_path(ignore_files, root_path=d2))
    # Delete orphan files/folders in the target folder
    # NOTE: the ignore list is only consulted here, on the deletion pass.
    if delete_orphans:
        for item in compare.right_only:
            fullpath = path(d2 / item).normpath()
            if fullpath in ignore_list:
                logger.debug(
                    "%s ==> Ignored - path is in ignore list" % fullpath)
                continue
            if fullpath.isdir() and recurse:
                logger.debug(
                    "%s ==> Deleted - doesn't exist in source" % fullpath)
                report['deleted'].add(fullpath)
                if len(fullpath.listdir()) > 0:
                    report['deleted'].update(expand_tree(fullpath))
                # noinspection PyArgumentList
                fullpath.rmtree()
            elif fullpath.isfile():
                logger.debug(
                    "%s ==> Deleted - doesn't exist in source" % fullpath)
                report['deleted'].add(fullpath)
                fullpath.remove()
    # Copy new files and folders from the source to the target
    for item in compare.left_only:
        fullpath = d1 / item
        if fullpath.isdir() and recurse:
            logger.debug(
                "Copying new directory %s ==> %s" % (fullpath, (d2 / item)))
            fullpath.copytree(d2 / item)
            report['new'].add(d2 / item)
            report['new'].update(expand_tree(d2 / item))
        elif fullpath.isfile():
            logger.debug("Copying new file %s ==> %s" % (fullpath, (d2 / item)))
            fullpath.copy2(d2)
            report['new'].add(d2 / item)
    # Copy modified files in the source to the target, overwriting the target file
    for item in compare.diff_files:
        logger.debug(
            "Overwriting existing file %s ==> %s" % ((d1 / item), (d2 / item)))
        (d1 / item).copy2(d2)
        report['overwritten'].add(d2 / item)
    # Recurse into subfolders that exist in both the source and target
    # NOTE(review): ignore_list is NOT forwarded into the recursive call, so
    # ignored paths inside common subdirectories are not protected — confirm
    # whether this is intentional.
    if recurse:
        for item in compare.common_dirs:
            rpt = mirror_folder(d1 / item, d2 / item, delete_orphans, _level=_level + 1)
            report['new'].update(rpt['new'])
            report['overwritten'].update(rpt['overwritten'])
            report['deleted'].update(rpt['deleted'])
    return report
def ensure_exists(p, assume_dirs=False):
    """
    Ensures a given path *p* exists.

    If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a
    value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files.
    """
    target = path(p)
    # A path with an extension is treated as a file (unless assume_dirs says otherwise),
    # so only its containing directory is created.
    if assume_dirs or not target.ext:
        target.makedirs_p()
    else:
        target.dirname().makedirs_p()
    return p
def wrap_list(item):
    """
    Returns an object as a list.

    Lists pass through unchanged; ``None`` becomes ``[]``; tuples and sets
    are converted to lists; anything else is wrapped in a one-element list.
    """
    if isinstance(item, list):
        return item
    if item is None:
        return []
    if isinstance(item, (tuple, set)):
        return list(item)
    return [item]
class Borg(object):
    """
    A class that shares state among all instances of the class.
    There seem to be a lot of differing opinions about whether this design
    pattern is A Good Idea (tm) or not. It definitely seems better than
    Singletons since it enforces *behavior*, not *structure*,
    but it's also possible there's a better way to do it in Python with
    judicious use of globals.
    """
    _state = {}

    def __new__(cls, *p, **k):
        # Every instance points its attribute dict at the single shared
        # class-level dict, so state written on one instance is seen by all.
        instance = object.__new__(cls)
        instance.__dict__ = cls._state
        return instance
def relpath(the_path):
    """Return *the_path* as a site-root-relative URL path (relative to OUTPUT_CACHE_DIR)."""
    from engineer.conf import settings
    relative = settings.OUTPUT_CACHE_DIR.relpathto(the_path)
    return '/' + relative
def _min_css(css_string):
    """Minify a CSS string using the cssmin library."""
    from cssmin import cssmin
    minified = cssmin(css_string)
    return minified
def _min_js(js_string):
    """Minify a JavaScript string using the lpjsmin library."""
    import lpjsmin
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    source, minified = StringIO(js_string), StringIO()
    lpjsmin.minify_stream(source, minified)
    result = minified.getvalue()
    # Close output first, then input, mirroring the original cleanup order.
    minified.close()
    source.close()
    return result
def compress(item, compression_type):
    """Dispatch *item* to the minifier matching *compression_type* ('css' or 'js')."""
    minifiers = {'css': _min_css, 'js': _min_js}
    minifier = minifiers.get(compression_type)
    if minifier is None:
        raise ValueError("Unexpected compression_type: %s" % compression_type)
    return minifier(item)
# setonce class from Ian Bicking: http://blog.ianbicking.org/easy-readonly-attributes.html
_setonce_count = itertools.count()  # global counter so each descriptor gets a unique storage slot


# noinspection PyPep8Naming
class setonce(object):
    """
    Allows an attribute to be set once (typically in __init__), but
    be read-only afterwards.

    Example::

        >>> class A(object):
        ...     x = setonce()
        >>> a = A()
        >>> a.x
        Traceback (most recent call last):
            ...
        AttributeError: 'A' object has no attribute '_setonce_attr_0'
        >>> a.x = 10
        >>> a.x
        10
        >>> a.x = 20
        Traceback (most recent call last):
            ...
        AttributeError: Attribute already set
        >>> del a.x
        >>> a.x = 20
        >>> a.x
        20

    You can also force a set to occur::

        >>> A.x.set(a, 30)
        >>> a.x
        30
    """

    def __init__(self, doc=None):
        # Use the next() builtin (Python 2.6+/3.x) rather than the
        # Python 2-only .next() method, which raises AttributeError on Python 3.
        self._count = next(_setonce_count)
        self._name = '_setonce_attr_%s' % self._count
        self.__doc__ = doc

    # noinspection PyUnusedLocal
    def __get__(self, obj, obj_type=None):
        if obj is None:
            # Accessed on the class itself: return the descriptor so .set() is reachable.
            return self
        return getattr(obj, self._name)

    def __set__(self, obj, value):
        # EAFP: the attribute slot existing means the value was already set once.
        try:
            getattr(obj, self._name)
        except AttributeError:
            setattr(obj, self._name, value)
        else:
            raise AttributeError("Attribute already set")

    def set(self, obj, value):
        """Force-set the value on *obj*, bypassing the write-once check."""
        setattr(obj, self._name, value)

    def __delete__(self, obj):
        delattr(obj, self._name)
def update_additive(dict1, dict2):
    """
    A utility method to update a dict or other mapping type with the contents of another dict.

    This method updates the contents of ``dict1``, overwriting any existing key/value pairs in ``dict1`` with the
    corresponding key/value pair in ``dict2``. If the value in ``dict2`` is a mapping type itself, then
    ``update_additive`` is called recursively. This ensures that nested maps are updated rather than simply
    overwritten.

    This method should be functionally equivalent to ``dict.update()`` except in the case of values that are
    themselves nested maps. If you know that ``dict1`` does not have nested maps,
    or you want to overwrite all values with the exact content of ``dict2``, then you should simply use
    ``dict.update()``.
    """
    # collections.Mapping moved to collections.abc in Python 3 and was removed
    # from collections in 3.10; fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        Mapping = collections.Mapping
    for key, value in dict2.items():
        if key not in dict1:
            dict1[key] = value
        else:  # key in dict1
            if isinstance(dict1[key], Mapping):
                # Both sides must be mappings for a recursive merge.
                assert isinstance(value, Mapping)
                update_additive(dict1[key], value)
            else:  # existing value is not a mapping; it must not be replaced by one
                assert not isinstance(value, Mapping)
                dict1[key] = value
def flatten_dict(d, parent_key='', separator='_'):
    """
    Flattens any nested dict-like object into a non-nested form. The resulting dict will have keys of the form
    ``k1_nestedk2_nestedk3`` for nested keys. You can change the separator by passing in a value to
    ``separator``.

    Example::

        >>> d = { 'a': 1,
        ...       'b': { 'a': 2,
        ...              'b': 3 },
        ...       'c': { 'a': 4,
        ...              'b': { 'a': 5,
        ...                     'b': 6 },
        ...              'c': { 'a': 7 }
        ...            }
        ...     }
        >>> flatten_dict(d) == {'a': 1, 'b_a': 2, 'b_b': 3, 'c_a': 4, 'c_b_a': 5, 'c_b_b': 6, 'c_c_a': 7}
        True
    """
    # collections.MutableMapping moved to collections.abc in Python 3 and was
    # removed from collections in 3.10; fall back for Python 2.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # Python 2
        MutableMapping = collections.MutableMapping
    items = []
    # d.items() works on both Python 2 and 3 (iteritems() is Python 2 only).
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        if isinstance(v, MutableMapping):
            # Bug fix: propagate the caller's separator into the recursive call;
            # previously nested levels silently reverted to the default '_'.
            items.extend(flatten_dict(v, new_key, separator).items())
        else:
            items.append((new_key, v))
    return dict(items)
def flatten_list(l):
    """Recursively flatten nested iterables, yielding each leaf item in order.

    Strings are treated as leaves (they are not exploded into characters).
    """
    # Python 2/3 compatibility: Iterable moved to collections.abc (and was
    # removed from collections in 3.10); basestring does not exist on Python 3.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        Iterable = collections.Iterable
    try:
        string_types = basestring  # noqa: F821 (Python 2)
    except NameError:  # Python 3: str/bytes both count as atomic leaves
        string_types = (str, bytes)
    for el in l:
        if isinstance(el, Iterable) and not isinstance(el, string_types):
            for sub in flatten_list(el):
                yield sub
        else:
            yield el
def has_files(the_path):
    """Given a path, returns whether the path has any files in it or any subfolders. Works recursively."""
    target = path(the_path)
    try:
        for _ in target.walkfiles():
            return True
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            # A nonexistent path trivially contains no files.
            return False
        raise
    return False
def make_precompiled_reference(a_path):
    """Map a ``.less`` path to its ``_precompiled.css`` counterpart; other paths pass through unchanged."""
    if a_path.endswith('.less'):
        return a_path[:-len('.less')] + '_precompiled.css'
    return a_path
|
tylerbutler/engineer | engineer/conf.py | EngineerConfiguration.create_required_directories | python | def create_required_directories(self):
required = (self.CACHE_DIR,
self.LOG_DIR,
self.OUTPUT_DIR,
self.ENGINEER.JINJA_CACHE_DIR,)
for folder in required:
ensure_exists(folder, assume_dirs=True) | Creates any directories required for Engineer to function if they don't already exist. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/conf.py#L432-L440 | [
"def ensure_exists(p, assume_dirs=False):\n \"\"\"\n Ensures a given path *p* exists.\n\n If a path to a file is passed in, then the path to the file will be checked. This can be overridden by passing a\n value of ``True`` to ``assume_dirs``, in which case the paths will be assumed to be to directories, not files.\n \"\"\"\n if path(p).ext and not assume_dirs:\n path(p).dirname().makedirs_p()\n else:\n path(p).makedirs_p()\n return p\n"
] | class EngineerConfiguration(object):
"""
Stores all of the configuration settings for a given Engineer site.
This class uses the Borg design pattern and shares state among all
instances of the class.
There seem to be a lot of differing opinions about whether this design
pattern is A Good Idea (tm) or not. It definitely seems better than
Singletons since it enforces *behavior*, not *structure*,
but it's also possible there's a better way to do it in Python with
judicious use of globals.
"""
_state = {}
def __new__(cls, *p, **k):
    # Borg pattern: every configuration instance shares one class-level state dict,
    # so all EngineerConfiguration objects see the same settings.
    instance = object.__new__(cls)
    instance.__dict__ = cls._state
    return instance
_required_params = ('SITE_URL',)
class _EngineerConstants(object):
# ENGINEER 'CONSTANTS'
ENGINEER_APP_WIDE_SETTINGS_DIR = ensure_exists(user_data_dir('Engineer', 'Engineer'))
ROOT_DIR = path(__file__).dirname().abspath()
TEMPLATE_DIR = (ROOT_DIR / '_templates').abspath()
STATIC_DIR = (ROOT_DIR / 'static').abspath()
THEMES_DIR = (ROOT_DIR / '_themes').abspath()
LIB_DIR = (STATIC_DIR / 'engineer/lib/').abspath()
JINJA_CACHE_DIR = ensure_exists(path(user_cache_dir('Engineer', 'Engineer')) / '_jinja_cache')
def __init__(self, settings_file=None, override=None):
    """Load configuration from *settings_file*, applying *override* values on top.

    :param settings_file: path to the YAML settings file; None loads an empty config.
    :param override: dict of settings applied over the file's values.
    """
    # Bug fix: 'override' was accepted but never forwarded to reload(), so
    # constructor-supplied overrides were silently ignored (reload() does
    # consume an 'override' argument).
    self.reload(settings_file, override=override)
    self.COMPRESS_FILE_LIST = set()
def reload(self, settings_file=None, override=None):
if settings_file is None:
if hasattr(self, 'SETTINGS_FILE') and self.SETTINGS_FILE is not None:
# First check if SETTINGS_FILE has been defined. If so, we'll reload from that file.
settings_file = self.SETTINGS_FILE
else:
# Looks like we're just loading the 'empty' config.
assert isinstance(logger, CustomLogger)
logger.info("Initializing empty configuration.")
self.SETTINGS_FILE = None
self._initialize({})
return
if path(settings_file).exists() and path(settings_file).isfile():
self.SETTINGS_FILE = settings_file = path(settings_file).expand().abspath()
logger.console("Loading configuration from %s." % settings_file)
# Find the complete set of settings files based on inheritance
all_configs = []
config = {}
try:
while True:
with open(settings_file, mode='rb') as the_file:
temp_config = yaml.load(the_file)
logger.info("Loaded %s file." % settings_file)
all_configs.append((temp_config, settings_file))
if 'SUPER' not in temp_config:
break
else:
new_settings = path(temp_config['SUPER']).expand()
if not new_settings.isabs():
settings_file = (settings_file.dirname() / new_settings).abspath()
else:
settings_file = new_settings.abspath()
logger.debug("Going to next settings file... %s" % settings_file)
except Exception as e:
logger.exception(e.message)
# load parent configs
all_configs.reverse()
for c in all_configs[:-1]:
logger.debug("Loading parent configuration from %s." % path(c[1]).abspath())
update_additive(config, c[0])
# load main config
logger.debug("Finalizing configuration from %s." % path(all_configs[-1][1]).abspath())
update_additive(config, all_configs[-1][0])
if override:
logger.debug("Override dict was passed into setting initializer. Applying overrides: %s" % override)
update_additive(config, override)
for param in self._required_params:
if param not in config:
raise Exception("Required setting '%s' is missing from config file %s." % (param,
self.SETTINGS_FILE))
self._initialize(config)
self.SETTINGS_FILE_LOAD_TIME = arrow.now()
else:
raise SettingsFileNotFoundException("Settings file %s not found!" % settings_file)
def _initialize(self, config):
self._check_deprecated_settings(config)
self.ENGINEER = EngineerConfiguration._EngineerConstants()
# CONTENT DIRECTORIES
self.SETTINGS_DIR = path(config.pop('SETTINGS_DIR',
self.SETTINGS_FILE.dirname().abspath() if self.SETTINGS_FILE is not None
else path.getcwd()))
self.CONTENT_DIR = self.normalize(config.pop('CONTENT_DIR', 'content'))
self.POST_DIR = self.normalize_list(config.pop('POST_DIR', 'posts'))
self.OUTPUT_DIR = self.normalize(config.pop('OUTPUT_DIR', 'output'))
self.OUTPUT_DIR_IGNORE = wrap_list(config.pop('OUTPUT_DIR_IGNORE', ['.git', '.gitignore']))
self.TEMPLATE_DIR = self.normalize(config.pop('TEMPLATE_DIR', 'templates'))
self.TEMPLATE_PAGE_DIR = self.normalize(
config.pop('TEMPLATE_PAGE_DIR', (self.TEMPLATE_DIR / 'pages').abspath())
)
self.LOG_DIR = self.normalize(config.pop('LOG_DIR', 'logs'))
if self.SETTINGS_FILE is None:
self.LOG_FILE = self.normalize(config.pop('LOG_FILE', (self.LOG_DIR / 'build.log').abspath()))
else:
self.LOG_FILE = self.normalize(
config.pop(
'LOG_FILE',
(self.LOG_DIR / ('%s-%s.log' % (datetime.now().strftime('%m.%d_%H.%M.%S'),
self.SETTINGS_FILE.name))).abspath())
)
self.CACHE_DIR = self.normalize(config.pop('CACHE_DIR', None))
if self.CACHE_DIR is None:
if self.SETTINGS_FILE is not None:
self.CACHE_DIR = self.normalize('_cache/%s' % self.SETTINGS_FILE.name)
else:
self.CACHE_DIR = self.normalize('_cache/None')
else:
self.CACHE_DIR = self.normalize(self.CACHE_DIR)
self.CACHE_FILE = self.normalize(
config.pop('CACHE_FILE', (self.CACHE_DIR / 'engineer.cache').abspath())
)
self.OUTPUT_CACHE_DIR = self.normalize(
config.pop('OUTPUT_CACHE_DIR', (self.CACHE_DIR / 'output_cache').abspath())
)
self.JINJA_CACHE_DIR = self.normalize(
config.pop('JINJA_CACHE_DIR', (self.CACHE_DIR / 'jinja_cache').abspath())
)
self.BUILD_STATS_FILE = self.normalize(
config.pop('BUILD_STATS_FILE', (self.CACHE_DIR / 'build_stats.cache').abspath())
)
# PLUGINS
self.PLUGINS = wrap_list(config.pop('PLUGINS', None))
if self.PLUGINS is not None:
for plugin in self.PLUGINS:
__import__(plugin)
default_renderer = PythonMarkdownRenderer()
final_config = default_renderer.supported_extensions_dict
settings_renderer_config = config.pop('POST_RENDERER_CONFIG', {})
settings_renderer_config = dict([(k, get_class(v)) for k, v in settings_renderer_config.iteritems()])
final_config.update(settings_renderer_config)
self.POST_RENDERER_CONFIG = final_config
# THEMES
self.THEME_DIRS = self.normalize_list(config.pop('THEME_DIRS', None))
self.THEME_FINDERS = [
'engineer.themes.finders.ThemeDirsFinder',
'engineer.themes.finders.SiteFinder',
'engineer.themes.finders.PluginFinder',
'engineer.themes.finders.DefaultFinder'
]
self.THEME_SETTINGS = config.pop('THEME_SETTINGS', {})
self.THEME = config.pop('THEME', 'dark_rainbow')
# PREPROCESSOR / COMPRESSOR SETTINGS
self.COMPRESSOR_ENABLED = config.pop('COMPRESSOR_ENABLED', True)
self.COMPRESSOR_FILE_EXTENSIONS = config.pop('COMPRESSOR_FILE_EXTENSIONS', ['js', 'css'])
self.PREPROCESS_LESS = config.pop('PREPROCESS_LESS', True)
if 'LESS_PREPROCESSOR' not in config:
if platform.system() == 'Windows':
self.LESS_PREPROCESSOR = str(self.ENGINEER.ROOT_DIR /
'lib/less.js-windows/lessc.cmd')
else:
self.LESS_PREPROCESSOR = 'lessc'
else:
self.LESS_PREPROCESSOR = path(config.pop('LESS_PREPROCESSOR'))
# SITE SETTINGS
self.SITE_TITLE = config.pop('SITE_TITLE', 'SITE_TITLE')
self.SITE_URL = config.pop('SITE_URL', 'SITE_URL')
self.SITE_AUTHOR = config.pop('SITE_AUTHOR', None)
self.HOME_URL = config.pop('HOME_URL', '/')
# HOME_URL must end with a slash
if not self.HOME_URL.endswith('/'):
self.HOME_URL += '/'
self.STATIC_URL = config.pop('STATIC_URL', urljoin(self.HOME_URL, 'static'))
# starting in version 0.5, the default permalink style will change to 'pretty'
permalink_setting = config.pop('PERMALINK_STYLE', None)
if permalink_setting is None:
self.PERMALINK_STYLE = permalink_styles['pretty']
else:
self.PERMALINK_STYLE = permalink_styles.get(permalink_setting, permalink_setting)
self.ROLLUP_PAGE_SIZE = int(config.pop('ROLLUP_PAGE_SIZE', 5))
# RSS FEED SETTINGS
self.FEED_TITLE = config.pop('FEED_TITLE', self.SITE_TITLE + ' Feed')
self.FEED_ITEM_LIMIT = config.pop('FEED_ITEM_LIMIT', self.ROLLUP_PAGE_SIZE)
self.FEED_DESCRIPTION = config.pop('FEED_DESCRIPTION',
'The %s most recent posts from %s.' % (self.FEED_ITEM_LIMIT, self.SITE_URL))
self.FEED_URL = config.pop('FEED_URL', urljoin(self.HOME_URL, 'feeds/atom.xml'))
# URL helper functions
def page(num):
page_path = urljoin('page', str(num))
return urljoin(self.HOME_URL, page_path)
def tag(name):
page_path = urljoin('tag', slugify(name))
page_path = urljoin(self.HOME_URL, page_path)
return page_path
self.URLS = {
'home': self.HOME_URL,
'archives': urljoin(self.HOME_URL, 'archives'),
'feed': self.FEED_URL,
'listpage': page,
'tag': tag,
}
# Update URLs from the config setting if they're present
self.URLS.update(config.pop('URLS', {}))
# MISCELLANEOUS SETTINGS
self.ACTIVE_NAV_CLASS = config.pop('ACTIVE_NAV_CLASS', 'current')
self.DEBUG = config.pop('DEBUG', False)
# self.DISABLE_CACHE = config.pop('DISABLE_CACHE', False)
self.PLUGIN_PERMISSIONS = {
'MODIFY_RAW_POST': []
}
provided_permissions = config.pop('PLUGIN_PERMISSIONS', {})
update_additive(self.PLUGIN_PERMISSIONS, provided_permissions)
self.PUBLISH_DRAFTS = config.pop('PUBLISH_DRAFTS', False)
self.PUBLISH_PENDING = config.pop('PUBLISH_PENDING', False)
self.PUBLISH_REVIEW = config.pop('PUBLISH_REVIEW', False)
self.POST_TIMEZONE = zoneinfo.gettz(config.pop('POST_TIMEZONE', 'UTC'))
self.SERVER_TIMEZONE = self.POST_TIMEZONE if config.get('SERVER_TIMEZONE',
None) is None else config.pop('SERVER_TIMEZONE')
self.TIME_FORMAT = config.pop('TIME_FORMAT', '%I:%M %p %A, %B %d, %Y %Z') # '%Y-%m-%d %H:%M:%S %Z%z'
# Let plugins deal with their settings in their own way if needed
for plugin_type in get_all_plugin_types():
for plugin in plugin_type.plugins:
if hasattr(plugin, 'handle_settings'):
logger.debug("Calling handle_settings on plugin: %s.\nconfig dict is: %s\n" % (plugin.__name__,
log_object(config)))
config = plugin.handle_settings(config, self)
else:
logger.error("This plugin does not have a handle_settings method defined: %s" % plugin.get_name())
# Pull any remaining settings in the config and set them as attributes on the settings object
for k, v in config.iteritems():
setattr(self, k, v)
@staticmethod
def _check_deprecated_settings(config):
    """Remove and warn about any settings that are no longer supported."""
    for deprecated in deprecated_settings:
        # Each entry is a (name, version, message) tuple; pop so the stale
        # setting never reaches the rest of initialization.
        if config.pop(deprecated[0], None) is not None:
            logger.warning("The '%s' setting was deprecated in version %s: %s" % deprecated)
@cached_property
def OUTPUT_STATIC_DIR(self):
return path(self.OUTPUT_CACHE_DIR / self.ENGINEER.STATIC_DIR.basename()).abspath()
@cached_property
def JINJA_ENV(self):
from webassets.ext.jinja2 import AssetsExtension
from engineer.processors import preprocess_less
from engineer.themes import ThemeManager
# Configure Jinja2 environment
logger.debug("Configuring the Jinja environment.")
# Helper function to look up a URL by name
# noinspection PyShadowingNames
def urlname(name, *args):
url = settings.URLS.get(name, settings.HOME_URL)
if isfunction(url):
return url(*args)
else:
return url
theme = ThemeManager.current_theme()
env = Environment(
loader=ChoiceLoader(
[FileSystemLoader([self.TEMPLATE_DIR]),
theme.template_loader,
# self.ENGINEER.THEMES_DIR / 'base_templates',
FileSystemLoader([self.ENGINEER.TEMPLATE_DIR])]
),
extensions=['jinja2.ext.with_', AssetsExtension],
bytecode_cache=FileSystemBytecodeCache(directory=self.ENGINEER.JINJA_CACHE_DIR),
trim_blocks=True)
env.assets_environment = theme.assets_environment
# JinjaEnvironment plugins
# noinspection PyUnresolvedReferences
for plugin in JinjaEnvironmentPlugin.plugins:
plugin.update_environment(env)
# Built-in globals
env.globals['theme'] = theme
env.globals['urlname'] = urlname
env.globals['preprocess_less'] = preprocess_less
env.globals['make_precompiled_reference'] = make_precompiled_reference
# env.globals['url'] = url
env.globals['STATIC_URL'] = self.STATIC_URL
env.globals['DEBUG'] = self.DEBUG
env.globals['settings'] = self
return env
@cached_property
def CACHE(self):
# This check is a hack to ensure sphinx autodoc doesn't choke on this property.
# I don't know why it chokes here, but I think the exception handling might be
# messing with it.
if self is None:
return
# Use a shelf as the main cache
try:
CACHE = shelve.open(self.CACHE_FILE, writeback=True)
except Exception as e:
logger.exception(e)
CACHE = None
exit()
if CACHE is None or len(CACHE) == 0 or 'version' not in CACHE or CACHE['version'] != version:
# all new caches
logger.warning("Caches either don't exist or are old, so creating new ones...")
CACHE.clear()
CACHE['version'] = version
return CACHE
@cached_property
def COMPRESSION_CACHE(self):
if 'COMPRESSION_CACHE' not in self.CACHE:
self.CACHE['COMPRESSION_CACHE'] = SimpleFileCache(version=version)
return self.CACHE['COMPRESSION_CACHE']
@cached_property
def POST_CACHE(self):
if 'POST_CACHE' not in self.CACHE:
self.CACHE['POST_CACHE'] = SimpleFileCache(version=version)
return self.CACHE['POST_CACHE']
@cached_property
def LESS_CACHE(self):
if 'LESS_CACHE' not in self.CACHE:
self.CACHE['LESS_CACHE'] = SimpleFileCache(version=version)
return self.CACHE['LESS_CACHE']
def normalize(self, p):
    """Return *p* as an absolute path, resolving relative paths against SETTINGS_DIR.

    ``None`` passes through unchanged.
    """
    if p is None:
        return None
    candidate = path(p).expand()
    if not candidate.isabs():
        candidate = (self.SETTINGS_DIR / candidate).abspath()
    return candidate
def normalize_list(self, p):
    """Normalize a single path or a collection of paths; always returns a list."""
    return [self.normalize(entry) for entry in wrap_list(p)]
|
tylerbutler/engineer | engineer/plugins/bundled.py | FinalizationPlugin.render_markdown | python | def render_markdown(post):
from engineer.conf import settings
# A hack to guarantee the YAML output is in a sensible order.
# The order, assuming all metadata should be written, should be:
# title
# status
# timestamp
# link
# via
# via-link
# slug
# tags
# updated
# template
# content-template
# url
d = [
('status', post.status.name),
('link', post.link),
('via', post.via),
('via-link', post.via_link),
('tags', post.tags),
('updated', post.updated_local.strftime(settings.TIME_FORMAT) if post.updated is not None else None),
('template', post.template if post.template != 'theme/post_detail.html' else None),
('content-template',
post.content_template if post.content_template != 'theme/_content_default.html' else None),
]
# The complete set of metadata that should be written is the union of the FINALIZE_METADATA.config setting and
# the set of metadata that was in the file originally.
finalization_config = FinalizationPlugin.get_settings()['config']
metadata_to_finalize = set([m for m, s in finalization_config.iteritems() if post.status in s])
metadata_to_finalize.update(post.metadata_original)
if 'title' in metadata_to_finalize:
# insert at the top of the list
d.insert(0, ('title', post.title))
if 'slug' in metadata_to_finalize:
# insert right before tags
d.insert(d.index(('tags', post.tags)), ('slug', post.slug))
if 'timestamp' in metadata_to_finalize:
# insert right after status
d.insert(d.index(('status', post.status.name)), ('timestamp',
post.timestamp_local.strftime(settings.TIME_FORMAT)))
if 'url' in metadata_to_finalize:
# insert at end of list
d.append(('url', post.url))
metadata = ''
for k, v in d:
if v is not None and len(v) > 0:
metadata += yaml.safe_dump(dict([(k, v)]), default_flow_style=False)
# handle custom metadata
if len(post.custom_properties):
metadata += '\n'
metadata += yaml.safe_dump(dict(post.custom_properties), default_flow_style=False)
return settings.JINJA_ENV.get_template(post.markdown_template_path).render(metadata=metadata,
content=post.content_finalized,
post=post) | Renders the post as Markdown using the template specified in :attr:`markdown_template_path`. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/plugins/bundled.py#L159-L222 | [
"def get_settings(cls):\n return cls._settings\n"
] | class FinalizationPlugin(PostProcessor):
_finalize_map_defaults = {
'timestamp': [Status.published],
'title': [Status.published, Status.review, Status.draft],
'slug': [Status.published, Status.review, Status.draft],
'url': [Status.review, Status.published]
}
_fenced_metadata_formats = ('fenced', 'jekyll', 'octopress')
_unfenced_metadata_formats = ('unfenced', 'engineer',)
_default_metadata_format = 'input'
setting_name = 'FINALIZE_METADATA'
default_settings = {
'config': _finalize_map_defaults,
'format': _default_metadata_format
}
disabled_msg = "A metadata finalization config is specified but the plugin is disabled."
@classmethod
def handle_settings(cls, config_dict, settings):
logger = cls.get_logger()
plugin_settings, user_supplied_settings = cls.initialize_settings(config_dict)
# POST METADATA FINALIZATION SETTINGS
if not cls.is_enabled and 'config' in user_supplied_settings:
cls.log_once(cls.disabled_msg, 'disabled_msg', logging.WARNING)
elif 'config' in user_supplied_settings:
for metadata_attribute, statuses in user_supplied_settings['config'].iteritems():
plugin_settings['config'][metadata_attribute] = [Status(s) for s in statuses]
valid_metadata_formats = set(flatten_list((
cls._default_metadata_format,
cls._fenced_metadata_formats,
cls._unfenced_metadata_formats,
)))
if plugin_settings['format'] not in valid_metadata_formats:
logger.warning("'%s' is not a valid METADATA_FORMAT setting. Defaulting to '%s'.",
plugin_settings.format, cls._default_metadata_format)
plugin_settings.format = cls._default_metadata_format
cls.store_settings(plugin_settings)
return config_dict
@classmethod
def preprocess(cls, post, metadata):
if cls.is_enabled():
# Get the list of metadata that's specified directly in the source file -- this metadata we *always* want
# to ensure gets output during finalization. Store it on the post object,
# then we'll use it later in the postprocess method.
post.metadata_original = set(metadata.keys())
return post, metadata
@classmethod
def postprocess(cls, post):
logger = cls.get_logger()
if cls.is_enabled():
metadata_format = cls.get_settings()['format']
if metadata_format in cls._fenced_metadata_formats:
logger.debug("METADATA_FORMAT is '%s', metadata will always be fenced during normalization.",
metadata_format)
post._fence = True
elif metadata_format in cls._unfenced_metadata_formats:
logger.debug("METADATA_FORMAT is '%s', metadata will always be unfenced during normalization.",
metadata_format)
post._fence = False
output = cls.render_markdown(post)
if cls.need_update(post, output):
logger.debug("Finalizing metadata for post '%s'" % post)
with open(post.source, mode='wb', encoding='UTF-8') as the_file:
the_file.write(output)
else:
logger.debug("No metadata finalization needed for post '%s'" % post)
return post
# noinspection PyProtectedMember
@classmethod
def need_update(cls, post, new_post_content):
from engineer.models import Post
old = Post._regex.match(post._file_contents_raw)
new = Post._regex.match(new_post_content)
old_fenced = (old.group('fence') is not None)
if old_fenced != post._fence:
return True
old_metadata = old.group('metadata').strip()
new_metadata = new.group('metadata').strip()
if new_metadata != old_metadata:
return True
old_content = old.group('content').strip()
new_content = new.group('content').strip()
if new_content != old_content:
return True
else:
return False
@staticmethod
|
tylerbutler/engineer | engineer/commands/core.py | _ArgparseMixin.parser | python | def parser(self):
if self._command_parser is None:
parents = []
if self.need_verbose:
parents.append(_verbose_parser)
if self.need_settings:
parents.append(_settings_parser)
self._command_parser = self._main_parser.add_parser(self.name,
help=self.help,
parents=parents,
formatter_class=argparse.RawDescriptionHelpFormatter)
return self._command_parser | Returns the appropriate parser to use for adding arguments to your command. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/commands/core.py#L165-L178 | null | class _ArgparseMixin(_CommandMixin):
_name = None
_help = None
_need_settings = True
_need_verbose = True
@property
def name(self):
"""The name of the command."""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def help(self):
"""The help string for the command."""
return self._help
@help.setter
def help(self, value):
self._help = value
@property
def need_settings(self):
"""Defaults to True. Set to False if the command does not require an Engineer config file."""
return self._need_settings
@need_settings.setter
def need_settings(self, value):
self._need_settings = value
@property
def need_verbose(self):
"""
Defaults to True. Set to False if the command does not support the standard Engineer
:option:`verbose<engineer -v>` option.
"""
return self._need_verbose
@need_verbose.setter
def need_verbose(self, value):
self._need_verbose = value
@property
def setup_command(self):
self.add_arguments()
self._finalize()
def add_arguments(self):
"""Override this method in subclasses to add arguments to parsers as needed."""
raise NotImplementedError()
def _finalize(self):
if not self.parser.get_default('handle'):
self.parser.set_defaults(handler_function=self.handler_function)
self.parser.set_defaults(need_settings=self.need_settings)
|
tylerbutler/engineer | engineer/plugins/core.py | PluginMixin.get_logger | python | def get_logger(cls, custom_name=None):
name = custom_name or cls.get_name()
return logging.getLogger(name) | Returns a logger for the plugin. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/plugins/core.py#L80-L83 | [
"def get_name(cls):\n return get_class_string(cls)\n"
] | class PluginMixin(object):
_logs = '_logs'
_enabled = 'enabled'
_required_settings = {
_enabled: False,
_logs: dict()
}
_settings = _required_settings
setting_name = None
default_settings = {}
@classmethod
def get_name(cls):
return get_class_string(cls)
@classmethod
@classmethod
def get_setting_name(cls):
if cls.setting_name is None:
# raise NotImplementedError("A setting_name property must be set on the class.")
return cls.__name__ + '_SETTINGS'
else:
return cls.setting_name
@classmethod
def get_default_settings(cls):
return cls.default_settings
@classmethod
def handle_settings(cls, config_dict, settings):
"""
If a plugin defines its own settings, it may also need to handle those settings in some unique way when the
Engineer configuration files are being read. By overriding this method,
plugins can ensure such unique handling of their settings is done.
Note that a plugin does not have to handle its own settings unless there is unique processing that must be
done. Any settings that are unknown to Engineer will automatically be added as attributes on the
:class:`~engineer.conf.EngineerConfiguration` object. This method should only be implemented if the settings
must be processed in some more complicated way prior to being added to the global configuration object.
Implementations of this method should check for the plugin-specific settings in ``config_dict`` and set
appropriate attributes/properties on the ``settings`` object. In addition, settings that
have been handled should be removed from ``config_dict``. This ensures they are not handled by
other plugins or the default Engineer code.
:param config_dict: The dict of as-yet unhandled settings in the current settings file.
:param settings: The global :class:`~engineer.conf.EngineerConfiguration` object that contains all the
:param settings: The global :class:`~engineer.conf.EngineerConfiguration` object that contains all the
settings for the current Engineer process. Any custom settings should be added to this object.
:returns: The modified ``config_dict`` object.
"""
cls.initialize_settings(config_dict)
return config_dict
@classmethod
def initialize_settings(cls, config_dict):
# Combine the required, default, and user-supplied settings
plugin_settings = cls._required_settings.copy()
user_supplied_settings = config_dict.pop(cls.get_setting_name(), {})
update_additive(plugin_settings, cls.get_default_settings())
update_additive(plugin_settings, user_supplied_settings)
cls.store_settings(plugin_settings)
return plugin_settings, user_supplied_settings
@classmethod
def store_settings(cls, plugin_settings):
cls._settings = plugin_settings
@classmethod
def get_settings(cls):
return cls._settings
@classmethod
def is_enabled(cls):
return cls.get_settings()[cls._enabled]
@classmethod
def log_once(cls, msg, key, level=logging.DEBUG):
if cls.get_settings()[cls._logs].get(key, False):
return
else:
cls.get_logger().log(level, msg)
cls.get_settings()[cls._logs][key] = True
|
tylerbutler/engineer | engineer/models.py | Post.url | python | def url(self):
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url | The site-relative URL to the post. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L157-L162 | null | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
def content(self):
"""The post's content in HTML format."""
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format))
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
def is_published(self):
"""``True`` if the post is published, ``False`` otherwise."""
return self.status == Status.published and self.timestamp <= arrow.now()
@property
def is_pending(self):
"""``True`` if the post is marked as published but has a timestamp set in the future."""
return self.status == Status.published and self.timestamp >= arrow.now()
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
)
def set_finalized_content(self, content, caller_class):
"""
Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``.
"""
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | Post.content | python | def content(self):
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format)) | The post's content in HTML format. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L188-L193 | [
"def wrap_list(item):\n \"\"\"\n Returns an object as a list.\n\n If the object is a list, it is returned directly. If it is a tuple or set, it\n is returned as a list. If it is another object, it is wrapped in a list and\n returned.\n \"\"\"\n if item is None:\n return []\n elif isinstance(item, list):\n return item\n elif isinstance(item, (tuple, set)):\n return list(item)\n else:\n return [item]\n"
] | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
def url(self):
"""The site-relative URL to the post."""
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
def is_published(self):
"""``True`` if the post is published, ``False`` otherwise."""
return self.status == Status.published and self.timestamp <= arrow.now()
@property
def is_pending(self):
"""``True`` if the post is marked as published but has a timestamp set in the future."""
return self.status == Status.published and self.timestamp >= arrow.now()
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
)
def set_finalized_content(self, content, caller_class):
"""
Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``.
"""
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | Post.is_published | python | def is_published(self):
return self.status == Status.published and self.timestamp <= arrow.now() | ``True`` if the post is published, ``False`` otherwise. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L243-L245 | null | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
def url(self):
"""The site-relative URL to the post."""
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
def content(self):
"""The post's content in HTML format."""
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format))
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
@property
def is_pending(self):
"""``True`` if the post is marked as published but has a timestamp set in the future."""
return self.status == Status.published and self.timestamp >= arrow.now()
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
)
def set_finalized_content(self, content, caller_class):
"""
Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``.
"""
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | Post.is_pending | python | def is_pending(self):
return self.status == Status.published and self.timestamp >= arrow.now() | ``True`` if the post is marked as published but has a timestamp set in the future. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L248-L250 | null | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
def url(self):
"""The site-relative URL to the post."""
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
def content(self):
"""The post's content in HTML format."""
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format))
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
def is_published(self):
"""``True`` if the post is published, ``False`` otherwise."""
return self.status == Status.published and self.timestamp <= arrow.now()
@property
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
)
def set_finalized_content(self, content, caller_class):
"""
Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``.
"""
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | Post.render_item | python | def render_item(self, all_posts):
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
) | Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L319-L342 | null | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
def url(self):
"""The site-relative URL to the post."""
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
def content(self):
"""The post's content in HTML format."""
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format))
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
def is_published(self):
"""``True`` if the post is published, ``False`` otherwise."""
return self.status == Status.published and self.timestamp <= arrow.now()
@property
def is_pending(self):
"""``True`` if the post is marked as published but has a timestamp set in the future."""
return self.status == Status.published and self.timestamp >= arrow.now()
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def set_finalized_content(self, content, caller_class):
"""
Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``.
"""
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | Post.set_finalized_content | python | def set_finalized_content(self, content, caller_class):
caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
if not FinalizationPlugin.is_enabled():
logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
"disabled. This setting must be enabled for plugins to modify post content. "
"Plugin: %s" % caller)
return False
perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
if caller not in perms and '*' not in perms:
logger.warning("A plugin is trying to modify the post content but does not have the "
"MODIFY_RAW_POST permission. Plugin: %s" % caller)
return False
else:
logger.debug("%s is setting post source content." % caller)
self._content_finalized = self._remove_all_stashed_content()
return True | Plugins can call this method to modify post content that is written back to source post files.
This method can be called at any time by anyone, but it has no effect if the caller is not granted the
``MODIFY_RAW_POST`` permission in the Engineer configuration.
The :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting must also be enabled in order for
calls to this method to have any effect.
:param content: The modified post content that should be written back to the post source file.
:param caller_class: The class of the plugin that's calling this method.
:return: ``True`` if the content was successfully modified; otherwise ``False``. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L344-L371 | [
"def is_enabled(cls):\n return cls.get_settings()[cls._enabled]\n",
"def _remove_all_stashed_content(self):\n _regex = re.compile('<!-- \\|\\|stash\\|\\| -->.*?<!-- \\|\\|end_stash\\|\\| -->', re.DOTALL | re.MULTILINE)\n cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)\n if num_stashes > 0:\n logger.debug(\"Removed %i stash sections from finalized content.\" % num_stashes)\n return cleaned_content\n"
] | class Post(object):
"""
Represents a post written in Markdown and stored in a file.
:param source: path to the source file for the post.
"""
DEFAULT_CONTENT_TEMPLATE = 'theme/_content_default.html'
DEFAULT_TEMPLATE = 'theme/post_detail.html'
_regex = re.compile(
r'^[\n|\r\n]*(?P<fence>---)?[\n|\r\n]*(?P<metadata>.+?)[\n|\r\n]*---[\n|\r\n]*(?P<content>.*)[\n|\r\n]*',
re.DOTALL)
# Make _content_raw only settable once. This is just to help prevent data loss that might be caused by
# inadvertantly messing with this property.
_content_raw = setonce()
_file_contents_raw = setonce()
def __init__(self, source):
self._content_stash = []
self.source = path(source).abspath()
"""The absolute path to the source file for the post."""
self.markdown_template_path = 'core/post.md'
"""The path to the template to use to transform the post back into a :ref:`post source file <posts>`."""
# This will get set to `True in _parse_source if the source file has 'fenced metadata' (like Jekyll)
self._fence = False
metadata, self._content_raw = self._parse_source()
# if not hasattr(self, 'content_preprocessed'):
self._content_preprocessed = self.content_raw
self._content_finalized = self.content_raw
content_template = metadata.pop('content-template', metadata.pop('content_template',
self.DEFAULT_CONTENT_TEMPLATE))
if not content_template.endswith('.html'):
content_template += '.html'
self.content_template = content_template
"""The path to the template to use to transform the post *content* into HTML."""
template = metadata.pop('template', self.DEFAULT_TEMPLATE)
if not template.endswith('.html'):
template += '.html'
self.template = template
"""The path to the template to use to transform the post into HTML."""
self.title = metadata.pop('title', self.source.namebase.replace('-', ' ').replace('_', ' ').title())
"""The title of the post."""
self.slug = metadata.pop('slug', slugify(self.title))
"""The slug for the post."""
self._tags = wrap_list(metadata.pop('tags', []))
self.link = metadata.pop('link', None)
"""The post's :ref:`external link <post link>`."""
self.via = metadata.pop('via', None)
"""The post's attribution name."""
self.via_link = metadata.pop('via-link', metadata.pop('via_link', None))
"""The post's attribution link."""
try:
self.status = Status(metadata.pop('status', Status.draft.name))
"""The status of the post (published or draft)."""
except ValueError:
logger.warning("'%s': Invalid status value in metadata. Defaulting to 'draft'." % self.title)
self.status = Status.draft
timestamp = metadata.pop('timestamp', None)
self.timestamp = self._clean_datetime(timestamp)
"""The date/time the post was published or written."""
updated = metadata.pop('updated', None)
self.updated = self._clean_datetime(updated) if updated is not None else None
"""The date/time the post was updated."""
# determine the URL based on the HOME_URL and the PERMALINK_STYLE settings
permalink = settings.PERMALINK_STYLE.format(year=unicode(self.timestamp_local.year),
month=u'{0:02d}'.format(self.timestamp_local.month),
day=u'{0:02d}'.format(self.timestamp_local.day),
i_month=self.timestamp_local.month,
i_day=self.timestamp_local.day,
title=self.slug, # for Jekyll compatibility
slug=self.slug,
timestamp=self.timestamp_local.datetime,
post=self)
if permalink.endswith('index.html'):
permalink = permalink[:-10]
elif permalink.endswith('.html') or permalink.endswith('/'):
pass
else:
permalink += '.html'
self._permalink = permalink
# noinspection PyUnresolvedReferences
# Handle any preprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.preprocess(self, metadata)
# keep track of any remaining properties in the post metadata
metadata.pop('url', None) # remove the url property from the metadata dict before copy
self.custom_properties = copy(metadata)
"""A dict of any custom metadata properties specified in the post."""
# noinspection PyUnresolvedReferences
# handle any postprocessor plugins
for plugin in PostProcessor.plugins:
if plugin.is_enabled():
plugin.postprocess(self)
# update cache
settings.POST_CACHE[self.source] = self
@cached_property
def url(self):
"""The site-relative URL to the post."""
url = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
permalink=self._permalink)
url = re.sub(r'/{2,}', r'/', url)
return url
@cached_property
def absolute_url(self):
"""The absolute URL to the post."""
return u'{0}{1}'.format(settings.SITE_URL, self.url)
@cached_property
def output_path(self):
url = self._permalink
if url.endswith('/'):
url += 'index.html'
return path(settings.OUTPUT_CACHE_DIR / url)
@cached_property
def output_file_name(self):
r = self.output_path.name
return r
@cached_property
def tags(self):
"""A list of strings representing the tags applied to the post."""
r = [unicode(t) for t in self._tags]
return r
@property
def content(self):
"""The post's content in HTML format."""
content_list = wrap_list(self._content_preprocessed)
content_list.extend(self._content_stash)
content_to_render = '\n'.join(content_list)
return typogrify(self.content_renderer.render(content_to_render, self.format))
@property
def content_preprocessed(self):
return self._content_preprocessed
@content_preprocessed.setter
def content_preprocessed(self, value):
self._content_preprocessed = value
# def set_content_preprocessed(self, content, caller_class):
# caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else get_class_string(caller_class)
# perms = settings.PLUGIN_PERMISSIONS['MODIFY_PREPROCESSED_CONTENT']
# if caller not in perms and '*' not in perms:
# logger.warning("A plugin is trying to modify the post's preprocessed content but does not have the "
# "MODIFY_PREPROCESSED_CONTENT permission. Plugin: %s" % caller)
# return False
# else:
# logger.debug("%s is setting post source content." % caller)
# self._content_preprocessed = content
# return True
@property
def content_finalized(self):
return self._content_finalized
@property
def content_raw(self):
return self._content_raw
@cached_property
def content_renderer(self):
return settings.POST_RENDERER_CONFIG[self.format]()
@property
def description(self):
regex = re.compile(r'^.*?<p>(?P<para>.*?)</p>.*?', re.DOTALL)
matches = re.match(regex, self.content)
return matches.group('para')
@property
def format(self):
return self.source.ext
@property
def is_draft(self):
"""``True`` if the post is a draft, ``False`` otherwise."""
return self.status == Status.draft
@property
def is_published(self):
"""``True`` if the post is published, ``False`` otherwise."""
return self.status == Status.published and self.timestamp <= arrow.now()
@property
def is_pending(self):
"""``True`` if the post is marked as published but has a timestamp set in the future."""
return self.status == Status.published and self.timestamp >= arrow.now()
@property
def is_external_link(self):
"""``True`` if the post has an associated external link. ``False`` otherwise."""
return self.link is not None and self.link != ''
@property
def timestamp_local(self):
"""
The post's :attr:`timestamp` in 'local' time.
Local time is determined by the :attr:`~engineer.conf.EngineerConfiguration.POST_TIMEZONE` setting.
"""
return localtime(self.timestamp)
@property
def updated_local(self):
return localtime(self.updated)
@staticmethod
def _clean_datetime(datetime):
if datetime is None:
datetime = arrow.now(settings.POST_TIMEZONE)
# Reduce resolution of timestamp
datetime = datetime.replace(second=0, microsecond=0)
else:
timestamp_dt = parser.parse(str(datetime))
if timestamp_dt.tzinfo is None:
# convert to UTC assuming input time is in the POST_TIMEZONE
datetime = arrow.get(timestamp_dt, settings.POST_TIMEZONE)
else:
datetime = arrow.get(timestamp_dt)
return datetime.to('utc')
def _parse_source(self):
try:
with open(self.source, mode='r') as the_file:
item = unicode(the_file.read())
except UnicodeDecodeError:
with open(self.source, mode='r', encoding='UTF-8') as the_file:
item = the_file.read()
self._file_contents_raw = item
parsed_content = re.match(self._regex, item)
if parsed_content is None or parsed_content.group('metadata') is None:
# Parsing failed, maybe there's no metadata
raise PostMetadataError()
if parsed_content.group('fence') is not None:
self._fence = True
# 'Clean' the YAML section since there might be tab characters
metadata = parsed_content.group('metadata').replace('\t', ' ')
try:
metadata = yaml.load(metadata)
except ScannerError:
raise PostMetadataError("YAML error parsing metadata.")
if not isinstance(metadata, dict):
raise PostMetadataError("Metadata isn't a dict. Instead, it's a %s." % type(metadata))
# Make the metadata dict case insensitive
metadata = CaseInsensitiveDict(metadata)
content = parsed_content.group('content')
return metadata, content
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
)
def stash_content(self, stash_content):
self._content_stash.append(stash_content)
# self.content_preprocessed = '''%s
# <!-- ||stash|| -->
# %s
# <!-- ||end_stash|| -->''' % (self.content_preprocessed, stash_content)
def _remove_all_stashed_content(self):
_regex = re.compile('<!-- \|\|stash\|\| -->.*?<!-- \|\|end_stash\|\| -->', re.DOTALL | re.MULTILINE)
cleaned_content, num_stashes = _regex.subn('', self.content_preprocessed)
if num_stashes > 0:
logger.debug("Removed %i stash sections from finalized content." % num_stashes)
return cleaned_content
def __unicode__(self):
return self.slug
__repr__ = __unicode__
|
tylerbutler/engineer | engineer/models.py | PostCollection.review | python | def review(self):
return PostCollection([p for p in self if p.status == Status.review]) | Returns a new PostCollection containing the subset of posts whose status is :attr:`~Status.review`. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L423-L425 | null | class PostCollection(list):
"""A collection of :class:`Posts <engineer.models.Post>`."""
# noinspection PyTypeChecker
def __init__(self, seq=()):
list.__init__(self, seq)
self.listpage_template = settings.JINJA_ENV.get_template('theme/post_list.html')
self.archive_template = settings.JINJA_ENV.get_template('theme/post_archives.html')
def paginate(self, paginate_by=None):
if paginate_by is None:
paginate_by = settings.ROLLUP_PAGE_SIZE
return chunk(self, paginate_by, PostCollection)
@cached_property
def published(self):
"""Returns a new PostCollection containing the subset of posts that are published."""
return PostCollection([p for p in self if p.is_published is True])
@cached_property
def drafts(self):
"""Returns a new PostCollection containing the subset of posts that are drafts."""
return PostCollection([p for p in self if p.is_draft is True])
@property
def pending(self):
"""Returns a new PostCollection containing the subset of posts that are pending."""
return PostCollection([p for p in self if p.is_pending is True])
@cached_property
@cached_property
def all_tags(self):
"""Returns a list of all the unique tags, as strings, that posts in the collection have."""
tags = set()
for post in self:
tags.update(post.tags)
return list(tags)
def tagged(self, tag):
"""Returns a new PostCollection containing the subset of posts that are tagged with *tag*."""
return PostCollection([p for p in self if unicode(tag) in p.tags])
@staticmethod
def output_path(slice_num):
return path(settings.OUTPUT_CACHE_DIR / ("page/%s/index.html" % slice_num))
def render_listpage_html(self, slice_num, has_next, has_previous, all_posts=None):
return self.listpage_template.render(
post_list=self,
slice_num=slice_num,
has_next=has_next,
has_previous=has_previous,
all_posts=all_posts,
nav_context='listpage')
def render_archive_html(self, all_posts=None):
return self.archive_template.render(post_list=self,
all_posts=all_posts,
nav_context='archive')
def render_tag_html(self, tag, all_posts=None):
return settings.JINJA_ENV.get_template('theme/tags_list.html').render(tag=tag,
post_list=self.tagged(tag),
all_posts=all_posts,
nav_context='tag')
|
tylerbutler/engineer | engineer/models.py | PostCollection.all_tags | python | def all_tags(self):
tags = set()
for post in self:
tags.update(post.tags)
return list(tags) | Returns a list of all the unique tags, as strings, that posts in the collection have. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L428-L433 | null | class PostCollection(list):
"""A collection of :class:`Posts <engineer.models.Post>`."""
# noinspection PyTypeChecker
def __init__(self, seq=()):
list.__init__(self, seq)
self.listpage_template = settings.JINJA_ENV.get_template('theme/post_list.html')
self.archive_template = settings.JINJA_ENV.get_template('theme/post_archives.html')
def paginate(self, paginate_by=None):
if paginate_by is None:
paginate_by = settings.ROLLUP_PAGE_SIZE
return chunk(self, paginate_by, PostCollection)
@cached_property
def published(self):
"""Returns a new PostCollection containing the subset of posts that are published."""
return PostCollection([p for p in self if p.is_published is True])
@cached_property
def drafts(self):
"""Returns a new PostCollection containing the subset of posts that are drafts."""
return PostCollection([p for p in self if p.is_draft is True])
@property
def pending(self):
"""Returns a new PostCollection containing the subset of posts that are pending."""
return PostCollection([p for p in self if p.is_pending is True])
@cached_property
def review(self):
"""Returns a new PostCollection containing the subset of posts whose status is :attr:`~Status.review`."""
return PostCollection([p for p in self if p.status == Status.review])
@cached_property
def tagged(self, tag):
"""Returns a new PostCollection containing the subset of posts that are tagged with *tag*."""
return PostCollection([p for p in self if unicode(tag) in p.tags])
@staticmethod
def output_path(slice_num):
return path(settings.OUTPUT_CACHE_DIR / ("page/%s/index.html" % slice_num))
def render_listpage_html(self, slice_num, has_next, has_previous, all_posts=None):
return self.listpage_template.render(
post_list=self,
slice_num=slice_num,
has_next=has_next,
has_previous=has_previous,
all_posts=all_posts,
nav_context='listpage')
def render_archive_html(self, all_posts=None):
return self.archive_template.render(post_list=self,
all_posts=all_posts,
nav_context='archive')
def render_tag_html(self, tag, all_posts=None):
return settings.JINJA_ENV.get_template('theme/tags_list.html').render(tag=tag,
post_list=self.tagged(tag),
all_posts=all_posts,
nav_context='tag')
|
tylerbutler/engineer | engineer/models.py | PostCollection.tagged | python | def tagged(self, tag):
return PostCollection([p for p in self if unicode(tag) in p.tags]) | Returns a new PostCollection containing the subset of posts that are tagged with *tag*. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/models.py#L435-L437 | null | class PostCollection(list):
"""A collection of :class:`Posts <engineer.models.Post>`."""
# noinspection PyTypeChecker
def __init__(self, seq=()):
list.__init__(self, seq)
self.listpage_template = settings.JINJA_ENV.get_template('theme/post_list.html')
self.archive_template = settings.JINJA_ENV.get_template('theme/post_archives.html')
def paginate(self, paginate_by=None):
if paginate_by is None:
paginate_by = settings.ROLLUP_PAGE_SIZE
return chunk(self, paginate_by, PostCollection)
@cached_property
def published(self):
"""Returns a new PostCollection containing the subset of posts that are published."""
return PostCollection([p for p in self if p.is_published is True])
@cached_property
def drafts(self):
"""Returns a new PostCollection containing the subset of posts that are drafts."""
return PostCollection([p for p in self if p.is_draft is True])
@property
def pending(self):
"""Returns a new PostCollection containing the subset of posts that are pending."""
return PostCollection([p for p in self if p.is_pending is True])
@cached_property
def review(self):
"""Returns a new PostCollection containing the subset of posts whose status is :attr:`~Status.review`."""
return PostCollection([p for p in self if p.status == Status.review])
@cached_property
def all_tags(self):
"""Returns a list of all the unique tags, as strings, that posts in the collection have."""
tags = set()
for post in self:
tags.update(post.tags)
return list(tags)
@staticmethod
def output_path(slice_num):
return path(settings.OUTPUT_CACHE_DIR / ("page/%s/index.html" % slice_num))
def render_listpage_html(self, slice_num, has_next, has_previous, all_posts=None):
return self.listpage_template.render(
post_list=self,
slice_num=slice_num,
has_next=has_next,
has_previous=has_previous,
all_posts=all_posts,
nav_context='listpage')
def render_archive_html(self, all_posts=None):
return self.archive_template.render(post_list=self,
all_posts=all_posts,
nav_context='archive')
def render_tag_html(self, tag, all_posts=None):
return settings.JINJA_ENV.get_template('theme/tags_list.html').render(tag=tag,
post_list=self.tagged(tag),
all_posts=all_posts,
nav_context='tag')
|
tylerbutler/engineer | engineer/devtools/theme_tools.py | compile_theme | python | def compile_theme(theme_id=None):
from engineer.processors import convert_less
from engineer.themes import ThemeManager
if theme_id is None:
themes = ThemeManager.themes().values()
else:
themes = [ThemeManager.theme(theme_id)]
with(indent(2)):
puts(colored.yellow("Compiling %s themes." % len(themes)))
for theme in themes:
theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
with indent(4):
puts("Compiling...")
convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
theme_output_path,
minify=True)
puts(colored.green("Done.", bold=True)) | Compiles a theme. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L20-L42 | [
"def convert_less(infile, outfile, minify=True):\n if minify:\n preprocessor = str(settings.LESS_PREPROCESSOR) + ' {infile} {outfile} -x'\n else:\n preprocessor = str(settings.LESS_PREPROCESSOR) + ' {infile} {outfile}'\n\n cmd = str.format(preprocessor, infile=infile, outfile=outfile).split()\n try:\n subprocess.check_output(cmd)\n except subprocess.CalledProcessError as e:\n logger.critical(\"Error pre-processing LESS file %s.\" % file)\n logger.critical(e.output)\n exit(1355)\n except WindowsError as e:\n logger.critical(\"Unexpected error pre-processing LESS file %s.\" % file)\n logger.critical(e.strerror)\n exit(1355)\n except Exception as e:\n logger.critical(\"Unexpected error pre-processing LESS file %s.\" % file)\n logger.critical(e.message)\n if platform.system() != 'Windows':\n logger.critical(\"Are you sure lessc is on your path?\")\n exit(1355)\n"
] | # coding=utf-8
# noinspection PyPackageRequirements
from argh import named
# noinspection PyPackageRequirements
from clint.textui import colored, indent, puts
# noinspection PyPackageRequirements
from clint.textui import columns
from engineer.log import bootstrap
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
bootstrap()
# noinspection PyShadowingBuiltins
@named('compile')
# noinspection PyShadowingBuiltins
@named('list')
def list_theme():
"""List all available Engineer themes."""
from engineer.themes import ThemeManager
themes = ThemeManager.themes()
col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()]))
themes = ThemeManager.themes_by_finder()
for finder in sorted(themes.iterkeys()):
if len(themes[finder]) > 0:
puts("%s: " % finder)
for theme in sorted(themes[finder], key=lambda _: _.id):
with indent(4):
puts(
columns(
[colored.cyan("%s:" % theme.id), col1],
[colored.white(theme.root_path, bold=True), col2]
)
)
if __name__ == '__main__':
compile_theme()
|
tylerbutler/engineer | engineer/devtools/theme_tools.py | list_theme | python | def list_theme():
from engineer.themes import ThemeManager
themes = ThemeManager.themes()
col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()]))
themes = ThemeManager.themes_by_finder()
for finder in sorted(themes.iterkeys()):
if len(themes[finder]) > 0:
puts("%s: " % finder)
for theme in sorted(themes[finder], key=lambda _: _.id):
with indent(4):
puts(
columns(
[colored.cyan("%s:" % theme.id), col1],
[colored.white(theme.root_path, bold=True), col2]
)
) | List all available Engineer themes. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L47-L65 | null | # coding=utf-8
# noinspection PyPackageRequirements
from argh import named
# noinspection PyPackageRequirements
from clint.textui import colored, indent, puts
# noinspection PyPackageRequirements
from clint.textui import columns
from engineer.log import bootstrap
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
bootstrap()
# noinspection PyShadowingBuiltins
@named('compile')
def compile_theme(theme_id=None):
"""Compiles a theme."""
from engineer.processors import convert_less
from engineer.themes import ThemeManager
if theme_id is None:
themes = ThemeManager.themes().values()
else:
themes = [ThemeManager.theme(theme_id)]
with(indent(2)):
puts(colored.yellow("Compiling %s themes." % len(themes)))
for theme in themes:
theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
with indent(4):
puts("Compiling...")
convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
theme_output_path,
minify=True)
puts(colored.green("Done.", bold=True))
# noinspection PyShadowingBuiltins
@named('list')
if __name__ == '__main__':
compile_theme()
|
tylerbutler/engineer | engineer/filters.py | markdown_filter | python | def markdown_filter(value, typogrify=True, extensions=('extra', 'codehilite')):
# Determine how many leading spaces there are, then remove that number from the beginning of each line.
match = re.match(r'(\n*)(\s*)', value)
s, e = match.span(2)
pattern = re.compile(r'^ {%s}' % (e - s), # use ^ in the pattern so mid-string matches won't be removed
flags=re.MULTILINE) # use multi-line mode so ^ will match the start of each line
output = pattern.sub(u'', value)
if typogrify:
return jinja_filters.typogrify(markdown(output, extensions=extensions))
else:
return markdown(output, extensions=extensions) | A smart wrapper around the ``markdown`` and ``typogrify`` functions that automatically removes leading
whitespace before every line. This is necessary because Markdown is whitespace-sensitive. Consider some Markdown
content in a template that looks like this:
.. codeblock:: html+jinja
<article>
{% filter markdown %}
## A Heading
Some content here.
Code goes here.
More lines of code
And more.
Closing thoughts
{% endfilter %}
</article>
In this example, a typical Markdown filter would see the leading whitespace before the first heading and assume
it was a code block, which would then cause the entire Markdown document to be rendered incorrectly. You may have
a document with spacing like this because your text editor automatically 'pretty-prints' the markup,
including the content within the filter tag.
This filter automatically removes the leading whitespace - leaving code block and other expected offsets in place
of course - so that rendering occurs correctly regardless of the nested spacing of the source document. | train | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/filters.py#L24-L63 | null | # coding=utf-8
import logging
import re
import humanize
from dateutil import tz
from markdown import markdown
from path import path
# noinspection PyPep8Naming
from typogrify import filters as Typogrify
from typogrify.templatetags import jinja_filters
from engineer.util import wrap_list
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
logger = logging.getLogger(__name__)
def format_datetime(value, format_string='%Y-%m-%d'):
return value.strftime(format_string)
def localtime(value, timezone=None):
from engineer.conf import settings
if timezone is None:
timezone = settings.POST_TIMEZONE
if isinstance(timezone, basestring):
timezone = tz.gettz(timezone)
return value.to(timezone)
def naturaltime(value):
from engineer.conf import settings
server_time = localtime(value, settings.SERVER_TIMEZONE).replace(tzinfo=None)
friendly = humanize.naturaltime(server_time)
return friendly
# noinspection PyShadowingBuiltins
def compress(value):
from engineer.conf import settings
if not settings.COMPRESSOR_ENABLED:
return value
else: # COMPRESSOR_ENABLED == True
import html5lib
# noinspection PyUnresolvedReferences,PyUnusedLocal
def _min_js_slim(js_string):
# NOTE: The slimit filter seems to break some scripts. I'm not sure why. I'm leaving this code in for
# posterity, but it's not functional right now and shouldn't be used.
from slimit import minify
return minify(js_string)
doc = html5lib.parseFragment(value.strip())
to_compress = [l for l in doc.childNodes if
l.name in ('link', 'script')]
for item in to_compress:
if item.name == 'link':
src = item.attributes['href']
compression_type = 'css'
elif item.name == 'script':
if 'src' in item.attributes:
src = item.attributes['src']
compression_type = 'js'
else: # inline script
continue
# TODO: Inline script minification.
# has_inline = True
# if len(item.childNodes) > 1:
# raise Exception("For some reason the inline script node has more than one child node.")
# else:
# item.childNodes[0].value = _min_js(item.childNodes[0].value)
else:
raise Exception("Hmmm, wasn't expecting a '%s' here." % item.name)
if src.startswith(settings.HOME_URL):
# trim the HOME_URL since it won't be part of the local path to the file
src = src[len(settings.HOME_URL):]
elif src.startswith('/'):
# trim the leading '/' from the src so we can combine it with the OUTPUT_CACHE_DIR to get a path
src = src[1:]
file = path(settings.OUTPUT_CACHE_DIR / src).abspath()
if file.ext[1:] in settings.COMPRESSOR_FILE_EXTENSIONS:
settings.COMPRESS_FILE_LIST.add((file, compression_type))
# TODO: Inline script minification.
# if has_inline: # Handle inline script
# # Since we have inline script, we need to serialize the minified content into a
# # string and return it
# walker = treewalkers.getTreeWalker('simpletree')
# stream = walker(doc)
# s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False,
# #strip_whitespace=True,
# quote_attr_values=True)
# generator = s.serialize(stream)
# output = ''
# for tag in generator:
# output += tag
return value
def typogrify_no_widont(value):
    """Apply the standard Typogrify filters (amp, smartypants, caps,
    initial_quotes) to *value*, deliberately skipping the widont filter."""
    filter_chain = (Typogrify.amp, Typogrify.smartypants,
                    Typogrify.caps, Typogrify.initial_quotes)
    for apply_filter in filter_chain:
        value = apply_filter(value)
    return value
def img(img_path, classes=None, width=None, height=None, title=None, alt=None, link=None):
    """Render the theme's ``_img.html`` template for *img_path* and return the
    markup with all leading whitespace stripped from each line."""
    from engineer.conf import settings
    template = settings.JINJA_ENV.get_template('theme/_img.html')
    rendered = template.render(source=img_path,
                               classes=wrap_list(classes),
                               width=width,
                               height=height,
                               title=title,
                               alt=alt,
                               link=link)
    # Remove indentation so the emitted HTML carries no leading whitespace.
    return re.compile(r"^\s+", re.MULTILINE).sub("", rendered)
|
drericstrong/pyedna | pyedna/calc_config.py | CalcConfig.write_relationships | python | def write_relationships(self, file_name, flat=True):
with open(file_name, 'w') as writer:
if flat:
self._write_relationships_flat(writer)
else:
self._write_relationships_non_flat(writer) | This module will output the eDNA tags which are used inside each
calculation.
If flat=True, data will be written flat, like:
ADE1CA01, ADE1PI01, ADE1PI02
If flat=False, data will be written in the non-flat way, like:
ADE1CA01, ADE1PI01
ADE1CA01, ADE1PI02
:param file_name: the output filename to write the relationships,
which should include the '.csv' extension
:param flat: True or False | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/calc_config.py#L87-L107 | [
"def _write_relationships_flat(self, writer):\n # Write relationships in flat form, e.g.\n # ADE1CA01, ADE1PI01, ADE1PI02\n for ii, row in self.config.iterrows():\n calc_tag = row['FullTag'].lower()\n for associated_tag in row['TagsInCalc'].split(';'):\n if associated_tag:\n writer.write(','.join([calc_tag, associated_tag]) + '\\n')\n",
"def _write_relationships_non_flat(self, writer):\n # Write relationships in non-flat form, e.g.\n # ADE1CA01, ADE1PI01\n # ADE1CA01, ADE1PI02\n for ii, row in self.config.iterrows():\n calc_tag = row['FullTag'].lower()\n associated_tag = row['TagsInCalc']\n if associated_tag:\n writer.write(','.join([calc_tag, associated_tag]))\n"
] | class CalcConfig:
def __init__(self, filename, site_override=None):
    """
    A class is meant to parse an eDNA CM.DB file that contains
    calculations. The CM.DB file is simply a sqlite database, but the
    'Equation' field inside may contain references to other eDNA tags. It
    is useful to know which tags are associated with which calculations.

    :param filename: the filename of the CM.DB file
    :param site_override: a way to override the 'Site' field of the CB.DB
    """
    # CM.DB is an ordinary sqlite file; build a sqlalchemy URL for it.
    sql_file = 'sqlite+pysqlite:///' + filename
    engine = create_engine(sql_file, module=sqlite)
    # NOTE(review): the SELECT lists lowercase column names but the rest of
    # the class indexes self.config with capitalized names ('Site',
    # 'Service', 'Point_ID', 'Equation'); sqlite presumably reports the
    # names as declared in the table schema -- confirm against a real CM.DB.
    sql = "SELECT id,site,service,point_id,desc,equation from points;"
    self.config = pd.read_sql(sql, engine)
    # Derive fully-qualified tag names, then extract the tags referenced
    # inside each calculation's equation.
    self._create_full_tags(site_override)
    self._find_tags_in_calc()
def _create_full_tags(self, site_override=None):
# This function will concatenate the Site, Service, and Point_ID fields
# so that the full site.service.tag is created.
original_site = self.config['Site'].str.strip()
site = site_override if site_override else original_site
service = self.config['Service'].str.strip()
tag = self.config['Point_ID'].str.strip()
self.config['FullTag'] = '.'.join([site, service, tag])
def _find_tags_in_calc(self):
# This function will find all eDNA tags that are mentioned within the
# 'Equation' field. eDNA tags are always preceded by dnagetrtvalue(
# so the function will find all locations for that function.
search_string = 'dnagetrtvalue\("(.*?)"\)'
for ii, row in self.config.iterrows():
equation = row['Equation'].lower()
results = []
for tag in re.findall(search_string, equation):
results.append(tag)
joined_results = ';'.join(results)
self.config.ix[ii, 'TagsInCalc'] = joined_results
def _write_relationships_flat(self, writer):
# Write relationships in flat form, e.g.
# ADE1CA01, ADE1PI01, ADE1PI02
for ii, row in self.config.iterrows():
calc_tag = row['FullTag'].lower()
for associated_tag in row['TagsInCalc'].split(';'):
if associated_tag:
writer.write(','.join([calc_tag, associated_tag]) + '\n')
def _write_relationships_non_flat(self, writer):
# Write relationships in non-flat form, e.g.
# ADE1CA01, ADE1PI01
# ADE1CA01, ADE1PI02
for ii, row in self.config.iterrows():
calc_tag = row['FullTag'].lower()
associated_tag = row['TagsInCalc']
if associated_tag:
writer.write(','.join([calc_tag, associated_tag]))
def get_relationships(self):
"""
Gets the relationships between calc tags and all associated tags.
:return: a pandas DataFrame with columns:
'FullTag' = calculation tag
'TagsInCalc' = the tags that appear in the calculation
"""
results = self.config[['FullTag', 'TagsInCalc']]
return results
|
drericstrong/pyedna | pyedna/ezdna.py | LoadDll | python | def LoadDll(location):
"""
If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename
"""
if os.path.isfile(location):
global dna_dll
dna_dll = cdll.LoadLibrary(location)
else:
raise Exception("ERROR- file does not exist at " + location) | If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L70-L82 | null | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module stays importable (e.g. for building
    # docs); the mocked calls "succeed" with dummy values until LoadDll is
    # called with the real dll path.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """Load EzDnaApi64.dll from a non-default location.

    If the dll is not at ``C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll``,
    this must be called before any other function in this module will work.

    :param location: the full location of EzDnaApi64.dll, including filename
    """
    global dna_dll
    # Guard clause: refuse to load a path that does not exist.
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist

    Example:

    >>> DoesIDExist("Site.Service.Tag")
    """
    # The API takes the tag as a C string, so encode to bytes first.
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls period-averaged history for a tag; thin wrapper over GetHist
    with mode="avg".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: averaging interval, in seconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    options = {'mode': 'avg', 'period': period,
               'desc_as_label': desc_as_label, 'label': label}
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls linearly-interpolated history for a tag; thin wrapper over GetHist
    with mode="interp".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: interpolation interval, in seconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    options = {'mode': 'interp', 'period': period,
               'desc_as_label': desc_as_label, 'label': label}
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls the per-interval maximum of a tag's history; thin wrapper over
    GetHist with mode="max".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: interval length, in seconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    options = {'mode': 'max', 'period': period,
               'desc_as_label': desc_as_label, 'label': label}
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls the per-interval minimum of a tag's history; thin wrapper over
    GetHist with mode="min".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: interval length, in seconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    options = {'mode': 'min', 'period': period,
               'desc_as_label': desc_as_label, 'label': label}
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was accepted but silently ignored, so millisecond
    # pulls never happened; forward it to GetHist.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls history snapped to the last known value on each interval; thin
    wrapper over GetHist with mode="snap".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: snap interval, in seconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    options = {'mode': 'snap', 'period': period,
               'desc_as_label': desc_as_label, 'label': label}
    return GetHist(tag_name, start_time, end_time, **options)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: this dispatch previously used independent "if" statements
        # with an "else" bound only to the final ("snap") test, so any mode
        # other than "snap" *also* issued a raw request that clobbered the
        # intended one. An elif chain issues exactly one request.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod,
                                               byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod,
                                             byref(pulKey))
        else:
            # Any unrecognized mode falls back to a raw pull.
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd,
                                            byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must be
    # converted to an actual DateTime
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Drains a previously-initiated history request (raw, snap, avg, min,
    # max, ...), one (value, time, status) triple per dll call, until the
    # dll signals completion with a non-zero return code.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    # PERF FIX: np.append reallocates the whole array on every call
    # (quadratic); accumulate in Python lists and convert once at the end.
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    # dtype=float matches the arrays the original np.append built.
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # High-speed variant: drains (value, millisecond-timestamp, status)
    # triples from a previously-initiated high-speed history request until
    # the dll signals completion with a non-zero return code.
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    # PERF FIX: np.append reallocates the whole array on every call
    # (quadratic); accumulate in Python lists and convert once at the end.
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Seconds plus a separate millisecond part, combined into epoch ms.
        times.append(ptTime.value * 1000 + pnMillis.value)
        # The status string is ignored and hard-coded to 3 ("OK"), matching
        # the original behavior.
        stats.append(3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    # dtype=float matches the arrays the original np.append built.
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
def _GetLabel(tag_name):
    """Return the tag's description for use as a DataFrame column label,
    falling back to the tag name itself when no description is available."""
    description = GetTagDescription(tag_name)
    return description if description else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                # Append 2, 3, ... until the label is unique among the
                # columns collected so far.
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    """Convert one raw ctypes point record from GetPoints into a list of
    [tag, value, time, status, description, units]; returns None when the
    tag-name buffer is blank."""
    if not szPoint.value.strip():
        return None
    return [
        _format_str(szPoint.value.decode(errors='ignore')),
        pdValue.value,
        _format_str(szTime.value.decode(errors='ignore')),
        _format_str(szStatus.value.decode(errors='ignore')),
        _format_str(szDesc.value.decode(errors='ignore')),
        _format_str(szUnits.value.decode(errors='ignore')),
    ]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Description, Units]
    """
    # Define all required variables in the correct ctypes format
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    # A second set of buffers receives values in the DnaGetNextPointEntry loop
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tuple of: value, time, status, statusint, description, units
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    """Convert one raw ctypes service record from GetServices into a list of
    [name, description, type, status]; returns None when the name is blank."""
    fields = [_format_str(buf.value.decode(errors='ignore'))
              for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    if fields[0]:
        return fields
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    # A second set of buffers receives values in the DnaGetNextServiceEntry
    # loop below
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # To get the point information for the service, we need the Site.Service
    split_tag = tag_name.split(".")
    # If the full Site.Service.Tag was not supplied, return the tag_name
    if len(split_tag) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service will be the first two split strings
    site_service = split_tag[0] + "." + split_tag[1]
    # GetPoints will return a DataFrame with point information
    points = GetPoints(site_service)
    if tag_name in points.Tag.values:
        description = points[points.Tag == tag_name].Description.values[0]
        # An empty description falls back to the tag name itself
        if description:
            return description
        else:
            return tag_name
    else:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file
        # NOTE(review): nRet and szError are never inspected, so dll-level
        # failures are silent -- confirm whether errors should be surfaced.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    Insert (or update) values in an eDNA history service, one point per call.

    CAUTION- prefer HistAppendValues unless you know what you are doing;
    frequent inserts slow down later data retrieval.

    :param site_service: history service of the tag (e.g. ANTARES.HISTORY),
                         NOT the site.service of the tag itself
    :param tag_name: full site.service.tag (e.g. ANTARES.ANVCALC.ADE1CA02)
    :param times: array of times in UTC Epoch format (e.g. 1483926416,
                  not "2016/01/01 01:01:01")
    :param values: array of data point values, one per time
    :param statuses: array of eDNA point statuses (usually 3, i.e. 'OK')
    """
    # ctypes arguments shared by every inserted point
    svc_ptr = c_char_p(site_service.encode('utf-8'))
    tag_ptr = c_char_p(tag_name.encode('utf-8'))
    one_point = c_ushort(1)
    for stamp, datum, stat in zip(times, values, statuses):
        # Per-point ctypes marshalling
        time_ref = c_long(stamp)
        stat_ref = c_ushort(stat)
        value_ref = c_char_p(str(datum).encode('utf-8'))
        err_buf = create_string_buffer(20)
        err_len = c_ushort(20)
        # The dll return code is not checked, matching historical behavior.
        dna_dll.DnaHistUpdateInsertValues(svc_ptr, tag_ptr, one_point,
                                          byref(time_ref), byref(stat_ref),
                                          byref(value_ref), byref(err_buf),
                                          err_len)
def SelectPoint():
    """
    Open the eDNA point picker dialog and let the user choose a single tag.

    :return: the selected tag name as a string
    """
    tag_buf = create_string_buffer(20)
    buf_len = c_ushort(20)
    # Blocks until the user dismisses the picker dialog
    dna_dll.DnaSelectPoint(byref(tag_buf), buf_len)
    return tag_buf.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Convert a DateTime string into UTC Epoch time.

    :param time_string: must be in the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    encoded = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# NOTE(review): when no services are connected, num_services stays the int 0
# and no message is printed (GetServices already emitted a warning).
# Cleanup the unnecessary variables
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | DoesIDExist | python | def DoesIDExist(tag_name):
"""
Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag")
"""
# the eDNA API requires that the tag_name be specified in a binary format,
# and the ctypes library must be used to create a C++ variable type.
szPoint = c_char_p(tag_name.encode('utf-8'))
result = bool(dna_dll.DoesIdExist(szPoint))
return result | Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag") | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L94-L111 | null | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
    """
    Build a stand-in for the eDNA dll so this module can still be imported
    (e.g. for automatic documentation builds) on machines without
    EzDnaApi64.dll installed.
    """
    stub = Mock()
    # Every dll entry point used by this module that reports c_ulong(1)
    # on "success".
    returns_one = ['DnaGetHistAvgUTC', 'DnaGetHistInterpUTC',
                   'DnaGetHistMinUTC', 'DnaGetHistMaxUTC',
                   'DnaGetHistSnapUTC', 'DnaGetHistRawUTC',
                   'DoesIdExist', 'DnaGetHSHistRawUTC',
                   'DnaGetNextHSHistUTC', 'DnaGetPointEntry',
                   'DnaGetNextPointEntry', 'DNAGetRTFull',
                   'DnaSelectPoint', 'DnaGetServiceEntry',
                   'DnaGetNextServiceEntry', 'DnaHistAppendValues',
                   'DnaHistUpdateInsertValues', 'DnaGetNextHistSmallUTC']
    config = {name + '.return_value': c_ulong(1) for name in returns_one}
    # The two entry points with different return conventions
    config['StringToUTCTime.return_value'] = 1
    config['DnaCancelHistRequest.return_value'] = None
    stub.configure_mock(**config)
    return stub
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock handle so the module still imports; every API
    # call will fail until the user calls LoadDll with the real location.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    Point this module at a user-supplied copy of EzDnaApi64.dll.

    Required before any other call when the dll is not installed at the
    default location (C:\Program Files (x86)\eDNA\EzDnaApi64.dll).

    :param location: the full location of EzDnaApi64.dll, including filename
    """
    # Guard clause- fail loudly rather than loading a missing file
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    # Rebind the module-level handle that every other function calls through
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
    """
    Sanitize a string for display: keep only a-z, A-Z, 0-9, ., _, :, /, -,
    and whitespace, then trim surrounding whitespace. Non-string input is
    returned unchanged.
    """
    # Exact type check (not isinstance) preserved so str subclasses pass
    # through untouched, as before.
    if type(text) is not str:
        return text
    return re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
def DoesIDExist(tag_name):
    """
    Check whether a fully-qualified site.service.tag exists in any of the
    connected eDNA services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: True if the point exists, False otherwise

    Example:

    >>> DoesIDExist("Site.Service.Tag")
    """
    # The eDNA API expects the tag as a C string, so marshal it via ctypes.
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pull eDNA history for a tag, averaged over the given period.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper- GetHist does all of the work.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="avg", desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pull eDNA history for a tag, linearly interpolated over the given period.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper- GetHist does all of the work.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pull eDNA history for a tag, taking the maximum over the given period.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper- GetHist does all of the work.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="max", desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pull eDNA history for a tag, taking the minimum over the given period.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper- GetHist does all of the work.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="min", desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: True = pull data with millisecond resolution
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was accepted but never forwarded to GetHist, so
    # millisecond pulls silently fell back to one-second resolution.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pull eDNA history for a tag, snapped to the last known value over
    intervals of the given period.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper- GetHist does all of the work.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="snap", desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: number of seconds for the pull interval (ignored for
        "raw" pulls)
    :param mode: "raw", "snap", "avg", "interp", "max", "min".
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: the original dispatch used independent "if" statements
        # with a trailing "else", so any mode other than "snap" ALSO issued
        # a raw request that overwrote pulKey- avg/interp/min/max pulls
        # silently returned raw data. An if/elif chain dispatches exactly
        # once; unknown modes fall through to a raw pull, as documented.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd,
                                               tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod,
                                             byref(pulKey))
        else:
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd,
                                            byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must
    # be converted to an actual DateTime
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # Normal pulls report whole seconds; high-speed pulls report ms
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Drains a previously initiated history request (raw, snap, avg, min,
    # max, ...), one point per DnaGetNextHistSmallUTC call, until a
    # non-zero return code signals an error or the end of the data period.
    # Returns (times, values, statuses) as float64 numpy arrays.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    # PERF FIX: accumulate in Python lists- the original np.append copied
    # the whole array on every call, making the loop O(n^2) in point count.
    vals, times_, stats = [], [], []
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times_.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime,
                                              refStat)
    # dtype=float64 matches what np.append onto np.empty(0) produced
    return (np.array(times_, dtype=np.float64),
            np.array(vals, dtype=np.float64),
            np.array(stats, dtype=np.float64))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # Drains a previously initiated high-speed (millisecond) history
    # request, one point per DnaGetNextHSHistUTC call, until a non-zero
    # return code signals an error or the end of the data period.
    # Returns (times_ms, values, statuses) as float64 numpy arrays.
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    # PERF FIX: accumulate in Python lists- the original np.append copied
    # the whole array on every call, making the loop O(n^2) in point count.
    vals, times_, stats = [], [], []
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Combine seconds and milliseconds into a single ms timestamp
        times_.append(ptTime.value * 1000 + pnMillis.value)
        # The string status buffer is ignored; every point is reported as
        # status 3 ('OK'), matching historical behavior.
        stats.append(3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime,
                                           refMillis, refStatus, nStatus)
    # dtype=float64 matches what np.append onto np.empty(0) produced
    return (np.array(times_, dtype=np.float64),
            np.array(vals, dtype=np.float64),
            np.array(stats, dtype=np.float64))
def _GetLabel(tag_name):
    # Resolve the DataFrame column label for a tag: prefer the tag's
    # description (GetTagDescription already removes special characters and
    # trims whitespace), falling back to the tag name itself when the
    # description is blank or missing (None).
    return GetTagDescription(tag_name) or tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used (appending 2, 3, ...
            # when several tags share one description)
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later.
            # Only the value column is kept- the status column is dropped.
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Turn one raw GetPoints record into a cleaned [tag, value, time,
    # status, description, units] row, or None when the tag field is blank.
    # Bytes are decoded with errors ignored, since descriptions and units
    # occasionally contain non-UTF-8 characters.
    if not szPoint.value.strip():
        return None

    def clean(buf):
        return _format_str(buf.value.decode(errors='ignore'))

    return [clean(szPoint), pdValue.value, clean(szTime), clean(szStatus),
            clean(szDesc), clean(szUnits)]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Status, Description, Units]
    """
    # Define all required variables in the correct ctypes format
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    # A second, independent set of buffers is used for the
    # DnaGetNextPointEntry iteration below
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned points
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: list of [value, time, status, statusint, description, units],
        or None if the tag does not exist or the API call fails
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Turn one raw GetServices record into a cleaned [name, description,
    # type, status] row, or None when the service name field is blank.
    # Bytes are decoded with errors ignored, since descriptions occasionally
    # contain non-UTF-8 characters.
    fields = [_format_str(buf.value.decode(errors='ignore'))
              for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    return fields if fields[0] else None
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    # A second, independent set of buffers is used for the
    # DnaGetNextServiceEntry iteration below
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the tag description; the tag name itself when the description
        is blank or the full Site.Service.Tag was not supplied; or None
        when the tag does not exist
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # To get the point information for the service, we need the Site.Service
    split_tag = tag_name.split(".")
    # If the full Site.Service.Tag was not supplied, return the tag_name
    if len(split_tag) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service will be the first two split strings
    site_service = split_tag[0] + "." + split_tag[1]
    # GetPoints will return a DataFrame with point information
    points = GetPoints(site_service)
    if tag_name in points.Tag.values:
        description = points[points.Tag == tag_name].Description.values[0]
        if description:
            return description
        else:
            # Blank descriptions fall back to the tag name itself
            return tag_name
    else:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Append values to an eDNA history service, one point per API call.

    Values are only written if they are LATER than the last data point
    already stored for the tag; otherwise nothing is appended. Prefer this
    function over HistUpdateInsertValues, which degrades later data
    retrieval when overused.

    :param site_service: history service of the tag (e.g. ANTARES.HISTORY),
                         NOT the site.service of the tag itself
    :param tag_name: full site.service.tag (e.g. ANTARES.ANVCALC.ADE1CA02)
    :param times: array of times in UTC Epoch format (e.g. 1483926416,
                  not "2016/01/01 01:01:01")
    :param values: array of data point values, one per time
    :param statuses: array of eDNA point statuses (usually 3, i.e. 'OK')
    """
    # ctypes arguments that are identical for every appended point
    svc_ptr = c_char_p(site_service.encode('utf-8'))
    tag_ptr = c_char_p(tag_name.encode('utf-8'))
    one_point = c_ushort(1)
    for stamp, datum, stat in zip(times, values, statuses):
        # Per-point ctypes marshalling
        time_ref = c_long(stamp)
        stat_ref = c_ushort(stat)
        value_ref = c_char_p(str(datum).encode('utf-8'))
        err_buf = create_string_buffer(20)
        err_len = c_ushort(20)
        # One DnaHistAppendValues call per (time, value, status) triple;
        # the dll return code is not checked, matching historical behavior.
        dna_dll.DnaHistAppendValues(svc_ptr, tag_ptr, one_point,
                                    byref(time_ref), byref(stat_ref),
                                    byref(value_ref), byref(err_buf),
                                    err_len)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    Insert (or update) values in an eDNA history service, one point per call.

    CAUTION- prefer HistAppendValues unless you know what you are doing;
    frequent inserts slow down later data retrieval.

    :param site_service: history service of the tag (e.g. ANTARES.HISTORY),
                         NOT the site.service of the tag itself
    :param tag_name: full site.service.tag (e.g. ANTARES.ANVCALC.ADE1CA02)
    :param times: array of times in UTC Epoch format (e.g. 1483926416,
                  not "2016/01/01 01:01:01")
    :param values: array of data point values, one per time
    :param statuses: array of eDNA point statuses (usually 3, i.e. 'OK')
    """
    # ctypes arguments shared by every inserted point
    svc_ptr = c_char_p(site_service.encode('utf-8'))
    tag_ptr = c_char_p(tag_name.encode('utf-8'))
    one_point = c_ushort(1)
    for stamp, datum, stat in zip(times, values, statuses):
        # Per-point ctypes marshalling
        time_ref = c_long(stamp)
        stat_ref = c_ushort(stat)
        value_ref = c_char_p(str(datum).encode('utf-8'))
        err_buf = create_string_buffer(20)
        err_len = c_ushort(20)
        # The dll return code is not checked, matching historical behavior.
        dna_dll.DnaHistUpdateInsertValues(svc_ptr, tag_ptr, one_point,
                                          byref(time_ref), byref(stat_ref),
                                          byref(value_ref), byref(err_buf),
                                          err_len)
def SelectPoint():
    """
    Open the eDNA point picker dialog and let the user choose a single tag.

    :return: the selected tag name as a string
    """
    tag_buf = create_string_buffer(20)
    buf_len = c_ushort(20)
    # Blocks until the user dismisses the picker dialog
    dna_dll.DnaSelectPoint(byref(tag_buf), buf_len)
    return tag_buf.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The dll expects a C string; encode once and return its result directly.
    encoded = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
# NOTE(review): default_location is assigned unconditionally near the top of
# the module, so deleting it here is safe; num_services stays an int when no
# services are found, which is harmless since it is deleted immediately.
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetHistAvg | python | def GetHistAvg(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
desc_as_label=desc_as_label, label=label) | Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L114-L130 | [
"def GetHist(tag_name, start_time, end_time, period=5, mode=\"raw\",\n desc_as_label=False, label=None, high_speed=False, utc=False):\n \"\"\"\n Retrieves data from eDNA history for a given tag.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :param start_time: must be in format mm/dd/yy hh:mm:ss\n :param end_time: must be in format mm/dd/yy hh:mm:ss\n :param period: specify the number of seconds for the pull interval\n :param mode: \"raw\", \"snap\", \"avg\", \"interp\", \"max\", \"min\"\n See eDNA documentation for more information.\n :param desc_as_label: use the tag description as the column name instead\n of the full tag\n :param label: supply a custom label to use as the DataFrame column name\n :param high_speed: if True, pull millisecond data\n :param utc: if True, use the integer time format instead of DateTime\n :return: a pandas DataFrame with timestamp, value, and status\n \"\"\"\n # Check if the point even exists\n if not DoesIDExist(tag_name):\n warnings.warn(\"WARNING- \" + tag_name + \" does not exist or \" +\n \"connection was dropped. 
Try again if tag does exist.\")\n return pd.DataFrame()\n\n # Define all required variables in the correct ctypes format\n szPoint = c_char_p(tag_name.encode('utf-8'))\n tStart = c_long(StringToUTCTime(start_time))\n tEnd = c_long(StringToUTCTime(end_time))\n tPeriod = c_long(period)\n pulKey = c_ulong(0)\n\n # Initialize the data pull using the specified pulKey, which is an\n # identifier that tells eDNA which data pull is occurring\n mode = mode.lower().strip()\n if not high_speed:\n if mode == \"avg\":\n nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"interp\":\n nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"min\":\n nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"max\":\n nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"snap\":\n nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n else:\n nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))\n time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)\n else:\n nStartMillis = c_ushort(0)\n nEndMillis = c_ushort(0)\n nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,\n tEnd, nEndMillis, byref(pulKey))\n time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)\n\n # The history request must be cancelled to free up network resources\n dna_dll.DnaCancelHistRequest(pulKey)\n\n # To construct the pandas DataFrame, the tag name will be used as the\n # column name, and the index (which is in the strange eDNA format) must be\n # converted to an actual DateTime\n d = {tag_name + ' Status': stat, tag_name: val}\n df = pd.DataFrame(data=d, index=time_)\n if not utc:\n if not high_speed:\n df.index = pd.to_datetime(df.index, unit=\"s\")\n else:\n df.index = pd.to_datetime(df.index, unit=\"ms\")\n if df.empty:\n warnings.warn('WARNING- No data retrieved for ' + tag_name + '. 
' +\n 'Check eDNA connection, ensure that the start time is ' +\n 'not later than the end time, verify that the ' +\n 'DateTime formatting matches eDNA requirements, and ' +\n 'check that data exists in the query time period.')\n\n # Check if the user would rather use the description as the column name\n if desc_as_label or label:\n if label:\n new_label = label\n else:\n new_label = _GetLabel(tag_name)\n df.rename(inplace=True, columns={tag_name: new_label,\n tag_name + \" Status\": new_label + \" Status\"})\n return df\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock of the dll so the module can still be imported
    # (e.g. for documentation builds); every API call returns canned values
    # until LoadDll is called with the real dll path.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    If the EzDnaApi64.dll file is not in the default location
    (C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
    the correct location of the file, before this module can be used.

    :param location: the full location of EzDnaApi64.dll, including filename
    """
    global dna_dll
    # Guard clause: refuse to load anything that is not an existing file.
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    # Rebind the module-level dll handle to the user-supplied library.
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist

    Example:

    >>> DoesIDExist("Site.Service.Tag")
    """
    # The dll takes the tag as a C string and returns nonzero when the
    # point is known to any connected service.
    point = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(point))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, averaged over the
    specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper that selects the "avg" retrieval mode.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="avg", desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, linearly interpolated
    over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper that selects the "interp" retrieval mode.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, taking the maximum
    over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper that selects the "max" retrieval mode.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="max", desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, taking the minimum
    over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper that selects the "min" retrieval mode.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="min", desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was previously accepted but never forwarded to
    # GetHist, so millisecond retrieval silently fell back to one-second
    # resolution. It is now passed through.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, snapped to the last
    known value over intervals of the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper that selects the "snap" retrieval mode.
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="snap", desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: these tests were previously independent "if" statements,
        # with the trailing "else" bound only to the "snap" check. Any
        # non-"snap" mode (avg/interp/min/max) therefore issued its request
        # and then ALSO issued a raw request that clobbered nRet and pulKey.
        # An exclusive if/elif chain restores the intended dispatch; an
        # unrecognized mode still falls back to a raw pull, as before.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod,
                                               byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod,
                                             byref(pulKey))
        else:
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd,
                                            byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        # High-speed pulls carry millisecond components for start/end times
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must be
    # converted to an actual DateTime
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # Regular pulls return epoch seconds; high-speed pulls milliseconds
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Drain one (value, time, status) triple at a time from an already-issued
    # history request (identified by pulKey) until the dll signals end of
    # data or an error with a nonzero return code.
    # NOTE(review): the incoming nRet argument is overwritten before it is
    # ever read, so only pulKey actually matters here — confirm with callers.
    # NOTE(review): numba.jit over ctypes calls presumably runs in object
    # mode; verify it provides any speedup before keeping the decorator.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        # np.append re-allocates on every iteration; acceptable for the
        # typical pull sizes this library handles.
        val = np.append(val, pdValue.value)
        time_ = np.append(time_, ptTime.value)
        stat = np.append(stat, pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # High-speed variant of _GetNextHistSmallUTC: drains millisecond-stamped
    # values from an already-issued request until the dll returns nonzero.
    # NOTE(review): the incoming nRet argument is overwritten before use;
    # only pulKey actually matters here.
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        val = np.append(val, pdValue.value)
        # Combine seconds and milliseconds into a single epoch-ms timestamp
        time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
        # NOTE(review): the decoded szStatus buffer is discarded and every
        # point is hard-coded to status 3 ("OK") — confirm intentional.
        stat = np.append(stat, 3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    return time_, val, stat
def _GetLabel(tag_name):
    """Return the tag's description for use as a DataFrame column label.

    Falls back to the tag name itself when no description is available
    (empty string or None from GetTagDescription).
    """
    description = GetTagDescription(tag_name)
    return description if description else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                # Suffix 2, 3, ... until the label is unique in the result
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later
            # (only the value column is kept; the status column is dropped)
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        # NOTE(review): DataFrame.fillna(method=...) is deprecated in recent
        # pandas in favor of .ffill(limit=...) — confirm the supported pandas
        # versions before changing.
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    """Decode and sanitize one point record returned by the GetPoints
    dll calls.

    :return: [tag, value, time, status, description, units] when the tag
        buffer is non-blank, otherwise None
    """
    # A blank tag name means the dll produced no usable record
    if not szPoint.value.strip():
        return None
    # Decode each text buffer (ignoring undecodable bytes) and strip
    # any characters outside the allowed set
    tag, time_, status, desc, units = (
        _format_str(buf.value.decode(errors='ignore'))
        for buf in (szPoint, szTime, szStatus, szDesc, szUnits))
    return [tag, pdValue.value, time_, status, desc, units]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Description, Units]
    """
    # Define all required variables in the correct ctypes format.
    # One set of output buffers serves the initial DnaGetPointEntry call;
    # a second ("2"-suffixed) set serves the DnaGetNextPointEntry loop.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    # Buffer capacities passed alongside each output buffer
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: list of: value, time, status, statusint, description, units
        (or None if the point does not exist or the dll call failed)
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    # NOTE(review): nDesc and nUnits are passed as 0 even though their
    # buffers are 20 bytes — confirm against the EzDnaApi signature that
    # a zero capacity is intended here.
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    """Decode and sanitize one service record returned by the GetServices
    dll calls.

    :return: [name, description, type, status] when the name buffer is
        non-blank, otherwise None
    """
    # Decode each buffer (ignoring undecodable bytes) and strip any
    # characters outside the allowed set
    record = [_format_str(buf.value.decode(errors='ignore'))
              for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    # An empty service name means there was no usable record
    if record[0]:
        return record
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # One set of output buffers serves the initial DnaGetServiceEntry call;
    # a second ("2"-suffixed) set serves the DnaGetNextServiceEntry loop.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    # Buffer capacities passed alongside each output buffer
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description (the tag name itself when the description is
        blank or the name is not fully qualified; None on failure)
    """
    # Bail out early when the point is not known to any connected service
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # A full Site.Service.Tag is required to locate the owning service
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service prefix is the first two dot-separated components
    site_service = ".".join(parts[:2])
    # GetPoints returns a DataFrame with one row of metadata per point
    points = GetPoints(site_service)
    if tag_name not in points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = points[points.Tag == tag_name].Description.values[0]
    # Fall back to the tag name when the description is blank
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # The service/tag C strings are fixed for every appended point
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Exactly one point is written per dll call
    nCount = c_ushort(1)
    for point_time, point_value, point_status in zip(times, values, statuses):
        # Marshal this point into the ctypes shapes the dll expects;
        # the value is always passed as a string
        time_c = c_long(point_time)
        status_c = c_ushort(point_status)
        value_c = c_char_p(str(point_value).encode('utf-8'))
        error_buf = create_string_buffer(20)
        error_len = c_ushort(20)
        # Call the history append file
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint, nCount,
                                           byref(time_c), byref(status_c),
                                           byref(value_c), byref(error_buf),
                                           error_len)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format.
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # One value is submitted per DnaHistUpdateInsertValues call.
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Per-point arguments: epoch time, status code, value as a C string,
        # and an output buffer for error text.
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history insert function.
        # NOTE(review): nRet and szError are never inspected, so write
        # failures are silent.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def SelectPoint():
    """
    Opens the eDNA point picker dialog so the user can choose a single tag.

    :return: the selected tag name as a string
    """
    # The picker writes the chosen tag into a caller-supplied C buffer.
    buffer_size = 20
    pszPoint = create_string_buffer(buffer_size)
    nPoint = c_ushort(buffer_size)
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    return pszPoint.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Converts a date/time string into UTC epoch seconds via the eDNA API.

    :param time_string: must be in the format "MM/dd/yy hh:mm:ss"
    :return: an integer UTC epoch time
    """
    encoded = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    # Report how many services responded; only printed when at least one
    # service is connected (num_services stays the int 0 otherwise).
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not leak into the module
# namespace for importers.
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetHistRaw | python | def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
desc_as_label=False, label=None):
"""
Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="raw",
desc_as_label=desc_as_label, label=label) | Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L189-L204 | [
"def GetHist(tag_name, start_time, end_time, period=5, mode=\"raw\",\n desc_as_label=False, label=None, high_speed=False, utc=False):\n \"\"\"\n Retrieves data from eDNA history for a given tag.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :param start_time: must be in format mm/dd/yy hh:mm:ss\n :param end_time: must be in format mm/dd/yy hh:mm:ss\n :param period: specify the number of seconds for the pull interval\n :param mode: \"raw\", \"snap\", \"avg\", \"interp\", \"max\", \"min\"\n See eDNA documentation for more information.\n :param desc_as_label: use the tag description as the column name instead\n of the full tag\n :param label: supply a custom label to use as the DataFrame column name\n :param high_speed: if True, pull millisecond data\n :param utc: if True, use the integer time format instead of DateTime\n :return: a pandas DataFrame with timestamp, value, and status\n \"\"\"\n # Check if the point even exists\n if not DoesIDExist(tag_name):\n warnings.warn(\"WARNING- \" + tag_name + \" does not exist or \" +\n \"connection was dropped. 
Try again if tag does exist.\")\n return pd.DataFrame()\n\n # Define all required variables in the correct ctypes format\n szPoint = c_char_p(tag_name.encode('utf-8'))\n tStart = c_long(StringToUTCTime(start_time))\n tEnd = c_long(StringToUTCTime(end_time))\n tPeriod = c_long(period)\n pulKey = c_ulong(0)\n\n # Initialize the data pull using the specified pulKey, which is an\n # identifier that tells eDNA which data pull is occurring\n mode = mode.lower().strip()\n if not high_speed:\n if mode == \"avg\":\n nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"interp\":\n nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"min\":\n nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"max\":\n nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"snap\":\n nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n else:\n nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))\n time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)\n else:\n nStartMillis = c_ushort(0)\n nEndMillis = c_ushort(0)\n nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,\n tEnd, nEndMillis, byref(pulKey))\n time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)\n\n # The history request must be cancelled to free up network resources\n dna_dll.DnaCancelHistRequest(pulKey)\n\n # To construct the pandas DataFrame, the tag name will be used as the\n # column name, and the index (which is in the strange eDNA format) must be\n # converted to an actual DateTime\n d = {tag_name + ' Status': stat, tag_name: val}\n df = pd.DataFrame(data=d, index=time_)\n if not utc:\n if not high_speed:\n df.index = pd.to_datetime(df.index, unit=\"s\")\n else:\n df.index = pd.to_datetime(df.index, unit=\"ms\")\n if df.empty:\n warnings.warn('WARNING- No data retrieved for ' + tag_name + '. 
' +\n 'Check eDNA connection, ensure that the start time is ' +\n 'not later than the end time, verify that the ' +\n 'DateTime formatting matches eDNA requirements, and ' +\n 'check that data exists in the query time period.')\n\n # Check if the user would rather use the description as the column name\n if desc_as_label or label:\n if label:\n new_label = label\n else:\n new_label = _GetLabel(tag_name)\n df.rename(inplace=True, columns={tag_name: new_label,\n tag_name + \" Status\": new_label + \" Status\"})\n return df\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock of the dll so the module can still be imported
    # (e.g. for documentation builds); every API call will fail until the
    # user loads the real dll via LoadDll.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must
# explicitly load it using the LoadDll function.
def LoadDll(location):
    """
    Loads the eDNA API from a user-specified path.

    Required when EzDnaApi64.dll is not in the default location
    (C:\Program Files (x86)\eDNA\EzDnaApi64.dll); must be called before
    any other function in this module will work.

    :param location: the full path of EzDnaApi64.dll, including filename
    :raises Exception: if no file exists at the given path
    """
    # Guard clause: fail fast on a bad path before touching the dll loader.
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Checks whether a fully-qualified site.service.tag eDNA point exists in
    any of the currently connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: True if the point exists, False otherwise

    Example:
    >>> DoesIDExist("Site.Service.Tag")
    """
    # The eDNA API expects the tag as a C string, so encode it via ctypes
    # before handing it to the dll.
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, averaged over intervals of "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin convenience wrapper: delegate to GetHist in "avg" mode.
    options = dict(mode="avg", period=period,
                   desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, linearly interpolated at intervals of
    "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin convenience wrapper: delegate to GetHist in "interp" mode.
    options = dict(mode="interp", period=period,
                   desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the maximum over intervals of
    "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin convenience wrapper: delegate to GetHist in "max" mode.
    options = dict(mode="max", period=period,
                   desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the minimum over intervals of
    "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin convenience wrapper: delegate to GetHist in "min" mode.
    options = dict(mode="min", period=period,
                   desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **options)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was previously accepted but never forwarded to
    # GetHist, so millisecond pulls silently fell back to one-second
    # resolution. It is now passed through.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed,
                   desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, snapped to the last known value over
    intervals of "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin convenience wrapper: delegate to GetHist in "snap" mode.
    options = dict(mode="snap", period=period,
                   desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **options)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information. Any unrecognized
        mode falls back to "raw".
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Bail out early if the point does not exist in any connected service.
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()

    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)

    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring.
    # BUG FIX: the mode checks were previously independent "if" statements
    # with the final "else" attached only to the "snap" check, so every
    # non-snap mode ALSO issued a raw pull that overwrote pulKey/nRet.
    # The checks are now a mutually exclusive elif chain.
    mode = mode.lower().strip()
    if not high_speed:
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod,
                                               byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod,
                                             byref(pulKey))
        else:
            # "raw" and any unrecognized mode: pull the data as stored.
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd,
                                            byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)

    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)

    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (eDNA epoch seconds, or milliseconds for
    # high-speed pulls) is converted to a real DateTime unless utc=True.
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        df.index = pd.to_datetime(df.index, unit="ms" if high_speed else "s")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')

    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        new_label = label if label else _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Drains a previously initiated history request (raw, snap, avg, ...),
    # one point per DnaGetNextHistSmallUTC call, until the dll reports a
    # nonzero code (end of data or error).
    #
    # PERF FIX: points were previously accumulated with np.append, which
    # copies the whole array on every call (O(n^2) overall). They are now
    # collected in Python lists and converted to arrays once at the end.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    # Same return contract as before: (times, values, statuses) as float
    # ndarrays, empty when no points were returned.
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # Drains a previously initiated high-speed (millisecond) history
    # request, one point per DnaGetNextHSHistUTC call, until the dll
    # reports a nonzero code (end of data or error).
    #
    # PERF FIX: points were previously accumulated with np.append, which
    # copies the whole array on every call (O(n^2) overall). They are now
    # collected in Python lists and converted to arrays once at the end.
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Combine seconds and milliseconds into a single ms-resolution stamp.
        times.append(ptTime.value * 1000 + pnMillis.value)
        # Status is hard-coded to 3 ('OK'), as in the original implementation;
        # the szStatus text buffer is not parsed.
        stats.append(3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
def _GetLabel(tag_name):
    # Prefer the tag's configured description as a DataFrame column label;
    # when the description is empty or None, fall back to the tag name so
    # the column always has a usable label.
    description = GetTagDescription(tag_name)
    return description if description else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used: duplicate
            # descriptions get a numeric suffix (2, 3, ...).
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
                columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Keep only the value column (the Status column is dropped);
            # all per-tag frames are concatenated later.
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    # NOTE(review): the mask excludes a point exactly at start_time
    # (strict >) but includes one exactly at end_time (<=) — confirm this
    # asymmetry is intended.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Decode and sanitize one point record returned by the GetPoints API
    # buffers. Returns [tag, value, time, status, description, units], or
    # None (implicitly) when the tag-name buffer is blank.
    decode = lambda buf: _format_str(buf.value.decode(errors='ignore'))
    if not szPoint.value.strip():
        return None
    return [decode(szPoint), pdValue.value, decode(szTime),
            decode(szStatus), decode(szDesc), decode(szUnits)]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Status, Description, Units]
    """
    # Define all required variables in the correct ctypes format.
    # Two sets of output buffers are used: one for the initial
    # DnaGetPointEntry call, one reused for every DnaGetNextPointEntry call.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned points; the loop ends when the dll
    # reports a nonzero return code.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tuple of: value, time, status, statusint, description, units;
        or None when the tag does not exist or the API call fails
    """
    # Bail out early if the point does not exist in any connected service.
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format:
    # output buffers for the numeric value/time plus their string forms,
    # the status (text and integer), description, and units.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Decode and sanitize one service record from the GetServices API
    # buffers. Returns [name, description, type, status], or None
    # (implicitly) when the decoded service name is blank.
    decoded = [_format_str(buf.value.decode(errors='ignore'))
               for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    if decoded[0]:
        return decoded
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form
        [Name, Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # Two sets of output buffers are used: one for the initial
    # DnaGetServiceEntry call, one reused for DnaGetNextServiceEntry.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services; the loop ends when the dll
    # reports a nonzero return code.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Looks up the configured description of a point in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the tag description; the tag name itself when no description
        is configured or the name is not fully qualified; None when the
        tag cannot be found
    """
    # Guard: the point must exist in a connected service.
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # The point listing is queried per Site.Service, so the tag must be
    # fully qualified (Site.Service.Tag) before we can look it up.
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    site_service = parts[0] + "." + parts[1]
    # GetPoints returns a DataFrame of every point in the service.
    points = GetPoints(site_service)
    if tag_name not in points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = points[points.Tag == tag_name].Description.values[0]
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends values to an eDNA history service. Take very careful note of
    the required parameter formats below; any deviation WILL cause the
    function to fail.

    Values are appended only if they are LATER than the timestamp of the
    last written data point; otherwise no data is written. Prefer this
    function over HistUpdateInsertValues, which slows down data retrieval
    if used too often.

    :param site_service: the history service for the eDNA tag, NOT the
                         site.service of the tag itself (e.g.
                         ANTARES.HISTORY, not ANTARES.ANVCALC)
    :param tag_name: the full site.service.tag (e.g. ANTARES.ANVCALC.ADE1CA02)
    :param times: array of times in UTC Epoch format (e.g. "1483926416",
                  not "2016/01/01 01:01:01")
    :param values: array of data point values, one per time
    :param statuses: array of point statuses; refer to the eDNA
                     documentation ('3' is usually 'OK')
    """
    # ctypes handles for the service and point are invariant across samples
    service_ptr = c_char_p(site_service.encode('utf-8'))
    point_ptr = c_char_p(tag_name.encode('utf-8'))
    one_point = c_ushort(1)
    # Push the samples to the DLL one at a time, in the order supplied
    for sample_time, sample_value, sample_status in zip(times, values, statuses):
        time_arg = c_long(sample_time)
        status_arg = c_ushort(sample_status)
        value_arg = c_char_p(str(sample_value).encode('utf-8'))
        # Scratch buffer the DLL may fill with an error message
        error_buf = create_string_buffer(20)
        error_len = c_ushort(20)
        dna_dll.DnaHistAppendValues(
            service_ptr, point_ptr, one_point,
            byref(time_arg), byref(status_arg), byref(value_arg),
            byref(error_buf), error_len)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you
    know what you are doing.

    Inserts values into an eDNA history service. Take very careful note
    of the required parameter formats below; any deviation WILL cause the
    function to fail.

    :param site_service: the history service for the eDNA tag, NOT the
                         site.service of the tag itself (e.g.
                         ANTARES.HISTORY, not ANTARES.ANVCALC)
    :param tag_name: the full site.service.tag (e.g. ANTARES.ANVCALC.ADE1CA02)
    :param times: array of times in UTC Epoch format (e.g. "1483926416",
                  not "2016/01/01 01:01:01")
    :param values: array of data point values, one per time
    :param statuses: array of point statuses; refer to the eDNA
                     documentation ('3' is usually 'OK')
    """
    # ctypes handles for the service and point are invariant across samples
    service_ptr = c_char_p(site_service.encode('utf-8'))
    point_ptr = c_char_p(tag_name.encode('utf-8'))
    one_point = c_ushort(1)
    # Hand each sample to the DLL individually, in the order supplied
    for sample_time, sample_value, sample_status in zip(times, values, statuses):
        time_arg = c_long(sample_time)
        status_arg = c_ushort(sample_status)
        value_arg = c_char_p(str(sample_value).encode('utf-8'))
        # Scratch buffer the DLL may fill with an error message
        error_buf = create_string_buffer(20)
        error_len = c_ushort(20)
        dna_dll.DnaHistUpdateInsertValues(
            service_ptr, point_ptr, one_point,
            byref(time_arg), byref(status_arg), byref(value_arg),
            byref(error_buf), error_len)
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # 20-byte output buffer that the DLL fills with the chosen tag name
    tag_buffer = create_string_buffer(20)
    buffer_size = c_ushort(20)
    # Blocks until the user makes a selection in the picker dialog
    dna_dll.DnaSelectPoint(byref(tag_buffer), buffer_size)
    return tag_buffer.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The DLL performs the actual parsing/conversion; we only marshal
    # the string into a C-compatible pointer.
    encoded_time = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded_time)
# Import-time sanity check: verify that at least one eDNA service is
# connected, since nothing in this module works without a connection.
services_df = GetServices()
service_count = 0
if not services_df.empty:
    service_count = str(len(services_df))
    print("Successfully connected to " + service_count + " eDNA services.")
# Drop the module-level scratch variables so they don't leak into the
# public namespace (default_location is defined earlier in the file).
del(services_df, service_count, default_location)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.