repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
thiezn/iperf3-python | iperf3/iperf3.py | IPerf3.iperf_version | python | def iperf_version(self):
# TODO: Is there a better way to get the const char than allocating 30?
VersionType = c_char * 30
return VersionType.in_dll(self.lib, "version").value.decode('utf-8') | Returns the version of the libiperf library
:rtype: string | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L369-L376 | null | class IPerf3(object):
"""The base class used by both the iperf3 :class:`Server` and :class:`Client`
.. note:: You should not use this class directly
"""
def __init__(self,
role,
verbose=True,
lib_name=None):
"""Initialise the iperf shared library
:param role: 'c' = client; 's' = server
:param verbose: enable verbose output
:param lib_name: optional name and path for libiperf.so.0 library
"""
if lib_name is None:
lib_name = util.find_library('libiperf')
if lib_name is None:
# If we still couldn't find it lets try the manual approach
lib_name = 'libiperf.so.0'
try:
self.lib = cdll.LoadLibrary(lib_name)
except OSError:
raise OSError(
"Couldn't find shared library {}, is iperf3 installed?".format(
lib_name
)
)
# Set the appropriate C types.
self.lib.iperf_client_end.restype = c_int
self.lib.iperf_client_end.argtypes = (c_void_p,)
self.lib.iperf_free_test.restxpe = None
self.lib.iperf_free_test.argtypes = (c_void_p,)
self.lib.iperf_new_test.restype = c_void_p
self.lib.iperf_new_test.argtypes = None
self.lib.iperf_defaults.restype = c_int
self.lib.iperf_defaults.argtypes = (c_void_p,)
self.lib.iperf_get_test_role.restype = c_char
self.lib.iperf_get_test_role.argtypes = (c_void_p,)
self.lib.iperf_set_test_role.restype = None
self.lib.iperf_set_test_role.argtypes = (c_void_p, c_char,)
self.lib.iperf_get_test_bind_address.restype = c_char_p
self.lib.iperf_get_test_bind_address.argtypes = (c_void_p,)
self.lib.iperf_set_test_bind_address.restype = None
self.lib.iperf_set_test_bind_address.argtypes = (c_void_p, c_char_p,)
self.lib.iperf_get_test_server_port.restype = c_int
self.lib.iperf_get_test_server_port.argtypes = (c_void_p,)
self.lib.iperf_set_test_server_port.restype = None
self.lib.iperf_set_test_server_port.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_json_output.restype = c_int
self.lib.iperf_get_test_json_output.argtypes = (c_void_p,)
self.lib.iperf_set_test_json_output.restype = None
self.lib.iperf_set_test_json_output.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_verbose.restype = c_int
self.lib.iperf_get_verbose.argtypes = (c_void_p,)
self.lib.iperf_set_verbose.restype = None
self.lib.iperf_set_verbose.argtypes = (c_void_p, c_int)
self.lib.iperf_strerror.restype = c_char_p
self.lib.iperf_strerror.argtypes = (c_int,)
self.lib.iperf_get_test_server_hostname.restype = c_char_p
self.lib.iperf_get_test_server_hostname.argtypes = (c_void_p,)
self.lib.iperf_set_test_server_hostname.restype = None
self.lib.iperf_set_test_server_hostname.argtypes = (
c_void_p, c_char_p,
)
self.lib.iperf_get_test_protocol_id.restype = c_int
self.lib.iperf_get_test_protocol_id.argtypes = (c_void_p,)
self.lib.set_protocol.restype = c_int
self.lib.set_protocol.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_omit.restype = c_int
self.lib.iperf_get_test_omit.argtypes = (c_void_p,)
self.lib.iperf_set_test_omit.restype = None
self.lib.iperf_set_test_omit.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_duration.restype = c_int
self.lib.iperf_get_test_duration.argtypes = (c_void_p,)
self.lib.iperf_set_test_duration.restype = None
self.lib.iperf_set_test_duration.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_rate.restype = c_uint64
self.lib.iperf_get_test_rate.argtypes = (c_void_p,)
self.lib.iperf_set_test_rate.restype = None
self.lib.iperf_set_test_rate.argtypes = (c_void_p, c_uint64,)
self.lib.iperf_get_test_blksize.restype = c_int
self.lib.iperf_get_test_blksize.argtypes = (c_void_p,)
self.lib.iperf_set_test_blksize.restype = None
self.lib.iperf_set_test_blksize.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_num_streams.restype = c_int
self.lib.iperf_get_test_num_streams.argtypes = (c_void_p,)
self.lib.iperf_set_test_num_streams.restype = None
self.lib.iperf_set_test_num_streams.argtypes = (c_void_p, c_int,)
self.lib.iperf_has_zerocopy.restype = c_int
self.lib.iperf_has_zerocopy.argtypes = None
self.lib.iperf_set_test_zerocopy.restype = None
self.lib.iperf_set_test_zerocopy.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_reverse.restype = c_int
self.lib.iperf_get_test_reverse.argtypes = (c_void_p,)
self.lib.iperf_set_test_reverse.restype = None
self.lib.iperf_set_test_reverse.argtypes = (c_void_p, c_int,)
self.lib.iperf_run_client.restype = c_int
self.lib.iperf_run_client.argtypes = (c_void_p,)
self.lib.iperf_run_server.restype = c_int
self.lib.iperf_run_server.argtypes = (c_void_p,)
self.lib.iperf_reset_test.restype = None
self.lib.iperf_reset_test.argtypes = (c_void_p,)
try:
# Only available from iperf v3.1 and onwards
self.lib.iperf_get_test_json_output_string.restype = c_char_p
self.lib.iperf_get_test_json_output_string.argtypes = (c_void_p,)
except AttributeError:
pass
# The test C struct iperf_test
self._test = self._new()
self.defaults()
# stdout/strerr redirection variables
self._stdout_fd = os.dup(1)
self._stderr_fd = os.dup(2)
self._pipe_out, self._pipe_in = os.pipe() # no need for pipe write
# Generic test settings
self.role = role
self.json_output = True
self.verbose = verbose
def __del__(self):
"""Cleanup the test after the :class:`IPerf3` class is terminated"""
os.close(self._stdout_fd)
os.close(self._stderr_fd)
os.close(self._pipe_out)
os.close(self._pipe_in)
try:
# In the current version of libiperf, the control socket isn't
# closed on iperf_client_end(), see proposed pull request:
# https://github.com/esnet/iperf/pull/597
# Workaround for testing, don't ever do this..:
#
# sck=self.lib.iperf_get_control_socket(self._test)
# os.close(sck)
self.lib.iperf_client_end(self._test)
self.lib.iperf_free_test(self._test)
except AttributeError:
# self.lib doesn't exist, likely because iperf3 wasn't installed or
# the shared library libiperf.so.0 could not be found
pass
def _new(self):
"""Initialise a new iperf test
struct iperf_test *iperf_new_test()
"""
return self.lib.iperf_new_test()
def defaults(self):
"""Set/reset iperf test defaults."""
self.lib.iperf_defaults(self._test)
@property
def role(self):
"""The iperf3 instance role
valid roles are 'c'=client and 's'=server
:rtype: 'c' or 's'
"""
try:
self._role = c_char(
self.lib.iperf_get_test_role(self._test)
).value.decode('utf-8')
except TypeError:
self._role = c_char(
chr(self.lib.iperf_get_test_role(self._test))
).value.decode('utf-8')
return self._role
@role.setter
def role(self, role):
if role.lower() in ['c', 's']:
self.lib.iperf_set_test_role(
self._test,
c_char(role.lower().encode('utf-8'))
)
self._role = role
else:
raise ValueError("Unknown role, accepted values are 'c' and 's'")
@property
def bind_address(self):
"""The bind address the iperf3 instance will listen on
use * to listen on all available IPs
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_bind_address(self._test)
).value
if result:
self._bind_address = result.decode('utf-8')
else:
self._bind_address = '*'
return self._bind_address
@bind_address.setter
def bind_address(self, address):
self.lib.iperf_set_test_bind_address(
self._test,
c_char_p(address.encode('utf-8'))
)
self._bind_address = address
@property
def port(self):
"""The port the iperf3 server is listening on"""
self._port = self.lib.iperf_get_test_server_port(self._test)
return self._port
@port.setter
def port(self, port):
self.lib.iperf_set_test_server_port(self._test, int(port))
self._port = port
@property
def json_output(self):
"""Toggles json output of libiperf
Turning this off will output the iperf3 instance results to
stdout/stderr
:rtype: bool
"""
enabled = self.lib.iperf_get_test_json_output(self._test)
if enabled:
self._json_output = True
else:
self._json_output = False
return self._json_output
@json_output.setter
def json_output(self, enabled):
if enabled:
self.lib.iperf_set_test_json_output(self._test, 1)
else:
self.lib.iperf_set_test_json_output(self._test, 0)
self._json_output = enabled
@property
def verbose(self):
"""Toggles verbose output for the iperf3 instance
:rtype: bool
"""
enabled = self.lib.iperf_get_verbose(self._test)
if enabled:
self._verbose = True
else:
self._verbose = False
return self._verbose
@verbose.setter
def verbose(self, enabled):
if enabled:
self.lib.iperf_set_verbose(self._test, 1)
else:
self.lib.iperf_set_verbose(self._test, 0)
self._verbose = enabled
@property
def _errno(self):
"""Returns the last error ID
:rtype: int
"""
return c_int.in_dll(self.lib, "i_errno").value
@property
def _error_to_string(self, error_id):
"""Returns an error string from libiperf
:param error_id: The error_id produced by libiperf
:rtype: string
"""
strerror = self.lib.iperf_strerror
strerror.restype = c_char_p
return strerror(error_id).decode('utf-8')
def run(self):
"""Runs the iperf3 instance.
This function has to be instantiated by the Client and Server
instances
:rtype: NotImplementedError
"""
raise NotImplementedError
|
thiezn/iperf3-python | iperf3/iperf3.py | IPerf3._error_to_string | python | def _error_to_string(self, error_id):
strerror = self.lib.iperf_strerror
strerror.restype = c_char_p
return strerror(error_id).decode('utf-8') | Returns an error string from libiperf
:param error_id: The error_id produced by libiperf
:rtype: string | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L378-L386 | null | class IPerf3(object):
"""The base class used by both the iperf3 :class:`Server` and :class:`Client`
.. note:: You should not use this class directly
"""
def __init__(self,
role,
verbose=True,
lib_name=None):
"""Initialise the iperf shared library
:param role: 'c' = client; 's' = server
:param verbose: enable verbose output
:param lib_name: optional name and path for libiperf.so.0 library
"""
if lib_name is None:
lib_name = util.find_library('libiperf')
if lib_name is None:
# If we still couldn't find it lets try the manual approach
lib_name = 'libiperf.so.0'
try:
self.lib = cdll.LoadLibrary(lib_name)
except OSError:
raise OSError(
"Couldn't find shared library {}, is iperf3 installed?".format(
lib_name
)
)
# Set the appropriate C types.
self.lib.iperf_client_end.restype = c_int
self.lib.iperf_client_end.argtypes = (c_void_p,)
self.lib.iperf_free_test.restxpe = None
self.lib.iperf_free_test.argtypes = (c_void_p,)
self.lib.iperf_new_test.restype = c_void_p
self.lib.iperf_new_test.argtypes = None
self.lib.iperf_defaults.restype = c_int
self.lib.iperf_defaults.argtypes = (c_void_p,)
self.lib.iperf_get_test_role.restype = c_char
self.lib.iperf_get_test_role.argtypes = (c_void_p,)
self.lib.iperf_set_test_role.restype = None
self.lib.iperf_set_test_role.argtypes = (c_void_p, c_char,)
self.lib.iperf_get_test_bind_address.restype = c_char_p
self.lib.iperf_get_test_bind_address.argtypes = (c_void_p,)
self.lib.iperf_set_test_bind_address.restype = None
self.lib.iperf_set_test_bind_address.argtypes = (c_void_p, c_char_p,)
self.lib.iperf_get_test_server_port.restype = c_int
self.lib.iperf_get_test_server_port.argtypes = (c_void_p,)
self.lib.iperf_set_test_server_port.restype = None
self.lib.iperf_set_test_server_port.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_json_output.restype = c_int
self.lib.iperf_get_test_json_output.argtypes = (c_void_p,)
self.lib.iperf_set_test_json_output.restype = None
self.lib.iperf_set_test_json_output.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_verbose.restype = c_int
self.lib.iperf_get_verbose.argtypes = (c_void_p,)
self.lib.iperf_set_verbose.restype = None
self.lib.iperf_set_verbose.argtypes = (c_void_p, c_int)
self.lib.iperf_strerror.restype = c_char_p
self.lib.iperf_strerror.argtypes = (c_int,)
self.lib.iperf_get_test_server_hostname.restype = c_char_p
self.lib.iperf_get_test_server_hostname.argtypes = (c_void_p,)
self.lib.iperf_set_test_server_hostname.restype = None
self.lib.iperf_set_test_server_hostname.argtypes = (
c_void_p, c_char_p,
)
self.lib.iperf_get_test_protocol_id.restype = c_int
self.lib.iperf_get_test_protocol_id.argtypes = (c_void_p,)
self.lib.set_protocol.restype = c_int
self.lib.set_protocol.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_omit.restype = c_int
self.lib.iperf_get_test_omit.argtypes = (c_void_p,)
self.lib.iperf_set_test_omit.restype = None
self.lib.iperf_set_test_omit.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_duration.restype = c_int
self.lib.iperf_get_test_duration.argtypes = (c_void_p,)
self.lib.iperf_set_test_duration.restype = None
self.lib.iperf_set_test_duration.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_rate.restype = c_uint64
self.lib.iperf_get_test_rate.argtypes = (c_void_p,)
self.lib.iperf_set_test_rate.restype = None
self.lib.iperf_set_test_rate.argtypes = (c_void_p, c_uint64,)
self.lib.iperf_get_test_blksize.restype = c_int
self.lib.iperf_get_test_blksize.argtypes = (c_void_p,)
self.lib.iperf_set_test_blksize.restype = None
self.lib.iperf_set_test_blksize.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_num_streams.restype = c_int
self.lib.iperf_get_test_num_streams.argtypes = (c_void_p,)
self.lib.iperf_set_test_num_streams.restype = None
self.lib.iperf_set_test_num_streams.argtypes = (c_void_p, c_int,)
self.lib.iperf_has_zerocopy.restype = c_int
self.lib.iperf_has_zerocopy.argtypes = None
self.lib.iperf_set_test_zerocopy.restype = None
self.lib.iperf_set_test_zerocopy.argtypes = (c_void_p, c_int,)
self.lib.iperf_get_test_reverse.restype = c_int
self.lib.iperf_get_test_reverse.argtypes = (c_void_p,)
self.lib.iperf_set_test_reverse.restype = None
self.lib.iperf_set_test_reverse.argtypes = (c_void_p, c_int,)
self.lib.iperf_run_client.restype = c_int
self.lib.iperf_run_client.argtypes = (c_void_p,)
self.lib.iperf_run_server.restype = c_int
self.lib.iperf_run_server.argtypes = (c_void_p,)
self.lib.iperf_reset_test.restype = None
self.lib.iperf_reset_test.argtypes = (c_void_p,)
try:
# Only available from iperf v3.1 and onwards
self.lib.iperf_get_test_json_output_string.restype = c_char_p
self.lib.iperf_get_test_json_output_string.argtypes = (c_void_p,)
except AttributeError:
pass
# The test C struct iperf_test
self._test = self._new()
self.defaults()
# stdout/strerr redirection variables
self._stdout_fd = os.dup(1)
self._stderr_fd = os.dup(2)
self._pipe_out, self._pipe_in = os.pipe() # no need for pipe write
# Generic test settings
self.role = role
self.json_output = True
self.verbose = verbose
def __del__(self):
"""Cleanup the test after the :class:`IPerf3` class is terminated"""
os.close(self._stdout_fd)
os.close(self._stderr_fd)
os.close(self._pipe_out)
os.close(self._pipe_in)
try:
# In the current version of libiperf, the control socket isn't
# closed on iperf_client_end(), see proposed pull request:
# https://github.com/esnet/iperf/pull/597
# Workaround for testing, don't ever do this..:
#
# sck=self.lib.iperf_get_control_socket(self._test)
# os.close(sck)
self.lib.iperf_client_end(self._test)
self.lib.iperf_free_test(self._test)
except AttributeError:
# self.lib doesn't exist, likely because iperf3 wasn't installed or
# the shared library libiperf.so.0 could not be found
pass
def _new(self):
"""Initialise a new iperf test
struct iperf_test *iperf_new_test()
"""
return self.lib.iperf_new_test()
def defaults(self):
"""Set/reset iperf test defaults."""
self.lib.iperf_defaults(self._test)
@property
def role(self):
"""The iperf3 instance role
valid roles are 'c'=client and 's'=server
:rtype: 'c' or 's'
"""
try:
self._role = c_char(
self.lib.iperf_get_test_role(self._test)
).value.decode('utf-8')
except TypeError:
self._role = c_char(
chr(self.lib.iperf_get_test_role(self._test))
).value.decode('utf-8')
return self._role
@role.setter
def role(self, role):
if role.lower() in ['c', 's']:
self.lib.iperf_set_test_role(
self._test,
c_char(role.lower().encode('utf-8'))
)
self._role = role
else:
raise ValueError("Unknown role, accepted values are 'c' and 's'")
@property
def bind_address(self):
"""The bind address the iperf3 instance will listen on
use * to listen on all available IPs
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_bind_address(self._test)
).value
if result:
self._bind_address = result.decode('utf-8')
else:
self._bind_address = '*'
return self._bind_address
@bind_address.setter
def bind_address(self, address):
self.lib.iperf_set_test_bind_address(
self._test,
c_char_p(address.encode('utf-8'))
)
self._bind_address = address
@property
def port(self):
"""The port the iperf3 server is listening on"""
self._port = self.lib.iperf_get_test_server_port(self._test)
return self._port
@port.setter
def port(self, port):
self.lib.iperf_set_test_server_port(self._test, int(port))
self._port = port
@property
def json_output(self):
"""Toggles json output of libiperf
Turning this off will output the iperf3 instance results to
stdout/stderr
:rtype: bool
"""
enabled = self.lib.iperf_get_test_json_output(self._test)
if enabled:
self._json_output = True
else:
self._json_output = False
return self._json_output
@json_output.setter
def json_output(self, enabled):
if enabled:
self.lib.iperf_set_test_json_output(self._test, 1)
else:
self.lib.iperf_set_test_json_output(self._test, 0)
self._json_output = enabled
@property
def verbose(self):
"""Toggles verbose output for the iperf3 instance
:rtype: bool
"""
enabled = self.lib.iperf_get_verbose(self._test)
if enabled:
self._verbose = True
else:
self._verbose = False
return self._verbose
@verbose.setter
def verbose(self, enabled):
if enabled:
self.lib.iperf_set_verbose(self._test, 1)
else:
self.lib.iperf_set_verbose(self._test, 0)
self._verbose = enabled
@property
def _errno(self):
"""Returns the last error ID
:rtype: int
"""
return c_int.in_dll(self.lib, "i_errno").value
@property
def iperf_version(self):
"""Returns the version of the libiperf library
:rtype: string
"""
# TODO: Is there a better way to get the const char than allocating 30?
VersionType = c_char * 30
return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
def run(self):
"""Runs the iperf3 instance.
This function has to be instantiated by the Client and Server
instances
:rtype: NotImplementedError
"""
raise NotImplementedError
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.server_hostname | python | def server_hostname(self):
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname | The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L432-L446 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.protocol | python | def protocol(self):
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol | The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L457-L471 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.omit | python | def omit(self):
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit | The test startup duration to omit in seconds. | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L486-L489 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.duration | python | def duration(self):
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration | The test duration in seconds. | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L497-L500 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.bandwidth | python | def bandwidth(self):
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth | Target bandwidth in bits/sec | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L508-L511 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.blksize | python | def blksize(self):
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize | The test blksize. | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L519-L522 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.num_streams | python | def num_streams(self):
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams | The number of streams to use. | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L552-L555 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.reverse | python | def reverse(self):
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse | Toggles direction of test
:rtype: bool | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L586-L598 | null | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
def run(self):
"""Run the current test client.
:rtype: instance of :class:`TestResult`
"""
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data)
|
thiezn/iperf3-python | iperf3/iperf3.py | Client.run | python | def run(self):
if self.json_output:
output_to_pipe(self._pipe_in) # Disable stdout
error = self.lib.iperf_run_client(self._test)
if not self.iperf_version.startswith('iperf 3.1'):
data = read_pipe(self._pipe_out)
if data.startswith('Control connection'):
data = '{' + data.split('{', 1)[1]
else:
data = c_char_p(
self.lib.iperf_get_test_json_output_string(self._test)
).value
if data:
data = data.decode('utf-8')
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
return TestResult(data) | Run the current test client.
:rtype: instance of :class:`TestResult` | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L609-L634 | [
"def read_pipe(pipe_out):\n \"\"\"Read data on a pipe\n\n Used to capture stdout data produced by libiperf\n\n :param pipe_out: The os pipe_out\n :rtype: unicode string\n \"\"\"\n out = b''\n while more_data(pipe_out):\n out += os.read(pipe_out, 1024)\n\n return out.decode('utf-8')\n",
"def output_to_pipe(pipe_in):\n \"\"\"Redirects stdout and stderr to a pipe\n\n :param pipe_out: The pipe to redirect stdout and stderr to\n \"\"\"\n os.dup2(pipe_in, 1) # stdout\n",
"def output_to_screen(stdout_fd, stderr_fd):\n \"\"\"Redirects stdout and stderr to a pipe\n\n :param stdout_fd: The stdout file descriptor\n :param stderr_fd: The stderr file descriptor\n \"\"\"\n os.dup2(stdout_fd, 1)\n",
"def _error_to_string(self, error_id):\n \"\"\"Returns an error string from libiperf\n\n :param error_id: The error_id produced by libiperf\n :rtype: string\n \"\"\"\n strerror = self.lib.iperf_strerror\n strerror.restype = c_char_p\n return strerror(error_id).decode('utf-8')\n"
] | class Client(IPerf3):
"""An iperf3 client connection.
This opens up a connection to a running iperf3 server
Basic Usage::
>>> import iperf3
>>> client = iperf3.Client()
>>> client.duration = 1
>>> client.server_hostname = '127.0.0.1'
>>> client.port = 5201
>>> client.run()
{'intervals': [{'sum': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf shared library"""
super(Client, self).__init__(role='c', *args, **kwargs)
# Internal variables
self._blksize = None
self._server_hostname = None
self._port = None
self._num_streams = None
self._zerocopy = False
self._omit = None
self._duration = None
self._bandwidth = None
self._protocol = None
@property
def server_hostname(self):
"""The server hostname to connect to.
Accepts DNS entries or IP addresses.
:rtype: string
"""
result = c_char_p(
self.lib.iperf_get_test_server_hostname(self._test)
).value
if result:
self._server_hostname = result.decode('utf-8')
else:
self._server_hostname = None
return self._server_hostname
@server_hostname.setter
def server_hostname(self, hostname):
self.lib.iperf_set_test_server_hostname(
self._test,
c_char_p(hostname.encode('utf-8'))
)
self._server_hostname = hostname
@property
def protocol(self):
"""The iperf3 instance protocol
valid protocols are 'tcp' and 'udp'
:rtype: str
"""
proto_id = self.lib.iperf_get_test_protocol_id(self._test)
if proto_id == SOCK_STREAM:
self._protocol = 'tcp'
elif proto_id == SOCK_DGRAM:
self._protocol = 'udp'
return self._protocol
@protocol.setter
def protocol(self, protocol):
if protocol == 'tcp':
self.lib.set_protocol(self._test, int(SOCK_STREAM))
elif protocol == 'udp':
self.lib.set_protocol(self._test, int(SOCK_DGRAM))
if self.blksize > MAX_UDP_BULKSIZE:
self.blksize = MAX_UDP_BULKSIZE
self._protocol = protocol
@property
def omit(self):
"""The test startup duration to omit in seconds."""
self._omit = self.lib.iperf_get_test_omit(self._test)
return self._omit
@omit.setter
def omit(self, omit):
self.lib.iperf_set_test_omit(self._test, omit)
self._omit = omit
@property
def duration(self):
"""The test duration in seconds."""
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration
@duration.setter
def duration(self, duration):
self.lib.iperf_set_test_duration(self._test, duration)
self._duration = duration
@property
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
self.lib.iperf_set_test_rate(self._test, bandwidth)
self._bandwidth = bandwidth
@property
def blksize(self):
"""The test blksize."""
self._blksize = self.lib.iperf_get_test_blksize(self._test)
return self._blksize
@blksize.setter
def blksize(self, bulksize):
# iperf version < 3.1.3 has some weird bugs when bulksize is
# larger than MAX_UDP_BULKSIZE
if self.protocol == 'udp' and bulksize > MAX_UDP_BULKSIZE:
bulksize = MAX_UDP_BULKSIZE
self.lib.iperf_set_test_blksize(self._test, bulksize)
self._blksize = bulksize
@property
def bulksize(self):
"""The test bulksize.
Deprecated argument, use blksize instead to ensure consistency
with iperf3 C libary
"""
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
return self.blksize
@bulksize.setter
def bulksize(self, bulksize):
# Keeping bulksize argument for backwards compatibility with
# iperf3-python < 0.1.7
self.blksize = bulksize
@property
def num_streams(self):
"""The number of streams to use."""
self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
return self._num_streams
@num_streams.setter
def num_streams(self, number):
self.lib.iperf_set_test_num_streams(self._test, number)
self._num_streams = number
@property
def zerocopy(self):
"""Toggle zerocopy.
Use the sendfile() system call for "Zero Copy" mode. This uses much
less CPU. This is not supported on all systems.
**Note** there isn't a hook in the libiperf library for getting the
current configured value. Relying on zerocopy.setter function
:rtype: bool
"""
return self._zerocopy
@zerocopy.setter
def zerocopy(self, enabled):
if enabled and self.lib.iperf_has_zerocopy():
self.lib.iperf_set_test_zerocopy(self._test, 1)
self._zerocopy = True
else:
self.lib.iperf_set_test_zerocopy(self._test, 0)
self._zerocopy = False
@property
def reverse(self):
"""Toggles direction of test
:rtype: bool
"""
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse
@reverse.setter
def reverse(self, enabled):
if enabled:
self.lib.iperf_set_test_reverse(self._test, 1)
else:
self.lib.iperf_set_test_reverse(self._test, 0)
self._reverse = enabled
|
thiezn/iperf3-python | iperf3/iperf3.py | Server.run | python | def run(self):
def _run_in_thread(self, data_queue):
"""Runs the iperf_run_server
:param data_queue: thread-safe queue
"""
output_to_pipe(self._pipe_in) # disable stdout
error = self.lib.iperf_run_server(self._test)
output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout
# TODO json_output_string not available on earlier iperf3 builds
# have to build in a version check using self.iperf_version
# The following line should work on later versions:
# data = c_char_p(
# self.lib.iperf_get_test_json_output_string(self._test)
# ).value
data = read_pipe(self._pipe_out)
if not data or error:
data = '{"error": "%s"}' % self._error_to_string(self._errno)
self.lib.iperf_reset_test(self._test)
data_queue.put(data)
if self.json_output:
data_queue = Queue()
t = threading.Thread(
target=_run_in_thread, args=[self, data_queue]
)
t.daemon = True
t.start()
while t.is_alive():
t.join(.1)
return TestResult(data_queue.get())
else:
# setting json_output to False will output test to screen only
self.lib.iperf_run_server(self._test)
self.lib.iperf_reset_test(self._test)
return None | Run the iperf3 server instance.
:rtype: instance of :class:`TestResult` | train | https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L660-L707 | null | class Server(IPerf3):
"""An iperf3 server connection.
This starts an iperf3 server session. The server terminates after each
succesful client connection so it might be useful to run Server.run()
in a loop.
The C function iperf_run_server is called in a seperate thread to make
sure KeyboardInterrupt(aka ctrl+c) can still be captured
Basic Usage::
>>> import iperf3
>>> server = iperf3.Server()
>>> server.run()
{'start': {...
"""
def __init__(self, *args, **kwargs):
"""Initialise the iperf3 server instance"""
super(Server, self).__init__(role='s', *args, **kwargs)
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | get_or_create_placeholder | python | def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):
placeholder, created = page.placeholders.get_or_create(
slot=placeholder_slot)
if created:
log.debug("Create placeholder %r for page %r", placeholder_slot,
page.get_title())
else:
log.debug("Use existing placeholder %r for page %r", placeholder_slot,
page.get_title())
if delete_existing:
queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)
log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(),
placeholder)
queryset.delete()
return placeholder, created | Get or create a placeholder on the given page.
Optional: Delete existing placeholder. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L24-L44 | null | import logging
import pytest
from django.utils import translation
from cms.api import add_plugin, create_page, create_title
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import CMSPlugin, Page, Title, settings
from cms.utils import apphook_reload
# https://github.com/jedie/django-tools
from django_tools.fixture_tools.languages import iter_languages
try:
# https://pypi.org/project/python-slugify/
from slugify import slugify
except ImportError:
from django.template.defaultfilters import slugify
log = logging.getLogger(__name__)
def publish_page(page, languages):
"""
Publish a CMS page in all given languages.
"""
for language_code, lang_name in iter_languages(languages):
url = page.get_absolute_url()
if page.publisher_is_draft:
page.publish(language_code)
log.info('page "%s" published in %s: %s', page, lang_name, url)
else:
log.info('published page "%s" already exists in %s: %s', page,
lang_name, url)
return page.reload()
class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
@pytest.fixture(scope="class")
def create_cms_index_pages(placeholder_slot="content"):
"""
create cms home page and fill >content< placeholder with TextPlugin
"""
try:
index_page = Page.objects.get(is_home=True, publisher_is_draft=False)
except Page.DoesNotExist:
log.debug('Create index page in "en" and...')
index_page = create_page(
title="index in English",
template=TEMPLATE_INHERITANCE_MAGIC,
language=settings.LANGUAGE_CODE,
published=False,
in_navigation=True)
placeholder, created = index_page.placeholders.get_or_create(
slot=placeholder_slot)
for language_code, lang_name in settings.LANGUAGES:
with translation.override(language_code):
title = 'index in %s' % lang_name
log.info('create %r', title)
if language_code != settings.LANGUAGE_CODE:
create_title(language_code, title, index_page)
add_plugin(
placeholder=placeholder,
plugin_type='TextPlugin', # djangocms_text_ckeditor
language=language_code,
body='index page in %s' % lang_name)
index_page.publish(language_code)
created = True
else:
created = False
log.debug('Index page already exists.')
return index_page, created
class CmsPluginPageCreator(CmsPageCreator):
"""
Create a Django CMS plugin page and fill the content.
Useable for default production fixtures or unittests fixtures.
The idea is to inherit from this class and update it for your need by
overwrite some methods ;)
"""
placeholder_slots = () # Fill no placeholders
def __init__(self, apphook, apphook_namespace, *args, **kwargs):
self.apphook = apphook
self.apphook_namespace = apphook_namespace
super(CmsPluginPageCreator, self).__init__(*args, **kwargs)
def get_title(self, language_code, lang_name):
"""
Contruct the page title. Called from self.create_title()
"""
return '%s in %s' % (self.apphook, lang_name)
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return self.get_home_page()
def create(self):
"""
Create the plugin page in all languages and fill dummy content.
"""
plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
if plugin.exists():
log.debug('Plugin page for "%s" plugin already exist, ok.',
self.apphook)
raise plugin
page, created = super(CmsPluginPageCreator, self).create()
if created:
# Add a plugin with content in all languages to the created page.
# But only on new created page
for placeholder_slot in self.placeholder_slots:
self.fill_content(page, placeholder_slot)
return page, created
def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
"""
Create cms plugin page in all existing languages.
Add a link to the index page.
:param apphook: e.g...........: 'FooBarApp'
:param apphook_namespace: e.g.: 'foobar'
:return:
"""
creator = CmsPluginPageCreator(
apphook=apphook,
apphook_namespace=apphook_namespace,
)
creator.placeholder_slot = placeholder_slot
plugin_page = creator.create()
return plugin_page
class DummyPageGenerator(CmsPageCreator):
def __init__(self,
delete_first=False,
title_prefix=None,
levels=3,
count=2):
if title_prefix is None:
self.title_prefix = self.__class__.__name__
else:
self.title_prefix = title_prefix
self.levels = levels
self.current_level = 1
self.count = count
self.current_count = 1
self.page_data = {}
super(DummyPageGenerator, self).__init__(delete_first=delete_first)
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
title = "%s %i-%i in %s" % (self.title_prefix, self.current_count,
self.current_level, language_code)
log.info(title)
return title
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
if self.current_level == 1:
# 'root' page
return None
else:
return self.page_data[(self.current_level - 1, self.current_count)]
def create(self):
for count in range(1, self.count + 1):
self.current_count = count
for level in range(1, self.levels + 1):
self.current_level = level
log.info("Level: %i current count: %i" % (self.current_level,
self.current_count))
page, created = super().create(
) # Create page (and page title) in default language
self.page_data[(self.current_level, self.current_count)] = page
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return self.page_data
def create_dummy_pages(delete_first, title_prefix, levels, count):
page_data = DummyPageGenerator(
delete_first=delete_first,
title_prefix=title_prefix,
levels=levels,
count=count).create()
return page_data
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | publish_page | python | def publish_page(page, languages):
for language_code, lang_name in iter_languages(languages):
url = page.get_absolute_url()
if page.publisher_is_draft:
page.publish(language_code)
log.info('page "%s" published in %s: %s', page, lang_name, url)
else:
log.info('published page "%s" already exists in %s: %s', page,
lang_name, url)
return page.reload() | Publish a CMS page in all given languages. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L47-L60 | null | import logging
import pytest
from django.utils import translation
from cms.api import add_plugin, create_page, create_title
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import CMSPlugin, Page, Title, settings
from cms.utils import apphook_reload
# https://github.com/jedie/django-tools
from django_tools.fixture_tools.languages import iter_languages
try:
# https://pypi.org/project/python-slugify/
from slugify import slugify
except ImportError:
from django.template.defaultfilters import slugify
log = logging.getLogger(__name__)
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):
"""
Get or create a placeholder on the given page.
Optional: Delete existing placeholder.
"""
placeholder, created = page.placeholders.get_or_create(
slot=placeholder_slot)
if created:
log.debug("Create placeholder %r for page %r", placeholder_slot,
page.get_title())
else:
log.debug("Use existing placeholder %r for page %r", placeholder_slot,
page.get_title())
if delete_existing:
queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)
log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(),
placeholder)
queryset.delete()
return placeholder, created
class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
def create_cms_index_pages(placeholder_slot="content"):
    """
    Create the CMS home page and fill the *placeholder_slot* placeholder
    with a TextPlugin in all languages from ``settings.LANGUAGES``.

    Idempotent: if a published home page already exists, nothing is
    created and ``created`` is False.

    :param placeholder_slot: name of the placeholder to fill.
    :return: ``(index_page, created)`` tuple.
    """
    # NOTE(review): this is a plain fixture-helper function (used for
    # production fixtures as well as tests). The stray
    # ``@pytest.fixture(scope="class")`` decorator was removed: it made
    # the function uncallable as a normal function and pytest would have
    # treated ``placeholder_slot`` as a fixture request, not a default.
    try:
        index_page = Page.objects.get(is_home=True, publisher_is_draft=False)
    except Page.DoesNotExist:
        log.debug('Create index page in "en" and...')
        index_page = create_page(
            title="index in English",
            template=TEMPLATE_INHERITANCE_MAGIC,
            language=settings.LANGUAGE_CODE,
            published=False,
            in_navigation=True)
        placeholder, created = index_page.placeholders.get_or_create(
            slot=placeholder_slot)
        for language_code, lang_name in settings.LANGUAGES:
            with translation.override(language_code):
                title = 'index in %s' % lang_name
                log.info('create %r', title)
                if language_code != settings.LANGUAGE_CODE:
                    # The default-language title was already created by
                    # create_page() above.
                    create_title(language_code, title, index_page)
                add_plugin(
                    placeholder=placeholder,
                    plugin_type='TextPlugin',  # djangocms_text_ckeditor
                    language=language_code,
                    body='index page in %s' % lang_name)
                index_page.publish(language_code)
        created = True
    else:
        created = False
        log.debug('Index page already exists.')
    return index_page, created
class CmsPluginPageCreator(CmsPageCreator):
    """
    Create a Django CMS plugin page and fill the content.

    Usable for default production fixtures or unittests fixtures.
    The idea is to inherit from this class and update it for your need by
    overwriting some methods ;)
    """
    placeholder_slots = ()  # Fill no placeholders by default

    def __init__(self, apphook, apphook_namespace, *args, **kwargs):
        """
        :param apphook: e.g.: 'FooBarApp'
        :param apphook_namespace: e.g.: 'foobar'
        """
        self.apphook = apphook
        self.apphook_namespace = apphook_namespace
        super(CmsPluginPageCreator, self).__init__(*args, **kwargs)

    def get_title(self, language_code, lang_name):
        """
        Construct the page title. Called from self.create_title()
        """
        return '%s in %s' % (self.apphook, lang_name)

    def get_parent_page(self):
        """
        For 'parent' in cms.api.create_page():
        plugin pages hang below the home page.
        """
        return self.get_home_page()

    def create(self):
        """
        Create the plugin page in all languages and fill dummy content.

        :return: ``(page, created)`` tuple
        """
        plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
        if plugin.exists():
            # BUGFIX: this branch used to do ``raise plugin``, which
            # raised a QuerySet (TypeError at runtime) and contradicted
            # the log message below. The super().create() call already
            # looks up (and returns) an existing page, so just log and
            # continue.
            log.debug('Plugin page for "%s" plugin already exist, ok.',
                      self.apphook)

        page, created = super(CmsPluginPageCreator, self).create()

        if created:
            # Add a plugin with content in all languages to the created page.
            # But only on new created page
            for placeholder_slot in self.placeholder_slots:
                self.fill_content(page, placeholder_slot)

        return page, created
def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
    """
    Create cms plugin page in all existing languages, below the home page.

    :param apphook: e.g...........: 'FooBarApp'
    :param apphook_namespace: e.g.: 'foobar'
    :param placeholder_slot: optional slot name to fill with dummy content.
    :return: ``(page, created)`` tuple from CmsPluginPageCreator.create()
    """
    creator = CmsPluginPageCreator(
        apphook=apphook,
        apphook_namespace=apphook_namespace,
    )
    if placeholder_slot is not None:
        # BUGFIX: CmsPageCreator reads the *plural* ``placeholder_slots``
        # tuple; the old code assigned the never-read singular
        # ``placeholder_slot`` attribute, so the argument had no effect.
        creator.placeholder_slots = (placeholder_slot, )
    plugin_page = creator.create()
    return plugin_page
class DummyPageGenerator(CmsPageCreator):
    """
    Create a tree of dummy CMS pages: ``count`` independent branches,
    each ``levels`` pages deep (every page is the child of the page one
    level up in the same branch).
    """

    def __init__(self,
                 delete_first=False,
                 title_prefix=None,
                 levels=3,
                 count=2):
        """
        :param delete_first: passed through to CmsPageCreator
        :param title_prefix: prefix for every page title
            (defaults to the class name)
        :param levels: depth of every branch
        :param count: number of branches
        """
        if title_prefix is None:
            title_prefix = self.__class__.__name__
        self.title_prefix = title_prefix
        self.levels = levels
        self.count = count
        # Cursor state, advanced while create() iterates; read by
        # get_title() / get_parent_page() via the base-class machinery:
        self.current_level = 1
        self.current_count = 1
        # Maps (level, count) -> created page; used for parent lookups.
        self.page_data = {}
        super(DummyPageGenerator, self).__init__(delete_first=delete_first)

    def get_title(self, language_code, lang_name):
        """
        :return: 'title' string for cms.api.create_page()
        """
        title = "%s %i-%i in %s" % (
            self.title_prefix, self.current_count, self.current_level,
            language_code)
        log.info(title)
        return title

    def get_parent_page(self):
        """
        For 'parent' in cms.api.create_page():
        level 1 pages are roots; deeper pages attach to the page created
        one level up in the same branch.
        """
        if self.current_level == 1:
            return None
        return self.page_data[(self.current_level - 1, self.current_count)]

    def create(self):
        """
        Build all branches.

        :return: the ``(level, count) -> page`` mapping.
        """
        for branch_no in range(1, self.count + 1):
            self.current_count = branch_no
            for depth in range(1, self.levels + 1):
                self.current_level = depth
                log.info("Level: %i current count: %i" % (
                    self.current_level, self.current_count))
                # Create page (and page title) in default language:
                page, created = super().create()
                self.page_data[(depth, branch_no)] = page

        # Force to reload the url configuration.
        # Important for unittests to "find" all plugins ;)
        apphook_reload.reload_urlconf()
        return self.page_data
def create_dummy_pages(delete_first, title_prefix, levels, count):
    """
    Convenience wrapper: build a DummyPageGenerator tree.

    :return: the ``(level, count) -> page`` mapping from
        DummyPageGenerator.create()
    """
    generator = DummyPageGenerator(
        delete_first=delete_first,
        title_prefix=title_prefix,
        levels=levels,
        count=count,
    )
    return generator.create()
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | create_cms_index_pages | python | def create_cms_index_pages(placeholder_slot="content"):
try:
index_page = Page.objects.get(is_home=True, publisher_is_draft=False)
except Page.DoesNotExist:
log.debug('Create index page in "en" and...')
index_page = create_page(
title="index in English",
template=TEMPLATE_INHERITANCE_MAGIC,
language=settings.LANGUAGE_CODE,
published=False,
in_navigation=True)
placeholder, created = index_page.placeholders.get_or_create(
slot=placeholder_slot)
for language_code, lang_name in settings.LANGUAGES:
with translation.override(language_code):
title = 'index in %s' % lang_name
log.info('create %r', title)
if language_code != settings.LANGUAGE_CODE:
create_title(language_code, title, index_page)
add_plugin(
placeholder=placeholder,
plugin_type='TextPlugin', # djangocms_text_ckeditor
language=language_code,
body='index page in %s' % lang_name)
index_page.publish(language_code)
created = True
else:
created = False
log.debug('Index page already exists.')
return index_page, created | create cms home page and fill >content< placeholder with TextPlugin | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L372-L406 | null | import logging
import pytest
from django.utils import translation
from cms.api import add_plugin, create_page, create_title
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import CMSPlugin, Page, Title, settings
from cms.utils import apphook_reload
# https://github.com/jedie/django-tools
from django_tools.fixture_tools.languages import iter_languages
try:
# https://pypi.org/project/python-slugify/
from slugify import slugify
except ImportError:
from django.template.defaultfilters import slugify
log = logging.getLogger(__name__)
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):
"""
Get or create a placeholder on the given page.
Optional: Delete existing placeholder.
"""
placeholder, created = page.placeholders.get_or_create(
slot=placeholder_slot)
if created:
log.debug("Create placeholder %r for page %r", placeholder_slot,
page.get_title())
else:
log.debug("Use existing placeholder %r for page %r", placeholder_slot,
page.get_title())
if delete_existing:
queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)
log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(),
placeholder)
queryset.delete()
return placeholder, created
def publish_page(page, languages):
"""
Publish a CMS page in all given languages.
"""
for language_code, lang_name in iter_languages(languages):
url = page.get_absolute_url()
if page.publisher_is_draft:
page.publish(language_code)
log.info('page "%s" published in %s: %s', page, lang_name, url)
else:
log.info('published page "%s" already exists in %s: %s', page,
lang_name, url)
return page.reload()
class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
@pytest.fixture(scope="class")
class CmsPluginPageCreator(CmsPageCreator):
"""
Create a Django CMS plugin page and fill the content.
Useable for default production fixtures or unittests fixtures.
The idea is to inherit from this class and update it for your need by
overwrite some methods ;)
"""
placeholder_slots = () # Fill no placeholders
def __init__(self, apphook, apphook_namespace, *args, **kwargs):
self.apphook = apphook
self.apphook_namespace = apphook_namespace
super(CmsPluginPageCreator, self).__init__(*args, **kwargs)
def get_title(self, language_code, lang_name):
"""
Contruct the page title. Called from self.create_title()
"""
return '%s in %s' % (self.apphook, lang_name)
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return self.get_home_page()
def create(self):
"""
Create the plugin page in all languages and fill dummy content.
"""
plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
if plugin.exists():
log.debug('Plugin page for "%s" plugin already exist, ok.',
self.apphook)
raise plugin
page, created = super(CmsPluginPageCreator, self).create()
if created:
# Add a plugin with content in all languages to the created page.
# But only on new created page
for placeholder_slot in self.placeholder_slots:
self.fill_content(page, placeholder_slot)
return page, created
def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
"""
Create cms plugin page in all existing languages.
Add a link to the index page.
:param apphook: e.g...........: 'FooBarApp'
:param apphook_namespace: e.g.: 'foobar'
:return:
"""
creator = CmsPluginPageCreator(
apphook=apphook,
apphook_namespace=apphook_namespace,
)
creator.placeholder_slot = placeholder_slot
plugin_page = creator.create()
return plugin_page
class DummyPageGenerator(CmsPageCreator):
def __init__(self,
delete_first=False,
title_prefix=None,
levels=3,
count=2):
if title_prefix is None:
self.title_prefix = self.__class__.__name__
else:
self.title_prefix = title_prefix
self.levels = levels
self.current_level = 1
self.count = count
self.current_count = 1
self.page_data = {}
super(DummyPageGenerator, self).__init__(delete_first=delete_first)
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
title = "%s %i-%i in %s" % (self.title_prefix, self.current_count,
self.current_level, language_code)
log.info(title)
return title
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
if self.current_level == 1:
# 'root' page
return None
else:
return self.page_data[(self.current_level - 1, self.current_count)]
def create(self):
for count in range(1, self.count + 1):
self.current_count = count
for level in range(1, self.levels + 1):
self.current_level = level
log.info("Level: %i current count: %i" % (self.current_level,
self.current_count))
page, created = super().create(
) # Create page (and page title) in default language
self.page_data[(self.current_level, self.current_count)] = page
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return self.page_data
def create_dummy_pages(delete_first, title_prefix, levels, count):
page_data = DummyPageGenerator(
delete_first=delete_first,
title_prefix=title_prefix,
levels=levels,
count=count).create()
return page_data
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | create_cms_plugin_page | python | def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
creator = CmsPluginPageCreator(
apphook=apphook,
apphook_namespace=apphook_namespace,
)
creator.placeholder_slot = placeholder_slot
plugin_page = creator.create()
return plugin_page | Create cms plugin page in all existing languages.
Add a link to the index page.
:param apphook: e.g...........: 'FooBarApp'
:param apphook_namespace: e.g.: 'foobar'
:return: | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L458-L473 | [
"def create(self):\n \"\"\"\n Create the plugin page in all languages and fill dummy content.\n \"\"\"\n plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)\n if plugin.exists():\n log.debug('Plugin page for \"%s\" plugin already exist, ok.',\n self.apphook)\n raise plugin\n\n page, created = super(CmsPluginPageCreator, self).create()\n\n if created:\n # Add a plugin with content in all languages to the created page.\n # But only on new created page\n for placeholder_slot in self.placeholder_slots:\n self.fill_content(page, placeholder_slot)\n\n return page, created\n"
] | import logging
import pytest
from django.utils import translation
from cms.api import add_plugin, create_page, create_title
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import CMSPlugin, Page, Title, settings
from cms.utils import apphook_reload
# https://github.com/jedie/django-tools
from django_tools.fixture_tools.languages import iter_languages
try:
# https://pypi.org/project/python-slugify/
from slugify import slugify
except ImportError:
from django.template.defaultfilters import slugify
log = logging.getLogger(__name__)
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):
"""
Get or create a placeholder on the given page.
Optional: Delete existing placeholder.
"""
placeholder, created = page.placeholders.get_or_create(
slot=placeholder_slot)
if created:
log.debug("Create placeholder %r for page %r", placeholder_slot,
page.get_title())
else:
log.debug("Use existing placeholder %r for page %r", placeholder_slot,
page.get_title())
if delete_existing:
queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)
log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(),
placeholder)
queryset.delete()
return placeholder, created
def publish_page(page, languages):
"""
Publish a CMS page in all given languages.
"""
for language_code, lang_name in iter_languages(languages):
url = page.get_absolute_url()
if page.publisher_is_draft:
page.publish(language_code)
log.info('page "%s" published in %s: %s', page, lang_name, url)
else:
log.info('published page "%s" already exists in %s: %s', page,
lang_name, url)
return page.reload()
class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
@pytest.fixture(scope="class")
def create_cms_index_pages(placeholder_slot="content"):
"""
create cms home page and fill >content< placeholder with TextPlugin
"""
try:
index_page = Page.objects.get(is_home=True, publisher_is_draft=False)
except Page.DoesNotExist:
log.debug('Create index page in "en" and...')
index_page = create_page(
title="index in English",
template=TEMPLATE_INHERITANCE_MAGIC,
language=settings.LANGUAGE_CODE,
published=False,
in_navigation=True)
placeholder, created = index_page.placeholders.get_or_create(
slot=placeholder_slot)
for language_code, lang_name in settings.LANGUAGES:
with translation.override(language_code):
title = 'index in %s' % lang_name
log.info('create %r', title)
if language_code != settings.LANGUAGE_CODE:
create_title(language_code, title, index_page)
add_plugin(
placeholder=placeholder,
plugin_type='TextPlugin', # djangocms_text_ckeditor
language=language_code,
body='index page in %s' % lang_name)
index_page.publish(language_code)
created = True
else:
created = False
log.debug('Index page already exists.')
return index_page, created
class CmsPluginPageCreator(CmsPageCreator):
"""
Create a Django CMS plugin page and fill the content.
Useable for default production fixtures or unittests fixtures.
The idea is to inherit from this class and update it for your need by
overwrite some methods ;)
"""
placeholder_slots = () # Fill no placeholders
def __init__(self, apphook, apphook_namespace, *args, **kwargs):
self.apphook = apphook
self.apphook_namespace = apphook_namespace
super(CmsPluginPageCreator, self).__init__(*args, **kwargs)
def get_title(self, language_code, lang_name):
"""
Contruct the page title. Called from self.create_title()
"""
return '%s in %s' % (self.apphook, lang_name)
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return self.get_home_page()
def create(self):
"""
Create the plugin page in all languages and fill dummy content.
"""
plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
if plugin.exists():
log.debug('Plugin page for "%s" plugin already exist, ok.',
self.apphook)
raise plugin
page, created = super(CmsPluginPageCreator, self).create()
if created:
# Add a plugin with content in all languages to the created page.
# But only on new created page
for placeholder_slot in self.placeholder_slots:
self.fill_content(page, placeholder_slot)
return page, created
class DummyPageGenerator(CmsPageCreator):
def __init__(self,
delete_first=False,
title_prefix=None,
levels=3,
count=2):
if title_prefix is None:
self.title_prefix = self.__class__.__name__
else:
self.title_prefix = title_prefix
self.levels = levels
self.current_level = 1
self.count = count
self.current_count = 1
self.page_data = {}
super(DummyPageGenerator, self).__init__(delete_first=delete_first)
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
title = "%s %i-%i in %s" % (self.title_prefix, self.current_count,
self.current_level, language_code)
log.info(title)
return title
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
if self.current_level == 1:
# 'root' page
return None
else:
return self.page_data[(self.current_level - 1, self.current_count)]
def create(self):
for count in range(1, self.count + 1):
self.current_count = count
for level in range(1, self.levels + 1):
self.current_level = level
log.info("Level: %i current count: %i" % (self.current_level,
self.current_count))
page, created = super().create(
) # Create page (and page title) in default language
self.page_data[(self.current_level, self.current_count)] = page
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return self.page_data
def create_dummy_pages(delete_first, title_prefix, levels, count):
page_data = DummyPageGenerator(
delete_first=delete_first,
title_prefix=title_prefix,
levels=levels,
count=count).create()
return page_data
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.get_slug | python | def get_slug(self, language_code, lang_name):
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug | Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L107-L121 | [
"def get_title(self, language_code, lang_name):\n \"\"\"\n :return: 'title' string for cms.api.create_page()\n \"\"\"\n return \"%s in %s\" % (self.__class__.__name__, language_code)\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.get_home_page | python | def get_home_page(self):
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft | Return the published home page.
Used for 'parent' in cms.api.create_page() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L129-L140 | null | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.publish | python | def publish(self, page):
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages) | Publish the page in all languages. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L148-L153 | [
"def publish_page(page, languages):\n \"\"\"\n Publish a CMS page in all given languages.\n \"\"\"\n for language_code, lang_name in iter_languages(languages):\n url = page.get_absolute_url()\n\n if page.publisher_is_draft:\n page.publish(language_code)\n log.info('page \"%s\" published in %s: %s', page, lang_name, url)\n else:\n log.info('published page \"%s\" already exists in %s: %s', page,\n lang_name, url)\n return page.reload()\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.create_page | python | def create_page(self, **extra_kwargs):
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created | Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
} | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L155-L250 | [
"def get_title(self, language_code, lang_name):\n \"\"\"\n :return: 'title' string for cms.api.create_page()\n \"\"\"\n return \"%s in %s\" % (self.__class__.__name__, language_code)\n",
"def get_menu_title(self, language_code, lang_name):\n \"\"\"\n :return: 'menu_title' string for cms.api.create_page()\n \"\"\"\n return None # No extra title for menu\n",
"def get_slug(self, language_code, lang_name):\n \"\"\"\n Notes:\n - slug must be unique!\n - slug is used to check if page already exists!\n :return: 'slug' string for cms.api.create_page()\n \"\"\"\n title = self.get_title(language_code, lang_name)\n assert title != \"\"\n\n title = str(title) # e.g.: evaluate a lazy translation\n\n slug = slugify(title)\n assert slug != \"\", \"Title %r results in empty slug!\" % title\n return slug\n",
"def get_template(self, language_code, lang_name):\n \"\"\"\n :return: 'template' string for cms.api.create_page()\n \"\"\"\n return self.template\n",
"def get_parent_page(self):\n \"\"\"\n For 'parent' in cms.api.create_page()\n \"\"\"\n return None\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.create_title | python | def create_title(self, page):
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title) | Create page title in all other languages with cms.api.create_title() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L252-L270 | [
"def get_title(self, language_code, lang_name):\n \"\"\"\n :return: 'title' string for cms.api.create_page()\n \"\"\"\n return \"%s in %s\" % (self.__class__.__name__, language_code)\n",
"def get_slug(self, language_code, lang_name):\n \"\"\"\n Notes:\n - slug must be unique!\n - slug is used to check if page already exists!\n :return: 'slug' string for cms.api.create_page()\n \"\"\"\n title = self.get_title(language_code, lang_name)\n assert title != \"\"\n\n title = str(title) # e.g.: evaluate a lazy translation\n\n slug = slugify(title)\n assert slug != \"\", \"Title %r results in empty slug!\" % title\n return slug\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.get_add_plugin_kwargs | python | def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
} | Return "content" for create the plugin.
Called from self.add_plugins() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L289-L301 | [
"def get_dummy_text(self, page, no, placeholder, language_code, lang_name):\n if no == 1:\n source = self.prefix_dummy_part\n elif no == self.dummy_text_count:\n source = self.suffix_dummy_part\n else:\n source = self.dummy_text_part\n\n dummy_text = source.format(\n absolute_url=page.get_absolute_url(language=language_code),\n no=no,\n slot=placeholder.slot,\n language_code=language_code,\n lang_name=lang_name,\n )\n return dummy_text\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.add_plugins | python | def add_plugins(self, page, placeholder):
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save() | Add a "TextPlugin" in all languages. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L303-L320 | [
"def get_add_plugin_kwargs(self, page, no, placeholder, language_code,\n lang_name):\n \"\"\"\n Return \"content\" for create the plugin.\n Called from self.add_plugins()\n \"\"\"\n return {\n \"plugin_type\":\n 'TextPlugin', # djangocms_text_ckeditor\n \"body\":\n self.get_dummy_text(page, no, placeholder, language_code,\n lang_name)\n }\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.get_or_create_placeholder | python | def get_or_create_placeholder(self, page, placeholder_slot):
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created | Add a placeholder if not exists. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L322-L328 | [
"def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):\n \"\"\"\n Get or create a placeholder on the given page.\n Optional: Delete existing placeholder.\n \"\"\"\n placeholder, created = page.placeholders.get_or_create(\n slot=placeholder_slot)\n if created:\n log.debug(\"Create placeholder %r for page %r\", placeholder_slot,\n page.get_title())\n else:\n log.debug(\"Use existing placeholder %r for page %r\", placeholder_slot,\n page.get_title())\n\n if delete_existing:\n queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)\n log.info(\"Delete %i CMSPlugins on placeholder %s...\", queryset.count(),\n placeholder)\n queryset.delete()\n\n return placeholder, created\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def fill_content(self, page, placeholder_slot):
"""
Add a placeholder to the page.
Here we add a "TextPlugin" in all languages.
"""
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder)
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPageCreator.fill_content | python | def fill_content(self, page, placeholder_slot):
if len(placeholder_slot) == 1:
raise RuntimeError(placeholder_slot)
placeholder, created = self.get_or_create_placeholder(
page, placeholder_slot)
self.add_plugins(page, placeholder) | Add a placeholder to the page.
Here we add a "TextPlugin" in all languages. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L330-L339 | [
"def add_plugins(self, page, placeholder):\n \"\"\"\n Add a \"TextPlugin\" in all languages.\n \"\"\"\n for language_code, lang_name in iter_languages(self.languages):\n for no in range(1, self.dummy_text_count + 1):\n add_plugin_kwargs = self.get_add_plugin_kwargs(\n page, no, placeholder, language_code, lang_name)\n\n log.info(\n 'add plugin to placeholder \"%s\" (pk:%i) in: %s - no: %i',\n placeholder, placeholder.pk, lang_name, no)\n plugin = add_plugin(\n placeholder=placeholder,\n language=language_code,\n **add_plugin_kwargs)\n log.info('Plugin \"%s\" (pk:%r) added.', str(plugin), plugin.pk)\n placeholder.save()\n",
"def get_or_create_placeholder(self, page, placeholder_slot):\n \"\"\"\n Add a placeholder if not exists.\n \"\"\"\n placeholder, created = get_or_create_placeholder(\n page, placeholder_slot, delete_existing=self.delete_first)\n return placeholder, created\n"
] | class CmsPageCreator(object):
"""
Create a normal Django CMS page
"""
# Some defaults:
languages = settings.LANGUAGES # Languages for created content.
default_language_code = settings.LANGUAGE_CODE # First language to start create the page
template = TEMPLATE_INHERITANCE_MAGIC
in_navigation = True
apphook = None # e.g.: "FooBarApp"
apphook_namespace = None # e.g.: "foobar"
placeholder_slots = ("content", )
dummy_text_count = 3
prefix_dummy_part = "<h2>Dummy no. {no} in {lang_name} (placeholder {slot})</h2>"
dummy_text_part = (
"<h3>dummy text part no. {no} in placeholder {slot}</h3>\n"
"<p>Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt"
" ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
" exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute"
" iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
" Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt"
" mollit anim id est laborum.</p>")
suffix_dummy_part = "<p>(absolute url: {absolute_url})</p>"
def __init__(self, delete_first=False, placeholder_slots=None):
self.delete_first = delete_first
if placeholder_slots is not None:
self.placeholder_slots = placeholder_slots
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
return "%s in %s" % (self.__class__.__name__, language_code)
def get_menu_title(self, language_code, lang_name):
"""
:return: 'menu_title' string for cms.api.create_page()
"""
return None # No extra title for menu
def get_slug(self, language_code, lang_name):
"""
Notes:
- slug must be unique!
- slug is used to check if page already exists!
:return: 'slug' string for cms.api.create_page()
"""
title = self.get_title(language_code, lang_name)
assert title != ""
title = str(title) # e.g.: evaluate a lazy translation
slug = slugify(title)
assert slug != "", "Title %r results in empty slug!" % title
return slug
def get_template(self, language_code, lang_name):
"""
:return: 'template' string for cms.api.create_page()
"""
return self.template
def get_home_page(self):
"""
Return the published home page.
Used for 'parent' in cms.api.create_page()
"""
try:
home_page_draft = Page.objects.get(
is_home=True, publisher_is_draft=True)
except Page.DoesNotExist:
log.error('ERROR: "home page" doesn\'t exists!')
raise RuntimeError('no home page')
return home_page_draft
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return None
def publish(self, page):
"""
Publish the page in all languages.
"""
assert page.publisher_is_draft == True, "Page '%s' must be a draft!" % page
publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
"""
Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
}
"""
with translation.override(self.default_language_code):
# for evaluate the language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
# Create a plugin page
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass # Create page
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
# Not a plugin page
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass # Create page
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
# set right translation language
# for evaluate language name lazy translation
# e.g.: settings.LANGUAGE_CODE is not "en"
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created
def create_title(self, page):
"""
Create page title in all other languages with cms.api.create_title()
"""
for language_code, lang_name in iter_languages(self.languages):
try:
title = Title.objects.get(page=page, language=language_code)
except Title.DoesNotExist:
slug = self.get_slug(language_code, lang_name)
assert slug != "", "No slug for %r" % language_code
title = create_title(
language=language_code,
title=self.get_title(language_code, lang_name),
page=page,
slug=slug,
)
log.debug("Title created: %s", title)
else:
log.debug("Page title exist: %s", title)
def get_dummy_text(self, page, no, placeholder, language_code, lang_name):
if no == 1:
source = self.prefix_dummy_part
elif no == self.dummy_text_count:
source = self.suffix_dummy_part
else:
source = self.dummy_text_part
dummy_text = source.format(
absolute_url=page.get_absolute_url(language=language_code),
no=no,
slot=placeholder.slot,
language_code=language_code,
lang_name=lang_name,
)
return dummy_text
def get_add_plugin_kwargs(self, page, no, placeholder, language_code,
lang_name):
"""
Return "content" for create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type":
'TextPlugin', # djangocms_text_ckeditor
"body":
self.get_dummy_text(page, no, placeholder, language_code,
lang_name)
}
def add_plugins(self, page, placeholder):
"""
Add a "TextPlugin" in all languages.
"""
for language_code, lang_name in iter_languages(self.languages):
for no in range(1, self.dummy_text_count + 1):
add_plugin_kwargs = self.get_add_plugin_kwargs(
page, no, placeholder, language_code, lang_name)
log.info(
'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
placeholder, placeholder.pk, lang_name, no)
plugin = add_plugin(
placeholder=placeholder,
language=language_code,
**add_plugin_kwargs)
log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
"""
Add a placeholder if not exists.
"""
placeholder, created = get_or_create_placeholder(
page, placeholder_slot, delete_existing=self.delete_first)
return placeholder, created
def create(self):
page, created = self.create_page(
) # Create page (and page title) in default language
self.create_title(page) # Create page title in all other languages
#
# We publish the page before self.fill_content()
#
# Maybe the create process will try to find the published
# page instance (e.g.: get_absolute_url() may be called)
self.publish(page)
if created:
# Add plugins only on new created pages
# otherwise we will add more and more plugins
# on every run!
for placeholder_slot in self.placeholder_slots:
self.fill_content(
page, placeholder_slot) # Add content to the created page.
# Publish again, to make the filled content available:
self.publish(page)
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return page, created
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | CmsPluginPageCreator.create | python | def create(self):
plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
if plugin.exists():
log.debug('Plugin page for "%s" plugin already exist, ok.',
self.apphook)
raise plugin
page, created = super(CmsPluginPageCreator, self).create()
if created:
# Add a plugin with content in all languages to the created page.
# But only on new created page
for placeholder_slot in self.placeholder_slots:
self.fill_content(page, placeholder_slot)
return page, created | Create the plugin page in all languages and fill dummy content. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L437-L455 | [
"def fill_content(self, page, placeholder_slot):\n \"\"\"\n Add a placeholder to the page.\n Here we add a \"TextPlugin\" in all languages.\n \"\"\"\n if len(placeholder_slot) == 1:\n raise RuntimeError(placeholder_slot)\n placeholder, created = self.get_or_create_placeholder(\n page, placeholder_slot)\n self.add_plugins(page, placeholder)\n",
"def create(self):\n page, created = self.create_page(\n ) # Create page (and page title) in default language\n self.create_title(page) # Create page title in all other languages\n\n #\n # We publish the page before self.fill_content()\n #\n # Maybe the create process will try to find the published\n # page instance (e.g.: get_absolute_url() may be called)\n self.publish(page)\n\n if created:\n # Add plugins only on new created pages\n # otherwise we will add more and more plugins\n # on every run!\n for placeholder_slot in self.placeholder_slots:\n self.fill_content(\n page, placeholder_slot) # Add content to the created page.\n\n # Publish again, to make the filled content available:\n self.publish(page)\n\n # Force to reload the url configuration.\n # Important for unittests to \"find\" all plugins ;)\n apphook_reload.reload_urlconf()\n\n return page, created\n"
] | class CmsPluginPageCreator(CmsPageCreator):
"""
Create a Django CMS plugin page and fill the content.
Useable for default production fixtures or unittests fixtures.
The idea is to inherit from this class and update it for your need by
overwrite some methods ;)
"""
placeholder_slots = () # Fill no placeholders
def __init__(self, apphook, apphook_namespace, *args, **kwargs):
self.apphook = apphook
self.apphook_namespace = apphook_namespace
super(CmsPluginPageCreator, self).__init__(*args, **kwargs)
def get_title(self, language_code, lang_name):
"""
Contruct the page title. Called from self.create_title()
"""
return '%s in %s' % (self.apphook, lang_name)
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
return self.get_home_page()
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | DummyPageGenerator.get_title | python | def get_title(self, language_code, lang_name):
title = "%s %i-%i in %s" % (self.title_prefix, self.current_count,
self.current_level, language_code)
log.info(title)
return title | :return: 'title' string for cms.api.create_page() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L494-L501 | null | class DummyPageGenerator(CmsPageCreator):
def __init__(self,
delete_first=False,
title_prefix=None,
levels=3,
count=2):
if title_prefix is None:
self.title_prefix = self.__class__.__name__
else:
self.title_prefix = title_prefix
self.levels = levels
self.current_level = 1
self.count = count
self.current_count = 1
self.page_data = {}
super(DummyPageGenerator, self).__init__(delete_first=delete_first)
def get_parent_page(self):
"""
For 'parent' in cms.api.create_page()
"""
if self.current_level == 1:
# 'root' page
return None
else:
return self.page_data[(self.current_level - 1, self.current_count)]
def create(self):
for count in range(1, self.count + 1):
self.current_count = count
for level in range(1, self.levels + 1):
self.current_level = level
log.info("Level: %i current count: %i" % (self.current_level,
self.current_count))
page, created = super().create(
) # Create page (and page title) in default language
self.page_data[(self.current_level, self.current_count)] = page
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return self.page_data
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/pages.py | DummyPageGenerator.get_parent_page | python | def get_parent_page(self):
if self.current_level == 1:
# 'root' page
return None
else:
return self.page_data[(self.current_level - 1, self.current_count)] | For 'parent' in cms.api.create_page() | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L503-L511 | null | class DummyPageGenerator(CmsPageCreator):
def __init__(self,
delete_first=False,
title_prefix=None,
levels=3,
count=2):
if title_prefix is None:
self.title_prefix = self.__class__.__name__
else:
self.title_prefix = title_prefix
self.levels = levels
self.current_level = 1
self.count = count
self.current_count = 1
self.page_data = {}
super(DummyPageGenerator, self).__init__(delete_first=delete_first)
def get_title(self, language_code, lang_name):
"""
:return: 'title' string for cms.api.create_page()
"""
title = "%s %i-%i in %s" % (self.title_prefix, self.current_count,
self.current_level, language_code)
log.info(title)
return title
def create(self):
for count in range(1, self.count + 1):
self.current_count = count
for level in range(1, self.levels + 1):
self.current_level = level
log.info("Level: %i current count: %i" % (self.current_level,
self.current_count))
page, created = super().create(
) # Create page (and page title) in default language
self.page_data[(self.current_level, self.current_count)] = page
# Force to reload the url configuration.
# Important for unittests to "find" all plugins ;)
apphook_reload.reload_urlconf()
return self.page_data
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/page_utils.py | get_public_cms_app_namespaces | python | def get_public_cms_app_namespaces():
qs = Page.objects.public()
qs = qs.exclude(application_namespace=None)
qs = qs.order_by('application_namespace')
try:
application_namespaces = list(
qs.distinct('application_namespace').values_list(
'application_namespace', flat=True))
except NotImplementedError:
# If SQLite used:
# DISTINCT ON fields is not supported by this database backend
application_namespaces = list(
set(qs.values_list('application_namespace', flat=True)))
application_namespaces.sort()
return tuple(application_namespaces) | :return: a tuple() with all cms app namespaces | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/page_utils.py#L10-L30 | null | """
:created: 17.09.2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from cms.models import Page
def get_public_cms_page_urls(*, language_code):
"""
:param language_code: e.g.: "en" or "de"
:return: Tuple with all public urls in the given language
"""
pages = Page.objects.public()
urls = [page.get_absolute_url(language=language_code) for page in pages]
urls.sort()
return tuple(urls)
|
jedie/django-cms-tools | django_cms_tools/fixture_helper/page_utils.py | get_public_cms_page_urls | python | def get_public_cms_page_urls(*, language_code):
pages = Page.objects.public()
urls = [page.get_absolute_url(language=language_code) for page in pages]
urls.sort()
return tuple(urls) | :param language_code: e.g.: "en" or "de"
:return: Tuple with all public urls in the given language | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/page_utils.py#L33-L41 | null | """
:created: 17.09.2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from cms.models import Page
def get_public_cms_app_namespaces():
"""
:return: a tuple() with all cms app namespaces
"""
qs = Page.objects.public()
qs = qs.exclude(application_namespace=None)
qs = qs.order_by('application_namespace')
try:
application_namespaces = list(
qs.distinct('application_namespace').values_list(
'application_namespace', flat=True))
except NotImplementedError:
# If SQLite used:
# DISTINCT ON fields is not supported by this database backend
application_namespaces = list(
set(qs.values_list('application_namespace', flat=True)))
application_namespaces.sort()
return tuple(application_namespaces)
|
jedie/django-cms-tools | django_cms_tools/plugin_landing_page/views.py | LandingPageDetailView.set_meta | python | def set_meta(self, instance):
self.use_title_tag = True
self.title = instance.title | Set django-meta stuff from LandingPageModel instance. | train | https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/plugin_landing_page/views.py#L36-L41 | null | class LandingPageDetailView(MetadataMixin, TranslatableSlugMixin, PublisherCmsDetailView):
model = LandingPageModel
# key and name of the "item" toolbar
toolbar_key = LANDING_PAGE_TOOLBAR_NAME
toolbar_verbose_name = LANDING_PAGE_TOOLBAR_VERBOSE_NAME
def get(self, request, *args, **kwargs):
self.language = get_language_from_request(request)
return super().get(request, *args, **kwargs)
def get_object(self, queryset=None):
instance = super().get_object(queryset=queryset)
# Translate the slug while changing the language:
set_language_changer(self.request, instance.get_absolute_url)
# append publisher buttons:
self.extend_toolbar(publisher_instance=instance)
# Set django-meta stuff from LandingPageModel instance:
self.set_meta(instance)
return instance
def get_template_names(self):
return ["landing_page/landing_page.html"]
|
grabbles/grabbit | grabbit/core.py | merge_layouts | python | def merge_layouts(layouts):
''' Utility function for merging multiple layouts.
Args:
layouts (list): A list of BIDSLayout instances to merge.
Returns:
A BIDSLayout containing merged files and entities.
Notes:
Layouts will be merged in the order of the elements in the list. I.e.,
the first Layout will be updated with all values in the 2nd Layout,
then the result will be updated with values from the 3rd Layout, etc.
This means that order matters: in the event of entity or filename
conflicts, later layouts will take precedence.
'''
layout = layouts[0].clone()
for l in layouts[1:]:
layout.files.update(l.files)
layout.domains.update(l.domains)
for k, v in l.entities.items():
if k not in layout.entities:
layout.entities[k] = v
else:
layout.entities[k].files.update(v.files)
return layout | Utility function for merging multiple layouts.
Args:
layouts (list): A list of BIDSLayout instances to merge.
Returns:
A BIDSLayout containing merged files and entities.
Notes:
Layouts will be merged in the order of the elements in the list. I.e.,
the first Layout will be updated with all values in the 2nd Layout,
then the result will be updated with values from the 3rd Layout, etc.
This means that order matters: in the event of entity or filename
conflicts, later layouts will take precedence. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L1079-L1105 | null | import json
import os
import re
from collections import defaultdict, OrderedDict, namedtuple
from grabbit.external import six, inflect
from grabbit.utils import natural_sort, listify
from grabbit.extensions.writable import build_path, write_contents_to_file
from os.path import (join, basename, dirname, abspath, split, exists, isdir,
relpath, isabs)
from functools import partial
from copy import copy, deepcopy
import warnings
from keyword import iskeyword
__all__ = ['File', 'Entity', 'Layout']
class File(object):

    def __init__(self, filename, domains=None):
        """
        Represents a single file.

        Args:
            filename (str): Path to the file.
            domains (list): Optional list of domain names the file belongs to.
        """
        self.path = filename
        self.filename = basename(self.path)
        self.dirname = dirname(self.path)
        # Maps entity name -> Tag(entity, value) for every matched entity
        self.tags = {}
        self.domains = domains or []

    @property
    def entities(self):
        """ Dict mapping entity names to their extracted values. """
        return {k: v.value for k, v in self.tags.items()}

    def _matches(self, entities=None, extensions=None, domains=None,
                 regex_search=False):
        """
        Checks whether the file matches all of the passed entities and
        extensions.

        Args:
            entities (dict): A dictionary of entity names -> regex patterns.
                A value of None means the named entity must be *absent*
                from this file.
            extensions (str, list): One or more file extensions to allow.
            domains (str, list): One or more domains the file must match.
            regex_search (bool): Whether to require exact match (False) or
                regex search (True) when comparing the query string to each
                entity.
        Returns:
            True if _all_ entities and extensions match; False otherwise.
        """
        if extensions is not None:
            if isinstance(extensions, six.string_types):
                extensions = [extensions]
            # Any one of the allowed extensions may terminate the filename
            extensions = '(' + '|'.join(extensions) + ')$'
            if re.search(extensions, self.filename) is None:
                return False
        if domains is not None:
            domains = listify(domains)
            # At least one domain must be shared with the query
            if not set(self.domains) & set(domains):
                return False
        if entities is not None:
            for name, val in entities.items():
                # XOR: fail when the entity is absent but a value was
                # requested, or present but explicitly required absent
                if (name not in self.tags) ^ (val is None):
                    return False
                if val is None:
                    continue
                def make_patt(x):
                    patt = '%s' % x
                    if isinstance(x, (int, float)):
                        # allow for leading zeros if a number was specified
                        # regardless of regex_search
                        patt = '0*' + patt
                    if not regex_search:
                        patt = '^%s$' % patt
                    return patt
                # The tagged value may match any of the passed alternatives
                ent_patts = [make_patt(x) for x in listify(val)]
                patt = '|'.join(ent_patts)
                if re.search(patt, str(self.tags[name].value)) is None:
                    return False
        return True

    def as_named_tuple(self):
        """
        Returns the File as a named tuple. The full path plus all entity
        key/value pairs are returned as attributes.
        """
        keys = list(self.entities.keys())
        replaced = []
        for i, k in enumerate(keys):
            # Reserved keywords can't be namedtuple field names; suffix '_'
            if iskeyword(k):
                replaced.append(k)
                keys[i] = '%s_' % k
        if replaced:
            safe = ['%s_' % k for k in replaced]
            # Report only the renamed entities (previously the full,
            # already-mutated key list was passed, misstating the change)
            warnings.warn("Entity names cannot be reserved keywords when "
                          "representing a File as a namedtuple. Replacing "
                          "entities %s with safe versions %s." %
                          (replaced, safe))
        entities = dict(zip(keys, self.entities.values()))
        _File = namedtuple('File', 'filename ' + ' '.join(entities.keys()))
        return _File(filename=self.path, **entities)

    def copy(self, path_patterns, symbolic_link=False, root=None,
             conflicts='fail'):
        ''' Copy the contents of a file to a new location, with target
        filename defined by the current File's entities and the specified
        path_patterns.

        Args:
            path_patterns (list): Patterns passed to build_path() to
                construct the destination filename.
            symbolic_link (bool): If True, create a symlink rather than a
                content copy.
            root (str): Optional root that relative paths are joined to.
            conflicts (str): Passed through to write_contents_to_file().
        '''
        new_filename = build_path(self.entities, path_patterns)
        if not new_filename:
            return None
        # A trailing separator means the pattern targets a directory; keep
        # the original filename inside it
        if new_filename[-1] == os.sep:
            new_filename += self.filename
        if isabs(self.path) or root is None:
            path = self.path
        else:
            path = join(root, self.path)
        if not exists(path):
            raise ValueError("Target filename to copy/symlink (%s) doesn't "
                             "exist." % path)
        if symbolic_link:
            contents = None
            link_to = path
        else:
            with open(path, 'r') as f:
                contents = f.read()
            link_to = None
        write_contents_to_file(new_filename, contents=contents,
                               link_to=link_to, content_mode='text', root=root,
                               conflicts=conflicts)
class Domain(object):

    def __init__(self, config):
        """
        A set of rules that applies to one or more directories
        within a Layout.

        Args:
            config (dict): The configuration dictionary that defines the
                entities and paths for the current domain. Must contain a
                'name' key; may also provide 'include', 'exclude', and
                'default_path_patterns'.
        """
        self.config = config
        self.name = config['name']
        self.entities = {}
        self.files = []
        inc = self.config.get('include', [])
        exc = self.config.get('exclude', [])
        self.include = listify(inc)
        self.exclude = listify(exc)
        if self.include and self.exclude:
            raise ValueError("The 'include' and 'exclude' arguments cannot "
                             "both be set. Please pass at most one of these "
                             "for domain '%s'." % self.name)
        self.path_patterns = listify(config.get('default_path_patterns', []))

    def add_entity(self, ent):
        ''' Register an Entity with this Domain, keyed by its name.

        Args:
            ent (Entity): The Entity to add.
        '''
        self.entities[ent.name] = ent

    def add_file(self, file):
        ''' Append a File to the files tracked by this Domain.

        Args:
            file (File): The File to add to tracking.
        '''
        self.files.append(file)
# Lightweight pairing of an Entity instance with the value extracted from a
# filename; stored in File.tags keyed by entity name.
Tag = namedtuple('Tag', ['entity', 'value'])
class Entity(object):

    def __init__(self, name, pattern=None, domain=None, mandatory=False,
                 directory=None, map_func=None, dtype=None, aliases=None,
                 **kwargs):
        """
        Represents a single entity defined in the JSON config.

        Args:
            name (str): The name of the entity (e.g., 'subject', 'run', etc.)
            pattern (str): A regex pattern used to match against file names.
                Must define at least one group, and only the first group is
                kept as the match.
            domain (Domain): The Domain the Entity belongs to.
            mandatory (bool): If True, every File _must_ match this entity.
            directory (str): Optional pattern defining a directory associated
                with the entity.
            map_func (callable): Optional callable used to extract the Entity's
                value from the passed string (instead of trying to match on the
                defined .pattern).
            dtype (str): The optional data type of the Entity values. Must be
                one of 'int', 'float', 'bool', or 'str'. If None, no type
                enforcement will be attempted, which means the dtype of the
                value may be unpredictable.
            aliases (str or list): Alternative names for the entity.
            kwargs (dict): Additional keyword arguments.

        Raises:
            ValueError: If neither pattern nor map_func is given, or an
                unsupported dtype is passed.
        """
        if pattern is None and map_func is None:
            raise ValueError("Invalid specification for Entity '%s'; no "
                             "pattern or mapping function provided. Either the"
                             " 'pattern' or the 'map_func' arguments must be "
                             "set." % name)
        self.name = name
        self.pattern = pattern
        self.domain = domain
        self.mandatory = mandatory
        self.directory = directory
        self.map_func = map_func
        self.kwargs = kwargs
        if isinstance(dtype, six.string_types):
            # Resolve the dtype name via an explicit whitelist rather than
            # eval(), which would execute arbitrary config-supplied input.
            dtype = {'str': str, 'float': float, 'int': int,
                     'bool': bool}.get(dtype, dtype)
        if dtype not in [str, float, int, bool, None]:
            raise ValueError("Invalid dtype '%s'. Must be one of int, float, "
                             "bool, or str." % dtype)
        self.dtype = dtype
        self.files = {}
        self.regex = re.compile(pattern) if pattern is not None else None
        # The fully-qualified id has the form '<domain>.<name>'; the domain
        # part is empty when no Domain is attached.
        domain_name = getattr(domain, 'name', '')
        self.id = '.'.join([domain_name, name])
        aliases = [] if aliases is None else listify(aliases)
        self.aliases = ['.'.join([domain_name, alias]) for alias in aliases]

    def __iter__(self):
        # Iterating an Entity yields its unique values
        for i in self.unique():
            yield(i)

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # The compiled regex is shared rather than copied — presumably
            # because compiled patterns weren't deep-copyable on older
            # Pythons; confirm before changing.
            new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo)
            setattr(result, k, new_val)
        return result

    def match_file(self, f, update_file=False):
        """
        Determine whether the passed file matches the Entity.

        Args:
            f (File): The File instance to match against.
            update_file (bool): Currently unused; retained for backward
                compatibility.
        Returns: the matched value if a match was found, otherwise None.
        """
        if self.map_func is not None:
            val = self.map_func(f)
        else:
            m = self.regex.search(f.path)
            val = m.group(1) if m is not None else None
        return self._astype(val)

    def add_file(self, filename, value):
        """ Adds the specified filename to tracking. """
        self.files[filename] = value

    def unique(self):
        """ Returns all unique values/levels for the current entity. """
        return list(set(self.files.values()))

    def count(self, files=False):
        """ Returns a count of unique values or files.

        Args:
            files (bool): When True, counts all files mapped to the Entity.
                When False, counts all unique values.
        Returns: an int.
        """
        return len(self.files) if files else len(self.unique())

    def _astype(self, val):
        """ Coerce val to this Entity's dtype (no-op if either is None). """
        if val is not None and self.dtype is not None:
            val = self.dtype(val)
        return val
class Layout(object):
    def __init__(self, paths, root=None, index=None,
                 dynamic_getters=False, absolute_paths=True,
                 regex_search=False, entity_mapper=None, path_patterns=None,
                 config_filename='layout.json', include=None, exclude=None):
        """
        A container for all the files and metadata found at the specified path.
        Args:
            paths (str, list): The path(s) where project files are located.
                Must be one of:
                - A path to a directory containing files to index
                - A list of paths to directories to index
                - A list of 2-tuples where each tuple encodes a mapping from
                  directories to domains. The first element is a string or
                  list giving the paths to one or more directories to index.
                  The second element specifies which domains to apply to the
                  specified files, and can be one of:
                    * A string giving the path to a JSON config file
                    * A dictionary containing config information
                    * A list of any combination of strings or dicts
            root (str): Optional directory that all other paths will be
                relative to. If set, every other path the Layout sees must be
                at this level or below. If None, filesystem root ('/') is used.
            index (str): Optional path to a saved index file. If a valid value
                is passed, this index is used to populate Files and Entities,
                and the normal indexing process (which requires scanning all
                files in the project) is skipped.
            dynamic_getters (bool): If True, a get_{entity_name}() method will
                be dynamically added to the Layout every time a new Entity is
                created. This is implemented by creating a partial function of
                the get() function that sets the target argument to the
                entity name.
            absolute_paths (bool): If True, grabbit uses absolute file paths
                everywhere (including when returning query results). If False,
                the input path will determine the behavior (i.e., relative if
                a relative path was passed, absolute if an absolute path was
                passed).
            regex_search (bool): Whether to require exact matching (True)
                or regex search (False, default) when comparing the query
                string to each entity in .get() calls. This sets a default for
                the instance, but can be overridden in individual .get()
                requests.
            entity_mapper (object, str): An optional object containing methods
                for indexing specific entities. If passed, the object must
                contain a named method for every value that appears in the
                JSON config file under the "mapper" key of an Entity's entry.
                For example, if an entity "type" is defined that contains the
                key/value pair "mapper": "extract_type", then the passed object
                must contain an .extract_type() method.
                Alternatively, the special string "self" can be passed, in
                which case the current Layout instance will be used as the
                entity mapper (implying that the user has subclassed Layout).
            path_patterns (str, list): One or more filename patterns to use
                as a default path pattern for this layout's files. Can also
                be specified in the config file.
            config_filename (str): The name of directory-specific config files.
                Every directory will be scanned for this file, and if found,
                the config file will be read in and added to the list of
                configs.
            include (str, list): A string or list specifying regexes used to
                globally filter files when indexing. A file or directory
                *must* match at least one of the passed values in order to be
                retained in the index. Cannot be used together with 'exclude'.
            exclude (str, list): A string or list specifying regexes used to
                globally filter files when indexing. If a file or directory
                matches any of the passed values, it will be dropped
                from indexing. Cannot be used together with 'include'.
        """
        if include is not None and exclude is not None:
            raise ValueError("You cannot specify both the include and exclude"
                             " arguments. Please pass at most one of these.")
        self.entities = OrderedDict()
        self.files = {}
        self.mandatory = set()
        self.dynamic_getters = dynamic_getters
        self.regex_search = regex_search
        self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
        self.path_patterns = path_patterns if path_patterns else []
        self.config_filename = config_filename
        self.domains = OrderedDict()
        # Global include/exclude filters; consulted first by
        # _check_inclusions() for every scanned path
        self.include = listify(include or [])
        self.exclude = listify(exclude or [])
        self.absolute_paths = absolute_paths
        if root is None:
            root = '/'
        self.root = abspath(root)
        # NOTE(review): _domain_map is never read elsewhere in this module —
        # confirm before removing.
        self._domain_map = {}
        # Extract path --> domain mapping
        self._paths_to_index = {}
        # Record each search path (absolutized) with the domains, if any,
        # it was explicitly mapped to
        def add_path(path, val):
            path = abspath(path)
            self._paths_to_index[path] = val
        # paths may be plain strings or (paths, domains) 2-tuples;
        # normalize both forms
        for p in listify(paths, ignore=list):
            if isinstance(p, six.string_types):
                add_path(p, [])
            else:
                doms = listify(p[1])
                doms = [self._get_or_load_domain(d) for d in doms]
                for elem in listify(p[0]):
                    add_path(elem, doms)
        # Verify existence of all paths
        for p in self._paths_to_index:
            if not exists(p):
                raise ValueError("Search path {} doesn't exist.".format(p))
        # Build the index by scanning the filesystem, or restore a
        # previously saved one
        if index is None:
            self.index()
        else:
            self.load_index(index)
    def _get_or_load_domain(self, domain):
        ''' Return a domain if one already exists, or create a new one if not.

        Args:
            domain (str, dict): Can be one of:
                - The name of the Domain to return (fails if none exists)
                - A path to the Domain configuration file
                - A dictionary containing configuration information

        Returns:
            The existing or newly constructed Domain instance.

        Raises:
            ValueError: If a string is passed that is neither a known Domain
                name nor the path of an existing config file.
        '''
        if isinstance(domain, six.string_types):
            if domain in self.domains:
                return self.domains[domain]
            elif exists(domain):
                with open(domain, 'r') as fobj:
                    domain = json.load(fobj)
            else:
                raise ValueError("No domain could be found/loaded from input "
                                 "'{}'; value must be either the name of an "
                                 "existing Domain, or a valid path to a "
                                 "configuration file.".format(domain))
        # At this point, domain is a dict
        name = domain['name']
        if name in self.domains:
            msg = ("Domain with name '{}' already exists; returning existing "
                   "Domain configuration.".format(name))
            warnings.warn(msg)
            return self.domains[name]
        entities = domain.get('entities', [])
        domain = Domain(domain)
        # Register the config's entities on both the Layout and the Domain
        for e in entities:
            self.add_entity(domain=domain, **e)
        self.domains[name] = domain
        return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
    def _check_inclusions(self, f, domains=None):
        ''' Check file or directory against regexes in config to determine if
        it should be included in the index.

        Args:
            f (str, File): A path, or any object with a .path attribute.
            domains (list): Domains whose include/exclude rules apply; the
                Layout's own global rules are always checked first.

        Returns:
            bool: True if the path should be indexed, False otherwise.
        '''
        filename = f if isinstance(f, six.string_types) else f.path
        if domains is None:
            domains = list(self.domains.values())
        # Inject the Layout at the first position for global include/exclude
        domains = list(domains)
        domains.insert(0, self)
        for dom in domains:
            # If file matches any include regex, then True
            if dom.include:
                for regex in dom.include:
                    if re.search(regex, filename):
                        return True
                # An include list was given but nothing matched: reject
                return False
            else:
                # If file matches any exclude regex, then False
                for regex in dom.exclude:
                    if re.search(regex, filename, flags=re.UNICODE):
                        return False
        # No rule rejected the path
        return True
    def _validate_dir(self, d):
        ''' Extend this in subclasses to provide additional directory
        validation. Will be called the first time a directory is read in; if
        False is returned, the directory will be ignored and dropped from the
        layout.

        The base implementation applies the same check as _validate_file().
        '''
        return self._validate_file(d)
    def _validate_file(self, f):
        ''' Extend this in subclasses to provide additional file validation.
        Will be called the first time each file is read in; if False is
        returned, the file will be ignored and dropped from the layout.

        The base implementation accepts every file.
        '''
        return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
    def _index_file(self, root, f, domains, update_layout=True):
        ''' Build a File for a single path and tag it against every Entity
        of the given domains.

        Args:
            root (str): Directory containing the file.
            f (str): Filename.
            domains (list): Names of the domains to match the file against.
            update_layout (bool): If True, register the file and its entity
                values on the Layout/Domain/Entity indexes; if False, just
                build and return the tagged File.

        Returns:
            The constructed File instance.
        '''
        # Create the file object--allows for subclassing
        f = self._make_file_object(root, f)
        for domain in listify(domains):
            domain = self.domains[domain]
            match_vals = {}
            for e in domain.entities.values():
                m = e.match_file(f)
                # NOTE(review): a failed mandatory entity stops matching for
                # this domain, but matches already collected in match_vals
                # are still applied below — confirm this is intended.
                if m is None and e.mandatory:
                    break
                if m is not None:
                    match_vals[e.name] = (e, m)
            if match_vals:
                for k, (ent, val) in match_vals.items():
                    # Tags are keyed by entity *name* (cf. load_index, which
                    # uses fully-qualified ids)
                    f.tags[k] = Tag(ent, val)
                    if update_layout:
                        ent.add_file(f.path, val)
            if update_layout:
                domain.add_file(f)
        if update_layout:
            f.domains = domains
            self.files[f.path] = f
        return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
    def index(self):
        ''' (Re)build the file index by recursively scanning every search
        path registered at construction time. '''
        self._reset_index()
        def _index_dir(dir_, domains):
            contents = [join(dir_, f) for f in self._get_files(dir_)]
            # Check for domain config file
            config_file = join(dir_, self.config_filename)
            if exists(config_file):
                new_dom = self._get_or_load_domain(config_file)
                if new_dom not in domains:
                    domains.append(new_dom)
                contents.remove(config_file)
            # Apply global and per-domain include/exclude filters
            contents = filter(lambda x: self._check_inclusions(x, domains),
                              contents)
            # If the directory was explicitly passed in Layout init,
            # overwrite the current set of domains with what was passed
            domains = self._paths_to_index.get(dir_, domains)
            for f in contents:
                # NOTE(review): f is already a full path here, so for the
                # absolute dirs produced by __init__ this join is a no-op —
                # confirm before relying on relative search paths.
                full_path = join(dir_, f)
                if isdir(full_path):
                    if self._validate_dir(full_path):
                        # Recurse with a copy so siblings aren't affected
                        _index_dir(full_path, list(domains))
                elif self._validate_file(full_path):
                    _dir, _base = split(full_path)
                    dom_names = [d.name for d in domains]
                    self._index_file(_dir, _base, dom_names)
        # Index each directory
        for path, domains in self._paths_to_index.items():
            _index_dir(path, list(domains))
    def save_index(self, filename):
        ''' Save the current Layout's index to a .json file.

        Args:
            filename (str): Filename to write to.

        Note: At the moment, this won't serialize directory-specific config
        files. This means reconstructed indexes will only work properly in
        cases where there aren't multiple layout specs within a project.
        '''
        data = {}
        for f in self.files.values():
            # Serialize tags under the entity's fully-qualified id so they
            # can be remapped onto Entity objects at load time
            entities = {v.entity.id: v.value for k, v in f.tags.items()}
            data[f.path] = {'domains': f.domains, 'entities': entities}
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)
    def load_index(self, filename, reindex=False):
        ''' Load the Layout's index from a plaintext file.

        Args:
            filename (str): Path to the plaintext index file.
            reindex (bool): If True, discards entity values provided in the
                loaded index and instead re-indexes every file in the loaded
                index against the entities defined in the config. Default is
                False, in which case it is assumed that all entity definitions
                in the loaded index are correct and do not need any further
                validation.

        Note: At the moment, directory-specific config files aren't serialized.
        This means reconstructed indexes will only work properly in cases
        where there aren't multiple layout specs within a project.
        '''
        self._reset_index()
        with open(filename, 'r') as fobj:
            data = json.load(fobj)
        for path, file in data.items():
            ents, domains = file['entities'], file['domains']
            root, f = dirname(path), basename(path)
            if reindex:
                self._index_file(root, f, domains)
            else:
                f = self._make_file_object(root, f)
                # NOTE(review): tags restored here are keyed by the
                # fully-qualified entity id (as written by save_index),
                # whereas fresh indexing via _index_file keys them by bare
                # entity name — queries by bare name against a loaded index
                # may therefore behave differently; confirm.
                tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
                f.tags = tags
                self.files[f.path] = f
                for ent, val in f.entities.items():
                    self.entities[ent].add_file(f.path, val)
    def add_entity(self, domain, **kwargs):
        ''' Add a new Entity to tracking.

        Args:
            domain (Domain): The Domain the new Entity belongs to.
            kwargs: Passed through to the Entity constructor ('name',
                'pattern', 'map_func', etc.).
        '''
        # Set the entity's mapping func if one was specified
        map_func = kwargs.get('map_func', None)
        if map_func is not None and not callable(kwargs['map_func']):
            # A string map_func names a method on the entity_mapper object
            if self.entity_mapper is None:
                raise ValueError("Mapping function '%s' specified for Entity "
                                 "'%s', but no entity mapper was passed when "
                                 "initializing the current Layout. Please make"
                                 " sure the 'entity_mapper' argument is set." %
                                 (map_func, kwargs['name']))
            map_func = getattr(self.entity_mapper, kwargs['map_func'])
            kwargs['map_func'] = map_func
        ent = Entity(domain=domain, **kwargs)
        domain.add_entity(ent)
        if ent.mandatory:
            self.mandatory.add(ent.id)
        if ent.directory is not None:
            ent.directory = ent.directory.replace('{{root}}', self.root)
        # Entities are keyed by fully-qualified id, plus any aliases
        self.entities[ent.id] = ent
        for alias in ent.aliases:
            self.entities[alias] = ent
        # Optionally expose a pluralized get_<entity>() convenience method
        if self.dynamic_getters:
            func = partial(getattr(self, 'get'), target=ent.name,
                           return_type='id')
            func_name = inflect.engine().plural(ent.name)
            setattr(self, 'get_%s' % func_name, func)
    def get(self, return_type='tuple', target=None, extensions=None,
            domains=None, regex_search=None, **kwargs):
        """
        Retrieve files and/or metadata from the current Layout.
        Args:
            return_type (str): Type of result to return. Valid values:
                'tuple': returns a list of namedtuples containing file name as
                    well as attribute/value pairs for all named entities.
                'file': returns a list of matching filenames.
                'dir': returns a list of directories.
                'id': returns a list of unique IDs. Must be used together with
                    a valid target.
                'obj': returns a list of matching File objects.
            target (str): The name of the target entity to get results for
                (if return_type is 'dir' or 'id').
            extensions (str, list): One or more file extensions to filter on.
                Files with any other extensions will be excluded.
            domains (list): Optional list of domain names to scan for files.
                If None, all available domains are scanned.
            regex_search (bool or None): Whether to require exact matching
                (False) or regex search (True) when comparing the query string
                to each entity. If None (default), uses the value found in
                self.
            kwargs (dict): Any optional key/values to filter the entities on.
                Keys are entity names, values are regexes to filter on. For
                example, passing filter={ 'subject': 'sub-[12]'} would return
                only files that match the first two subjects.
        Returns:
            A named tuple (default) or a list (see return_type for details).
        """
        if regex_search is None:
            regex_search = self.regex_search
        result = []
        filters = {}
        filters.update(kwargs)
        # Scan every indexed file and keep those matching all constraints
        for filename, file in self.files.items():
            if not file._matches(filters, extensions, domains, regex_search):
                continue
            result.append(file)
        # Convert to relative paths if needed
        if not self.absolute_paths:
            for i, f in enumerate(result):
                # Shallow-copy so the indexed File keeps its absolute path
                f = copy(f)
                f.path = relpath(f.path, self.root)
                result[i] = f
        if return_type == 'file':
            return natural_sort([f.path for f in result])
        if return_type == 'tuple':
            result = [r.as_named_tuple() for r in result]
            return natural_sort(result, field='filename')
        # 'obj' (or 'objects') returns the File instances directly
        if return_type.startswith('obj'):
            return result
        else:
            valid_entities = self.get_domain_entities(domains)
            if target is None:
                raise ValueError('If return_type is "id" or "dir", a valid '
                                 'target entity must also be specified.')
            result = [x for x in result if target in x.entities]
            if return_type == 'id':
                result = list(set([x.entities[target] for x in result]))
                return natural_sort(result)
            elif return_type == 'dir':
                template = valid_entities[target].directory
                if template is None:
                    raise ValueError('Return type set to directory, but no '
                                     'directory template is defined for the '
                                     'target entity (\"%s\").' % target)
                # Construct regex search pattern from target directory template
                to_rep = re.findall('\{(.*?)\}', template)
                for ent in to_rep:
                    patt = valid_entities[ent].pattern
                    template = template.replace('{%s}' % ent, patt)
                template += '[^\%s]*$' % os.path.sep
                matches = [f.dirname for f in result
                           if re.search(template, f.dirname)]
                return natural_sort(list(set(matches)))
            else:
                # NOTE(review): the accepted 'obj' option is missing from
                # this error message.
                raise ValueError("Invalid return_type specified (must be one "
                                 "of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
    def get_nearest(self, path, return_type='file', strict=True, all_=False,
                    ignore_strict_entities=None, full_search=False, **kwargs):
        ''' Walk up the file tree from the specified path and return the
        nearest matching file(s).

        Args:
            path (str): The file to search from.
            return_type (str): What to return; must be one of 'file' (default)
                or 'tuple'.
            strict (bool): When True, all entities present in both the input
                path and the target file(s) must match perfectly. When False,
                files will be ordered by the number of matching entities, and
                partial matches will be allowed.
            all_ (bool): When True, returns all matching files. When False
                (default), only returns the first match.
            ignore_strict_entities (list): Optional list of entities to
                exclude from strict matching when strict is True. This allows
                one to search, e.g., for files of a different type while
                matching all other entities perfectly by passing
                ignore_strict_entities=['type'].
            full_search (bool): If True, searches all indexed files, even if
                they don't share a common root with the provided path. If
                False, only files that share a common root will be scanned.
            kwargs: Optional keywords to pass on to .get().
        '''
        # Extract the entity values present in the query path itself
        entities = {}
        for ent in self.entities.values():
            m = ent.regex.search(path)
            if m:
                entities[ent.name] = ent._astype(m.group(1))
        # Remove any entities we want to ignore when strict matching is on
        if strict and ignore_strict_entities is not None:
            for k in ignore_strict_entities:
                entities.pop(k, None)
        results = self.get(return_type='file', **kwargs)
        # Group candidate files by their containing directory
        folders = defaultdict(list)
        for filename in results:
            f = self.get_file(filename)
            folders[f.dirname].append(f)
        def count_matches(f):
            # Returns [n shared entity names, n of those with equal values]
            f_ents = f.entities
            keys = set(entities.keys()) & set(f_ents.keys())
            shared = len(keys)
            return [shared, sum([entities[k] == f_ents[k] for k in keys])]
        matches = []
        search_paths = []
        # Walk up from the query path, keeping every ancestor directory
        # that actually contains candidate files
        while True:
            if path in folders and folders[path]:
                search_paths.append(path)
            parent = dirname(path)
            if parent == path:
                break
            path = parent
        if full_search:
            unchecked = set(folders.keys()) - set(search_paths)
            search_paths.extend(path for path in unchecked if folders[path])
        for path in search_paths:
            # Sort by number of matching entities. Also store number of
            # common entities, for filtering when strict=True.
            num_ents = [[f] + count_matches(f) for f in folders[path]]
            # Filter out imperfect matches (i.e., where number of common
            # entities does not equal number of matching entities).
            if strict:
                num_ents = [f for f in num_ents if f[1] == f[2]]
            num_ents.sort(key=lambda x: x[2], reverse=True)
            if num_ents:
                matches.append(num_ents[0][0])
            if not all_:
                break
        matches = [m.path if return_type == 'file' else m.as_named_tuple()
                   for m in matches]
        return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
    def parse_file_entities(self, filename, domains=None):
        ''' Extract entity values from a filename without updating the index.

        Args:
            filename (str): Path of the file to parse. If relative (i.e., it
                has no directory component), the domains argument is required.
            domains (list): Names of the domains whose entities should be
                applied. Defaults to all domains known to the Layout.

        Returns:
            A dict mapping entity names to the values extracted from the
            filename.
        '''
        root, f = dirname(filename), basename(filename)
        if domains is None:
            if not root:
                msg = ("If a relative path is provided as the filename "
                       "argument, you *must* specify the names of the "
                       "domains whose entities are to be extracted. "
                       "Available domains for the current layout are: %s"
                       % list(self.domains.keys()))
                raise ValueError(msg)
            domains = list(self.domains.keys())
        # update_layout=False: tag the file but don't register it anywhere
        result = self._index_file(root, f, domains, update_layout=False)
        return result.entities
    def build_path(self, source, path_patterns=None, strict=False,
                   domains=None):
        ''' Constructs a target filename for a file or dictionary of entities.

        Args:
            source (str, File, dict): The source data to use to construct the
                new file path. Must be one of:
                - A File object
                - A string giving the path of a File contained within the
                  current Layout.
                - A dict of entities, with entity names in keys and values in
                  values
            path_patterns (list): Optional path patterns to use to construct
                the new file path. If None, the Layout-defined patterns will
                be used.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
            domains (str, list): Optional name(s) of domain(s) to scan for
                path patterns. If None, all domains are scanned. If two or more
                domains are provided, the order determines the precedence of
                path patterns (i.e., earlier domains will have higher
                precedence).
        '''
        # Normalize source down to a plain dict of entities
        if isinstance(source, six.string_types):
            if source not in self.files:
                source = join(self.root, source)
            source = self.get_file(source)
        if isinstance(source, File):
            source = source.entities
        if path_patterns is None:
            if domains is None:
                domains = list(self.domains.keys())
            # Earlier domains contribute patterns first, giving them
            # higher precedence
            path_patterns = []
            for dom in listify(domains):
                path_patterns.extend(self.domains[dom].path_patterns)
        return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
                           contents=None, link_to=None,
                           content_mode='text', conflicts='fail',
                           strict=False, domains=None, index=False,
                           index_domains=None):
    """
    Write arbitrary data to a file whose location is derived from the
    passed entities and path patterns.

    Args:
        entities (dict): Entity name -> value pairs describing the file.
        path_patterns (list): Optional path patterns used to build the
            filename; falls back to the Layout-defined patterns.
        contents (object): Contents to write to the generated path; any
            object serializable as text or binary (per content_mode).
        link_to (str): Optional path to create a symbolic link to, used
            instead of writing contents.
        content_mode (str): 'text' or 'binary' writing mode.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append';
            the action taken when the output path already exists.
        strict (bool): If True, all entities must be matched inside a
            pattern in order to be a valid match.
        domains (list): Domains to scan for path_patterns; order
            determines precedence. Defaults to all available domains.
        index (bool): If True, add the generated file to the index using
            the domains in index_domains.
        index_domains (list): Domain names to attach the generated file
            to when indexing; ignored unless index is True. Defaults to
            all available domains.
    """
    target = self.build_path(entities, path_patterns, strict, domains)
    if target is None:
        raise ValueError("Cannot construct any valid filename for "
                         "the passed entities given available path "
                         "patterns.")
    write_contents_to_file(target, contents=contents, link_to=link_to,
                           content_mode=content_mode, conflicts=conflicts,
                           root=self.root)
    if not index:
        return
    # TODO: Default to using only domains that have at least one
    # tagged entity in the generated file.
    if index_domains is None:
        index_domains = list(self.domains.keys())
    self._index_file(self.root, target, index_domains)
|
grabbles/grabbit | grabbit/core.py | File._matches | python | def _matches(self, entities=None, extensions=None, domains=None,
regex_search=False):
if extensions is not None:
if isinstance(extensions, six.string_types):
extensions = [extensions]
extensions = '(' + '|'.join(extensions) + ')$'
if re.search(extensions, self.filename) is None:
return False
if domains is not None:
domains = listify(domains)
if not set(self.domains) & set(domains):
return False
if entities is not None:
for name, val in entities.items():
if (name not in self.tags) ^ (val is None):
return False
if val is None:
continue
def make_patt(x):
patt = '%s' % x
if isinstance(x, (int, float)):
# allow for leading zeros if a number was specified
# regardless of regex_search
patt = '0*' + patt
if not regex_search:
patt = '^%s$' % patt
return patt
ent_patts = [make_patt(x) for x in listify(val)]
patt = '|'.join(ent_patts)
if re.search(patt, str(self.tags[name].value)) is None:
return False
return True | Checks whether the file matches all of the passed entities and
extensions.
Args:
entities (dict): A dictionary of entity names -> regex patterns.
extensions (str, list): One or more file extensions to allow.
domains (str, list): One or more domains the file must match.
regex_search (bool): Whether to require exact match (False) or
regex search (True) when comparing the query string to each
entity.
Returns:
True if _all_ entities and extensions match; False otherwise. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L35-L88 | null | class File(object):
def __init__(self, filename, domains=None):
"""
Represents a single file.
"""
self.path = filename
self.filename = basename(self.path)
self.dirname = dirname(self.path)
self.tags = {}
self.domains = domains or []
@property
def entities(self):
return {k: v.value for k, v in self.tags.items()}
def as_named_tuple(self):
"""
Returns the File as a named tuple. The full path plus all entity
key/value pairs are returned as attributes.
"""
keys = list(self.entities.keys())
replaced = []
for i, k in enumerate(keys):
if iskeyword(k):
replaced.append(k)
keys[i] = '%s_' % k
if replaced:
safe = ['%s_' % k for k in replaced]
warnings.warn("Entity names cannot be reserved keywords when "
"representing a File as a namedtuple. Replacing "
"entities %s with safe versions %s." % (keys, safe))
entities = dict(zip(keys, self.entities.values()))
_File = namedtuple('File', 'filename ' + ' '.join(entities.keys()))
return _File(filename=self.path, **entities)
def copy(self, path_patterns, symbolic_link=False, root=None,
         conflicts='fail'):
    """Copy this file's contents to a new location.

    The destination filename is built from the File's entities and the
    provided path_patterns; a symlink is created instead of a deep copy
    when symbolic_link is True.
    """
    dest = build_path(self.entities, path_patterns)
    if not dest:
        return None
    # A pattern ending in a path separator denotes a directory; keep the
    # original base name in that case.
    if dest.endswith(os.sep):
        dest += self.filename

    src = self.path if (isabs(self.path) or root is None) \
        else join(root, self.path)
    if not exists(src):
        raise ValueError("Target filename to copy/symlink (%s) doesn't "
                         "exist." % src)

    if symbolic_link:
        contents, link_to = None, src
    else:
        link_to = None
        with open(src, 'r') as fobj:
            contents = fobj.read()

    write_contents_to_file(dest, contents=contents,
                           link_to=link_to, content_mode='text', root=root,
                           conflicts=conflicts)
|
grabbles/grabbit | grabbit/core.py | File.as_named_tuple | python | def as_named_tuple(self):
keys = list(self.entities.keys())
replaced = []
for i, k in enumerate(keys):
if iskeyword(k):
replaced.append(k)
keys[i] = '%s_' % k
if replaced:
safe = ['%s_' % k for k in replaced]
warnings.warn("Entity names cannot be reserved keywords when "
"representing a File as a namedtuple. Replacing "
"entities %s with safe versions %s." % (keys, safe))
entities = dict(zip(keys, self.entities.values()))
_File = namedtuple('File', 'filename ' + ' '.join(entities.keys()))
return _File(filename=self.path, **entities) | Returns the File as a named tuple. The full path plus all entity
key/value pairs are returned as attributes. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L90-L108 | null | class File(object):
def __init__(self, filename, domains=None):
"""
Represents a single file.
"""
self.path = filename
self.filename = basename(self.path)
self.dirname = dirname(self.path)
self.tags = {}
self.domains = domains or []
@property
def entities(self):
return {k: v.value for k, v in self.tags.items()}
def _matches(self, entities=None, extensions=None, domains=None,
regex_search=False):
"""
Checks whether the file matches all of the passed entities and
extensions.
Args:
entities (dict): A dictionary of entity names -> regex patterns.
extensions (str, list): One or more file extensions to allow.
domains (str, list): One or more domains the file must match.
regex_search (bool): Whether to require exact match (False) or
regex search (True) when comparing the query string to each
entity.
Returns:
True if _all_ entities and extensions match; False otherwise.
"""
if extensions is not None:
if isinstance(extensions, six.string_types):
extensions = [extensions]
extensions = '(' + '|'.join(extensions) + ')$'
if re.search(extensions, self.filename) is None:
return False
if domains is not None:
domains = listify(domains)
if not set(self.domains) & set(domains):
return False
if entities is not None:
for name, val in entities.items():
if (name not in self.tags) ^ (val is None):
return False
if val is None:
continue
def make_patt(x):
patt = '%s' % x
if isinstance(x, (int, float)):
# allow for leading zeros if a number was specified
# regardless of regex_search
patt = '0*' + patt
if not regex_search:
patt = '^%s$' % patt
return patt
ent_patts = [make_patt(x) for x in listify(val)]
patt = '|'.join(ent_patts)
if re.search(patt, str(self.tags[name].value)) is None:
return False
return True
def copy(self, path_patterns, symbolic_link=False, root=None,
         conflicts='fail'):
    """Copy this file's contents to a new location.

    The destination filename is built from the File's entities and the
    provided path_patterns; a symlink is created instead of a deep copy
    when symbolic_link is True.
    """
    dest = build_path(self.entities, path_patterns)
    if not dest:
        return None
    # A pattern ending in a path separator denotes a directory; keep the
    # original base name in that case.
    if dest.endswith(os.sep):
        dest += self.filename

    src = self.path if (isabs(self.path) or root is None) \
        else join(root, self.path)
    if not exists(src):
        raise ValueError("Target filename to copy/symlink (%s) doesn't "
                         "exist." % src)

    if symbolic_link:
        contents, link_to = None, src
    else:
        link_to = None
        with open(src, 'r') as fobj:
            contents = fobj.read()

    write_contents_to_file(dest, contents=contents,
                           link_to=link_to, content_mode='text', root=root,
                           conflicts=conflicts)
|
grabbles/grabbit | grabbit/core.py | File.copy | python | def copy(self, path_patterns, symbolic_link=False, root=None,
conflicts='fail'):
''' Copy the contents of a file to a new location, with target
filename defined by the current File's entities and the specified
path_patterns. '''
new_filename = build_path(self.entities, path_patterns)
if not new_filename:
return None
if new_filename[-1] == os.sep:
new_filename += self.filename
if isabs(self.path) or root is None:
path = self.path
else:
path = join(root, self.path)
if not exists(path):
raise ValueError("Target filename to copy/symlink (%s) doesn't "
"exist." % path)
if symbolic_link:
contents = None
link_to = path
else:
with open(path, 'r') as f:
contents = f.read()
link_to = None
write_contents_to_file(new_filename, contents=contents,
link_to=link_to, content_mode='text', root=root,
conflicts=conflicts) | Copy the contents of a file to a new location, with target
filename defined by the current File's entities and the specified
path_patterns. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L110-L141 | [
"def build_path(entities, path_patterns, strict=False):\n \"\"\"\n Constructs a path given a set of entities and a list of potential\n filename patterns to use.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n path_patterns (str, list): One or more filename patterns to write\n the file to. Entities should be represented by the name\n surrounded by curly braces. Optional portions of the patterns\n should be denoted by square brackets. Entities that require a\n specific value for the pattern to match can pass them inside\n carets. Default values can be assigned by specifying a string after\n the pipe operator. E.g., (e.g., {type<image>|bold} would only match\n the pattern if the entity 'type' was passed and its value is\n \"image\", otherwise the default value \"bold\" will be used).\n Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'\n Result 2: 'sub-01/var-SES/1045.csv'\n strict (bool): If True, all passed entities must be matched inside a\n pattern in order to be a valid match. 
If False, extra entities will\n be ignored so long as all mandatory entities are found.\n\n Returns:\n A constructed path for this file based on the provided patterns.\n \"\"\"\n if isinstance(path_patterns, string_types):\n path_patterns = [path_patterns]\n\n # Loop over available patherns, return first one that matches all\n for pattern in path_patterns:\n # If strict, all entities must be contained in the pattern\n if strict:\n defined = re.findall('\\{(.*?)(?:<[^>]+>)?\\}', pattern)\n if set(entities.keys()) - set(defined):\n continue\n # Iterate through the provided path patterns\n new_path = pattern\n optional_patterns = re.findall('\\[(.*?)\\]', pattern)\n # First build from optional patterns if possible\n for optional_pattern in optional_patterns:\n optional_chunk = replace_entities(entities, optional_pattern) or ''\n new_path = new_path.replace('[%s]' % optional_pattern,\n optional_chunk)\n # Replace remaining entities\n new_path = replace_entities(entities, new_path)\n\n if new_path:\n return new_path\n\n return None\n",
"def write_contents_to_file(path, contents=None, link_to=None,\n content_mode='text', root=None, conflicts='fail'):\n \"\"\"\n Uses provided filename patterns to write contents to a new path, given\n a corresponding entity map.\n\n Args:\n path (str): Destination path of the desired contents.\n contents (str): Raw text or binary encoded string of contents to write\n to the new path.\n link_to (str): Optional path with which to create a symbolic link to.\n Used as an alternative to and takes priority over the contents\n argument.\n content_mode (str): Either 'text' or 'binary' to indicate the writing\n mode for the new file. Only relevant if contents is provided.\n root (str): Optional root directory that all patterns are relative\n to. Defaults to current working directory.\n conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'\n that defines the desired action when the output path already\n exists. 'fail' raises an exception; 'skip' does nothing;\n 'overwrite' overwrites the existing file; 'append' adds a suffix\n to each file copy, starting with 1. 
Default is 'fail'.\n \"\"\"\n\n if root is None and not isabs(path):\n root = os.getcwd()\n\n if root:\n path = join(root, path)\n\n if exists(path) or islink(path):\n if conflicts == 'fail':\n msg = 'A file at path {} already exists.'\n raise ValueError(msg.format(path))\n elif conflicts == 'skip':\n msg = 'A file at path {} already exists, skipping writing file.'\n logging.warn(msg.format(path))\n return\n elif conflicts == 'overwrite':\n if isdir(path):\n logging.warn('New path is a directory, not going to '\n 'overwrite it, skipping instead.')\n return\n os.remove(path)\n elif conflicts == 'append':\n i = 1\n while i < sys.maxsize:\n path_splits = splitext(path)\n path_splits[0] = path_splits[0] + '_%d' % i\n appended_filename = os.extsep.join(path_splits)\n if not exists(appended_filename) and \\\n not islink(appended_filename):\n path = appended_filename\n break\n i += 1\n else:\n raise ValueError('Did not provide a valid conflicts parameter')\n\n if not exists(dirname(path)):\n os.makedirs(dirname(path))\n\n if link_to:\n os.symlink(link_to, path)\n elif contents:\n mode = 'wb' if content_mode == 'binary' else 'w'\n with open(path, mode) as f:\n f.write(contents)\n else:\n raise ValueError('One of contents or link_to must be provided.')\n"
] | class File(object):
def __init__(self, filename, domains=None):
"""
Represents a single file.
"""
self.path = filename
self.filename = basename(self.path)
self.dirname = dirname(self.path)
self.tags = {}
self.domains = domains or []
@property
def entities(self):
return {k: v.value for k, v in self.tags.items()}
def _matches(self, entities=None, extensions=None, domains=None,
regex_search=False):
"""
Checks whether the file matches all of the passed entities and
extensions.
Args:
entities (dict): A dictionary of entity names -> regex patterns.
extensions (str, list): One or more file extensions to allow.
domains (str, list): One or more domains the file must match.
regex_search (bool): Whether to require exact match (False) or
regex search (True) when comparing the query string to each
entity.
Returns:
True if _all_ entities and extensions match; False otherwise.
"""
if extensions is not None:
if isinstance(extensions, six.string_types):
extensions = [extensions]
extensions = '(' + '|'.join(extensions) + ')$'
if re.search(extensions, self.filename) is None:
return False
if domains is not None:
domains = listify(domains)
if not set(self.domains) & set(domains):
return False
if entities is not None:
for name, val in entities.items():
if (name not in self.tags) ^ (val is None):
return False
if val is None:
continue
def make_patt(x):
patt = '%s' % x
if isinstance(x, (int, float)):
# allow for leading zeros if a number was specified
# regardless of regex_search
patt = '0*' + patt
if not regex_search:
patt = '^%s$' % patt
return patt
ent_patts = [make_patt(x) for x in listify(val)]
patt = '|'.join(ent_patts)
if re.search(patt, str(self.tags[name].value)) is None:
return False
return True
def as_named_tuple(self):
"""
Returns the File as a named tuple. The full path plus all entity
key/value pairs are returned as attributes.
"""
keys = list(self.entities.keys())
replaced = []
for i, k in enumerate(keys):
if iskeyword(k):
replaced.append(k)
keys[i] = '%s_' % k
if replaced:
safe = ['%s_' % k for k in replaced]
warnings.warn("Entity names cannot be reserved keywords when "
"representing a File as a namedtuple. Replacing "
"entities %s with safe versions %s." % (keys, safe))
entities = dict(zip(keys, self.entities.values()))
_File = namedtuple('File', 'filename ' + ' '.join(entities.keys()))
return _File(filename=self.path, **entities)
|
grabbles/grabbit | grabbit/core.py | Entity.match_file | python | def match_file(self, f, update_file=False):
if self.map_func is not None:
val = self.map_func(f)
else:
m = self.regex.search(f.path)
val = m.group(1) if m is not None else None
return self._astype(val) | Determine whether the passed file matches the Entity.
Args:
f (File): The File instance to match against.
Returns: the matched value if a match was found, otherwise None. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L261-L276 | [
"def _astype(self, val):\n if val is not None and self.dtype is not None:\n val = self.dtype(val)\n return val\n"
] | class Entity(object):
def __init__(self, name, pattern=None, domain=None, mandatory=False,
             directory=None, map_func=None, dtype=None, aliases=None,
             **kwargs):
    """
    Represents a single entity defined in the JSON config.

    Args:
        name (str): The name of the entity (e.g., 'subject', 'run', etc.)
        pattern (str): A regex pattern used to match against file names.
            Must define at least one group, and only the first group is
            kept as the match.
        domain (Domain): The Domain the Entity belongs to.
        mandatory (bool): If True, every File _must_ match this entity.
        directory (str): Optional pattern defining a directory associated
            with the entity.
        map_func (callable): Optional callable used to extract the Entity's
            value from the passed string (instead of trying to match on the
            defined .pattern).
        dtype (str): The optional data type of the Entity values. Must be
            one of 'int', 'float', 'bool', or 'str'. If None, no type
            enforcement will be attempted, which means the dtype of the
            value may be unpredictable.
        aliases (str or list): Alternative names for the entity.
        kwargs (dict): Additional keyword arguments.
    """
    if pattern is None and map_func is None:
        raise ValueError("Invalid specification for Entity '%s'; no "
                         "pattern or mapping function provided. Either the"
                         " 'pattern' or the 'map_func' arguments must be "
                         "set." % name)
    self.name = name
    self.pattern = pattern
    self.domain = domain
    self.mandatory = mandatory
    self.directory = directory
    self.map_func = map_func
    self.kwargs = kwargs

    if isinstance(dtype, six.string_types):
        # SECURITY FIX: resolve the dtype name through an explicit
        # whitelist instead of eval(), which executed arbitrary
        # config-supplied strings. Unknown names pass through unchanged
        # so the validation below reports them.
        dtype = {'str': str, 'float': float, 'int': int,
                 'bool': bool}.get(dtype, dtype)
    if dtype not in [str, float, int, bool, None]:
        raise ValueError("Invalid dtype '%s'. Must be one of int, float, "
                         "bool, or str." % dtype)
    self.dtype = dtype

    self.files = {}
    # Pre-compile the matching pattern once; match_file() reuses it.
    self.regex = re.compile(pattern) if pattern is not None else None
    domain_name = getattr(domain, 'name', '')
    # Fully-qualified id: '<domain>.<name>'.
    self.id = '.'.join([domain_name, name])
    aliases = [] if aliases is None else listify(aliases)
    self.aliases = ['.'.join([domain_name, alias]) for alias in aliases]
def __iter__(self):
for i in self.unique():
yield(i)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo)
setattr(result, k, new_val)
return result
def add_file(self, filename, value):
""" Adds the specified filename to tracking. """
self.files[filename] = value
def unique(self):
""" Returns all unique values/levels for the current entity. """
return list(set(self.files.values()))
def count(self, files=False):
""" Returns a count of unique values or files.
Args:
files (bool): When True, counts all files mapped to the Entity.
When False, counts all unique values.
Returns: an int.
"""
return len(self.files) if files else len(self.unique())
def _astype(self, val):
if val is not None and self.dtype is not None:
val = self.dtype(val)
return val
|
grabbles/grabbit | grabbit/core.py | Layout._get_or_load_domain | python | def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name] | Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L423-L457 | null | class Layout(object):
def __init__(self, paths, root=None, index=None,
             dynamic_getters=False, absolute_paths=True,
             regex_search=False, entity_mapper=None, path_patterns=None,
             config_filename='layout.json', include=None, exclude=None):
    """
    A container for all the files and metadata found at the specified path.

    Args:
        paths (str, list): The path(s) where project files are located.
            Must be one of:

            - A path to a directory containing files to index
            - A list of paths to directories to index
            - A list of 2-tuples where each tuple encodes a mapping from
              directories to domains. The first element is a string or
              list giving the paths to one or more directories to index.
              The second element specifies which domains to apply to the
              specified files, and can be one of:
                * A string giving the path to a JSON config file
                * A dictionary containing config information
                * A list of any combination of strings or dicts
        root (str): Optional directory that all other paths will be
            relative to. If set, every other path the Layout sees must be
            at this level or below. If None, filesystem root ('/') is used.
        index (str): Optional path to a saved index file. If a valid value
            is passed, this index is used to populate Files and Entities,
            and the normal indexing process (which requires scanning all
            files in the project) is skipped.
        dynamic_getters (bool): If True, a get_{entity_name}() method will
            be dynamically added to the Layout every time a new Entity is
            created. This is implemented by creating a partial function of
            the get() function that sets the target argument to the
            entity name.
        absolute_paths (bool): If True, grabbit uses absolute file paths
            everywhere (including when returning query results). If False,
            the input path will determine the behavior (i.e., relative if
            a relative path was passed, absolute if an absolute path was
            passed).
        regex_search (bool): Whether to require exact matching (True)
            or regex search (False, default) when comparing the query
            string to each entity in .get() calls. This sets a default for
            the instance, but can be overridden in individual .get()
            requests.
        entity_mapper (object, str): An optional object containing methods
            for indexing specific entities. If passed, the object must
            contain a named method for every value that appears in the
            JSON config file under the "mapper" key of an Entity's entry.
            For example, if an entity "type" is defined that contains the
            key/value pair "mapper": "extract_type", then the passed object
            must contain an .extract_type() method.
            Alternatively, the special string "self" can be passed, in
            which case the current Layout instance will be used as the
            entity mapper (implying that the user has subclassed Layout).
        path_patterns (str, list): One or more filename patterns to use
            as a default path pattern for this layout's files. Can also
            be specified in the config file.
        config_filename (str): The name of directory-specific config files.
            Every directory will be scanned for this file, and if found,
            the config file will be read in and added to the list of
            configs.
        include (str, list): A string or list specifying regexes used to
            globally filter files when indexing. A file or directory
            *must* match at least of the passed values in order to be
            retained in the index. Cannot be used together with 'exclude'.
        exclude (str, list): A string or list specifying regexes used to
            globally filter files when indexing. If a file or directory
            *must* matches any of the passed values, it will be dropped
            from indexing. Cannot be used together with 'include'.
    """
    # include and exclude are mutually exclusive filters.
    if include is not None and exclude is not None:
        raise ValueError("You cannot specify both the include and exclude"
                         " arguments. Please pass at most one of these.")

    self.entities = OrderedDict()
    self.files = {}
    self.mandatory = set()
    self.dynamic_getters = dynamic_getters
    self.regex_search = regex_search
    # 'self' sentinel: the (subclassed) Layout acts as its own mapper.
    self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
    self.path_patterns = path_patterns if path_patterns else []
    self.config_filename = config_filename
    self.domains = OrderedDict()
    self.include = listify(include or [])
    self.exclude = listify(exclude or [])
    self.absolute_paths = absolute_paths

    if root is None:
        root = '/'
    self.root = abspath(root)

    # NOTE(review): _domain_map is initialized but not written to in the
    # visible code — confirm whether it is still needed.
    self._domain_map = {}

    # Extract path --> domain mapping
    self._paths_to_index = {}

    def add_path(path, val):
        # Normalize to absolute paths so lookups during indexing match.
        path = abspath(path)
        self._paths_to_index[path] = val

    for p in listify(paths, ignore=list):
        if isinstance(p, six.string_types):
            # Bare directory path: no pre-assigned domains.
            add_path(p, [])
        else:
            # 2-tuple form: (directories, domain spec(s)).
            doms = listify(p[1])
            doms = [self._get_or_load_domain(d) for d in doms]
            for elem in listify(p[0]):
                add_path(elem, doms)

    # Verify existence of all paths
    for p in self._paths_to_index:
        if not exists(p):
            raise ValueError("Search path {} doesn't exist.".format(p))

    # Either scan the filesystem or rehydrate from a saved index file.
    if index is None:
        self.index()
    else:
        self.load_index(index)
def get_domain_entities(self, domains=None):
    """Return a dict of all Entities in the specified Domains.

    Entities are merged in the order the domains appear in the list, so
    later domains overwrite earlier ones on name collisions. Defaults to
    all registered domains.
    """
    if domains is None:
        domains = list(self.domains.keys())
    merged = {}
    for dom_name in domains:
        merged.update(self.domains[dom_name].entities)
    return merged
def _check_inclusions(self, f, domains=None):
    ''' Check file or directory against regexes in config to determine if
    it should be included in the index.

    Accepts either a path string or a File object. The Layout's own
    global include/exclude filters are consulted first, then each
    domain's. Note the asymmetry: the *first* scanned object (Layout or
    domain) that defines an `include` list fully decides the outcome,
    while `exclude` lists from successive domains accumulate.
    '''
    filename = f if isinstance(f, six.string_types) else f.path
    if domains is None:
        domains = list(self.domains.values())
    # Inject the Layout at the first position for global include/exclude
    domains = list(domains)
    domains.insert(0, self)
    for dom in domains:
        # If file matches any include regex, then True
        if dom.include:
            for regex in dom.include:
                if re.search(regex, filename):
                    return True
            # No include pattern matched: reject without consulting any
            # later domains.
            return False
        else:
            # If file matches any exclude regex, then False
            # NOTE(review): excludes use re.UNICODE but includes do not —
            # confirm the inconsistency is intentional.
            for regex in dom.exclude:
                if re.search(regex, filename, flags=re.UNICODE):
                    return False
    # Nothing excluded the file and no include list was in force.
    return True
def _validate_dir(self, d):
    """Subclass hook for extra directory validation.

    Called the first time a directory is read in; returning False drops
    the directory (and its contents) from the layout. The default simply
    applies the same check as for files.
    """
    return self._validate_file(d)
def _validate_file(self, f):
    """Subclass hook for extra file validation.

    Called the first time each file is read in; returning False drops the
    file from the layout. The base implementation accepts everything.
    """
    return True
def _get_files(self, root):
    ''' Returns all files in directory (non-recursively).

    Returns entry *names* (not full paths), as produced by os.listdir.
    Presumably an overridable hook, like _make_file_object — subclasses
    may customize directory listing.
    '''
    return os.listdir(root)
def _make_file_object(self, root, f):
    ''' Initialize a new File object from a directory and filename. Extend
    in subclasses as needed (e.g., to return a File subclass). '''
    return File(join(root, f))
def _reset_index(self):
    """Drop all indexed files from the Layout and from every Entity."""
    self.files = {}
    for entity in self.entities.values():
        entity.files = {}
def _index_file(self, root, f, domains, update_layout=True):
    """Match a single file against each domain's entities and record it.

    Args:
        root (str): Directory containing the file.
        f (str): Filename within `root`.
        domains (str, list): Domain *names* to match the file against.
        update_layout (bool): If True, also register the file with the
            matched Entities, the Domains, and the Layout's file index.

    Returns: the constructed File object (tagged in either case).
    """
    # Create the file object--allows for subclassing
    f = self._make_file_object(root, f)

    for domain in listify(domains):
        domain = self.domains[domain]
        match_vals = {}
        for e in domain.entities.values():
            m = e.match_file(f)
            # NOTE(review): a missing mandatory entity stops matching the
            # remaining entities for this domain, but matches collected so
            # far are still tagged below — confirm this is intended.
            if m is None and e.mandatory:
                break
            if m is not None:
                match_vals[e.name] = (e, m)
        if match_vals:
            for k, (ent, val) in match_vals.items():
                f.tags[k] = Tag(ent, val)
                if update_layout:
                    ent.add_file(f.path, val)
        if update_layout:
            domain.add_file(f)

    if update_layout:
        f.domains = domains
        self.files[f.path] = f

    return f
def _find_entity(self, entity):
    """Look up an Entity by its id or bare name.

    Raises ValueError if the bare name is ambiguous across domains or no
    entity matches at all.
    """
    # Fast path: the key is a fully-qualified id (or alias).
    if entity in self.entities:
        return self.entities[entity]
    candidates = [e for e in self.entities.values() if e.name == entity]
    if len(candidates) > 1:
        raise ValueError("Entity name '%s' matches %d entities. To "
                         "avoid ambiguity, please prefix the entity "
                         "name with its domain (e.g., 'bids.%s'." %
                         (entity, len(candidates), entity))
    if candidates:
        return candidates[0]
    raise ValueError("No entity '%s' found." % entity)
def index(self):
    """Walk all registered search paths and (re)build the file index."""
    self._reset_index()

    def _index_dir(dir_, domains):
        # Recursively index one directory with the domains active so far.
        contents = [join(dir_, f) for f in self._get_files(dir_)]

        # Check for domain config file
        config_file = join(dir_, self.config_filename)
        if exists(config_file):
            new_dom = self._get_or_load_domain(config_file)
            if new_dom not in domains:
                domains.append(new_dom)
            # The config file itself is never indexed as data.
            contents.remove(config_file)

        contents = filter(lambda x: self._check_inclusions(x, domains),
                          contents)

        # If the directory was explicitly passed in Layout init,
        # overwrite the current set of domains with what was passed
        domains = self._paths_to_index.get(dir_, domains)

        for f in contents:
            # NOTE(review): `contents` already holds absolute paths, so
            # this join is a no-op on POSIX for absolute f — confirm.
            full_path = join(dir_, f)
            if isdir(full_path):
                if self._validate_dir(full_path):
                    # Pass a copy so siblings don't share domain lists.
                    _index_dir(full_path, list(domains))
            elif self._validate_file(full_path):
                _dir, _base = split(full_path)
                dom_names = [d.name for d in domains]
                self._index_file(_dir, _base, dom_names)

    # Index each directory
    for path, domains in self._paths_to_index.items():
        _index_dir(path, list(domains))
def save_index(self, filename):
    ''' Save the current Layout's index to a .json file.

    Args:
        filename (str): Filename to write to.

    Note: At the moment, this won't serialize directory-specific config
    files. This means reconstructed indexes will only work properly in
    cases where there aren't multiple layout specs within a project.
    '''
    data = {}
    for f in self.files.values():
        # Serialize each file as its domain names plus a mapping of
        # fully-qualified entity id -> tagged value.
        entities = {v.entity.id: v.value for k, v in f.tags.items()}
        data[f.path] = {'domains': f.domains, 'entities': entities}
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)
def load_index(self, filename, reindex=False):
    ''' Load the Layout's index from a plaintext file.

    Args:
        filename (str): Path to the plaintext index file.
        reindex (bool): If True, discards entity values provided in the
            loaded index and instead re-indexes every file in the loaded
            index against the entities defined in the config. Default is
            False, in which case it is assumed that all entity definitions
            in the loaded index are correct and do not need any further
            validation.

    Note: At the moment, directory-specific config files aren't serialized.
    This means reconstructed indexes will only work properly in cases
    where there aren't multiple layout specs within a project.
    '''
    self._reset_index()
    with open(filename, 'r') as fobj:
        data = json.load(fobj)
    for path, file in data.items():
        ents, domains = file['entities'], file['domains']
        root, f = dirname(path), basename(path)
        if reindex:
            # Re-run the normal matching machinery from scratch.
            self._index_file(root, f, domains)
        else:
            # Trust the stored entity id -> value pairs verbatim.
            f = self._make_file_object(root, f)
            tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
            f.tags = tags
            self.files[f.path] = f
            for ent, val in f.entities.items():
                self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout._check_inclusions | python | def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True | Check file or directory against regexes in config to determine if
it should be included in the index | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L470-L495 | null | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
                ignore_strict_entities=None, full_search=False, **kwargs):
    ''' Walk up the file tree from the specified path and return the
    nearest matching file(s).

    Args:
        path (str): The file to search from.
        return_type (str): What to return; must be one of 'file' (default)
            or 'tuple'.
        strict (bool): When True, all entities present in both the input
            path and the target file(s) must match perfectly. When False,
            files will be ordered by the number of matching entities, and
            partial matches will be allowed.
        all_ (bool): When True, returns all matching files. When False
            (default), only returns the first match.
        ignore_strict_entities (list): Optional list of entities to
            exclude from strict matching when strict is True. This allows
            one to search, e.g., for files of a different type while
            matching all other entities perfectly by passing
            ignore_strict_entities=['type'].
        full_search (bool): If True, searches all indexed files, even if
            they don't share a common root with the provided path. If
            False, only files that share a common root will be scanned.
        kwargs: Optional keywords to pass on to .get().
    '''
    # Extract every entity value embedded in the query path itself.
    entities = {}
    for ent in self.entities.values():
        m = ent.regex.search(path)
        if m:
            entities[ent.name] = ent._astype(m.group(1))

    # Remove any entities we want to ignore when strict matching is on
    if strict and ignore_strict_entities is not None:
        for k in ignore_strict_entities:
            entities.pop(k, None)

    # Group all candidate files by their containing directory.
    results = self.get(return_type='file', **kwargs)
    folders = defaultdict(list)
    for filename in results:
        f = self.get_file(filename)
        folders[f.dirname].append(f)

    def count_matches(f):
        # Returns [shared, matching]: the number of entities the file has
        # in common with the query path, and how many of those values are
        # equal.
        f_ents = f.entities
        keys = set(entities.keys()) & set(f_ents.keys())
        shared = len(keys)
        return [shared, sum([entities[k] == f_ents[k] for k in keys])]

    matches = []
    search_paths = []

    # Walk from the query path up to the filesystem root, collecting each
    # ancestor directory that holds candidate files (nearest first).
    while True:
        if path in folders and folders[path]:
            search_paths.append(path)
        parent = dirname(path)
        if parent == path:
            break
        path = parent

    if full_search:
        # Also scan directories off the ancestor chain (order is the
        # arbitrary set order, so these are tried after the chain).
        unchecked = set(folders.keys()) - set(search_paths)
        search_paths.extend(path for path in unchecked if folders[path])

    for path in search_paths:
        # Sort by number of matching entities. Also store number of
        # common entities, for filtering when strict=True.
        num_ents = [[f] + count_matches(f) for f in folders[path]]
        # Filter out imperfect matches (i.e., where number of common
        # entities does not equal number of matching entities).
        if strict:
            num_ents = [f for f in num_ents if f[1] == f[2]]
        num_ents.sort(key=lambda x: x[2], reverse=True)

        if num_ents:
            matches.append(num_ents[0][0])

        if not all_:
            break

    # Convert File objects to the requested representation.
    matches = [m.path if return_type == 'file' else m.as_named_tuple()
               for m in matches]
    return matches if all_ else matches[0] if matches else None
def clone(self):
    """Return an independent deep copy of this Layout."""
    duplicate = deepcopy(self)
    return duplicate
def parse_file_entities(self, filename, domains=None):
    """Extract entity values from a filename without updating the index.

    Args:
        filename (str): Path of the file to parse.
        domains (list): Optional names of the domains whose entities are
            extracted; defaults to every domain known to the Layout.

    Returns:
        dict: Mapping of entity names to the values found in the path.
    """
    root = dirname(filename)
    f = basename(filename)
    if domains is None:
        if not root:
            msg = ("If a relative path is provided as the filename "
                   "argument, you *must* specify the names of the "
                   "domains whose entities are to be extracted. "
                   "Available domains for the current layout are: %s"
                   % list(self.domains.keys()))
            raise ValueError(msg)
        domains = list(self.domains.keys())
    indexed = self._index_file(root, f, domains, update_layout=False)
    return indexed.entities
def build_path(self, source, path_patterns=None, strict=False,
               domains=None):
    """Construct a target filename from a file or dict of entities.

    Args:
        source (str, File, dict): One of a File object, the path of a
            File tracked by this Layout, or a dict mapping entity names
            to values.
        path_patterns (list): Optional patterns used to build the new
            path. When None, patterns are gathered from the scanned
            domains.
        strict (bool): When True, every entity must be matched inside a
            pattern for the pattern to be valid; when False, extra
            entities are ignored as long as all mandatory ones match.
        domains (str, list): Optional name(s) of domain(s) whose path
            patterns are scanned, in order of precedence. When None, all
            domains are scanned.
    """
    # Normalize a path string into its tracked File object.
    if isinstance(source, six.string_types):
        key = source if source in self.files else join(self.root, source)
        source = self.get_file(key)

    # A File contributes its entity dictionary.
    if isinstance(source, File):
        source = source.entities

    if path_patterns is None:
        selected = list(self.domains.keys()) if domains is None else domains
        path_patterns = []
        for dom in listify(selected):
            path_patterns.extend(self.domains[dom].path_patterns)

    return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.

    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when a output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix
            to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        _files = list(set(files).intersection(_files))

    # Bug fix: the root argument used to be silently ignored (self.root
    # was always passed). Honor an explicit root; fall back to the
    # Layout root when none is given, preserving prior behavior.
    if root is None:
        root = self.root

    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
                           contents=None, link_to=None,
                           content_mode='text', conflicts='fail',
                           strict=False, domains=None, index=False,
                           index_domains=None):
    """Write arbitrary data to a file whose path is built from entities.

    Args:
        entities (dict): Entity name -> value pairs describing the
            desired file.
        path_patterns (list): Optional patterns used when building the
            filename; defaults to the Layout-defined patterns.
        contents (object): Data to write to the generated path; anything
            serializable as text or binary per content_mode.
        link_to (str): Optional existing path to link to rather than
            writing contents.
        content_mode (str): 'text' or 'binary' handling of contents.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            controlling behavior when the output path already exists.
        strict (bool): When True, all entities must be matched inside a
            pattern for it to be valid; when False, extras are ignored
            as long as the mandatory entities are found.
        domains (list): Domains to scan for path_patterns, in precedence
            order; all available domains when None.
        index (bool): When True, add the generated file to the current
            index under index_domains.
        index_domains (list): Domain names the generated file is attached
            to when indexing; all available domains when None. Ignored
            unless index is True.
    """
    path = self.build_path(entities, path_patterns, strict, domains)
    if path is None:
        raise ValueError("Cannot construct any valid filename for "
                         "the passed entities given available path "
                         "patterns.")

    # Delegate the actual write/link to the module-level helper.
    write_contents_to_file(path, contents=contents, link_to=link_to,
                           content_mode=content_mode, conflicts=conflicts,
                           root=self.root)

    if index:
        # TODO: Default to using only domains that have at least one
        # tagged entity in the generated file.
        if index_domains is None:
            index_domains = list(self.domains.keys())
        self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout._find_entity | python | def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity) | Find an Entity instance by name. Checks both name and id fields. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L557-L570 | null | class Layout(object):
def __init__(self, paths, root=None, index=None,
             dynamic_getters=False, absolute_paths=True,
             regex_search=False, entity_mapper=None, path_patterns=None,
             config_filename='layout.json', include=None, exclude=None):
    """
    A container for all the files and metadata found at the specified path.

    Args:
        paths (str, list): The path(s) where project files are located.
            Must be one of:

            - A path to a directory containing files to index
            - A list of paths to directories to index
            - A list of 2-tuples where each tuple encodes a mapping from
              directories to domains. The first element is a string or
              list giving the paths to one or more directories to index.
              The second element specifies which domains to apply to the
              specified files, and can be one of:
                * A string giving the path to a JSON config file
                * A dictionary containing config information
                * A list of any combination of strings or dicts

        root (str): Optional directory that all other paths will be
            relative to. If set, every other path the Layout sees must be
            at this level or below. If None, filesystem root ('/') is used.
        index (str): Optional path to a saved index file. If a valid value
            is passed, this index is used to populate Files and Entities,
            and the normal indexing process (which requires scanning all
            files in the project) is skipped.
        dynamic_getters (bool): If True, a get_{entity_name}() method will
            be dynamically added to the Layout every time a new Entity is
            created. This is implemented by creating a partial function of
            the get() function that sets the target argument to the
            entity name.
        absolute_paths (bool): If True, grabbit uses absolute file paths
            everywhere (including when returning query results). If False,
            the input path will determine the behavior (i.e., relative if
            a relative path was passed, absolute if an absolute path was
            passed).
        regex_search (bool): Whether to require exact matching (True)
            or regex search (False, default) when comparing the query
            string to each entity in .get() calls. This sets a default for
            the instance, but can be overridden in individual .get()
            requests.
        entity_mapper (object, str): An optional object containing methods
            for indexing specific entities. If passed, the object must
            contain a named method for every value that appears in the
            JSON config file under the "mapper" key of an Entity's entry.
            For example, if an entity "type" is defined that contains the
            key/value pair "mapper": "extract_type", then the passed object
            must contain an .extract_type() method.
            Alternatively, the special string "self" can be passed, in
            which case the current Layout instance will be used as the
            entity mapper (implying that the user has subclassed Layout).
        path_patterns (str, list): One or more filename patterns to use
            as a default path pattern for this layout's files. Can also
            be specified in the config file.
        config_filename (str): The name of directory-specific config files.
            Every directory will be scanned for this file, and if found,
            the config file will be read in and added to the list of
            configs.
        include (str, list): A string or list specifying regexes used to
            globally filter files when indexing. A file or directory
            *must* match at least of the passed values in order to be
            retained in the index. Cannot be used together with 'exclude'.
        exclude (str, list): A string or list specifying regexes used to
            globally filter files when indexing. If a file or directory
            *must* matches any of the passed values, it will be dropped
            from indexing. Cannot be used together with 'include'.
    """
    # include and exclude are mutually exclusive filters.
    if include is not None and exclude is not None:
        raise ValueError("You cannot specify both the include and exclude"
                         " arguments. Please pass at most one of these.")

    self.entities = OrderedDict()
    self.files = {}
    self.mandatory = set()
    self.dynamic_getters = dynamic_getters
    self.regex_search = regex_search
    # 'self' is a sentinel meaning the Layout subclass is its own mapper.
    self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
    self.path_patterns = path_patterns if path_patterns else []
    self.config_filename = config_filename
    self.domains = OrderedDict()
    self.include = listify(include or [])
    self.exclude = listify(exclude or [])
    self.absolute_paths = absolute_paths

    if root is None:
        root = '/'
    self.root = abspath(root)

    self._domain_map = {}

    # Extract path --> domain mapping
    self._paths_to_index = {}

    def add_path(path, val):
        # Keys are always absolute so lookups during index() succeed.
        path = abspath(path)
        self._paths_to_index[path] = val

    for p in listify(paths, ignore=list):
        if isinstance(p, six.string_types):
            # Bare path: no explicit domains attached.
            add_path(p, [])
        else:
            # (paths, domains) 2-tuple: load each domain spec up front.
            doms = listify(p[1])
            doms = [self._get_or_load_domain(d) for d in doms]
            for elem in listify(p[0]):
                add_path(elem, doms)

    # Verify existence of all paths
    for p in self._paths_to_index:
        if not exists(p):
            raise ValueError("Search path {} doesn't exist.".format(p))

    # A saved index, when provided, replaces the full filesystem scan.
    if index is None:
        self.index()
    else:
        self.load_index(index)
def _get_or_load_domain(self, domain):
    ''' Return a domain if one already exists, or create a new one if not.

    Args:
        domain (str, dict): Can be one of:
            - The name of the Domain to return (fails if none exists)
            - A path to the Domain configuration file
            - A dictionary containing configuration information
    '''
    if isinstance(domain, six.string_types):
        if domain in self.domains:
            # Known domain name: return the cached instance.
            return self.domains[domain]
        elif exists(domain):
            # Otherwise treat the string as a path to a JSON config file.
            with open(domain, 'r') as fobj:
                domain = json.load(fobj)
        else:
            raise ValueError("No domain could be found/loaded from input "
                             "'{}'; value must be either the name of an "
                             "existing Domain, or a valid path to a "
                             "configuration file.".format(domain))

    # At this point, domain is a dict
    name = domain['name']

    # Duplicate definitions are not an error: warn and keep the original.
    if name in self.domains:
        msg = ("Domain with name '{}' already exists; returning existing "
               "Domain configuration.".format(name))
        warnings.warn(msg)
        return self.domains[name]

    # Register the domain's entities with the Layout before caching it.
    entities = domain.get('entities', [])
    domain = Domain(domain)
    for e in entities:
        self.add_entity(domain=domain, **e)

    self.domains[name] = domain
    return self.domains[name]
def get_domain_entities(self, domains=None):
    """Return a dict of all Entities defined by the given Domains.

    Entities are merged in list order, so a domain later in the list
    overrides earlier ones on name collisions.

    Args:
        domains (list): Domain names to merge; all domains when None.
    """
    if domains is None:
        domains = list(self.domains.keys())
    merged = {}
    for name in domains:
        merged.update(self.domains[name].entities)
    return merged
def _check_inclusions(self, f, domains=None):
    ''' Check file or directory against regexes in config to determine if
    it should be included in the index '''
    # Accept either a raw path string or a File-like object with .path.
    filename = f if isinstance(f, six.string_types) else f.path

    if domains is None:
        domains = list(self.domains.values())

    # Inject the Layout at the first position for global include/exclude
    domains = list(domains)
    domains.insert(0, self)

    for dom in domains:
        # If file matches any include regex, then True
        if dom.include:
            for regex in dom.include:
                if re.search(regex, filename):
                    return True
            # NOTE(review): the first domain (Layout included) that defines
            # a non-empty include list fully decides the outcome; later
            # domains' include/exclude lists are never consulted. Confirm
            # this short-circuit is intentional.
            return False
        else:
            # If file matches any exclude regex, then False
            for regex in dom.exclude:
                if re.search(regex, filename, flags=re.UNICODE):
                    return False
    return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
    """Build a File object for filename *f* inside directory *root*.

    Subclasses can override this to return specialized File types.
    """
    full_path = join(root, f)
    return File(full_path)
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
    """Match a single file against the entities of the given domains.

    Args:
        root (str): Directory containing the file.
        f (str): Filename within root.
        domains (str, list): Domain name(s) to match against.
        update_layout (bool): When True, register the file and its entity
            values with the Layout; when False, only build and tag the
            File object (used by parse_file_entities).

    Returns:
        The constructed (and possibly registered) File object.
    """
    # Create the file object--allows for subclassing
    f = self._make_file_object(root, f)

    for domain in listify(domains):
        domain = self.domains[domain]
        match_vals = {}
        for e in domain.entities.values():
            m = e.match_file(f)
            # A failed mandatory entity aborts matching for this domain;
            # note that entities matched before the break are still kept.
            if m is None and e.mandatory:
                break
            if m is not None:
                match_vals[e.name] = (e, m)

        if match_vals:
            for k, (ent, val) in match_vals.items():
                # Tag the file with each matched entity value.
                f.tags[k] = Tag(ent, val)
                if update_layout:
                    ent.add_file(f.path, val)

        # The file is attached to the domain even when nothing matched.
        if update_layout:
            domain.add_file(f)

    if update_layout:
        f.domains = domains
        self.files[f.path] = f

    return f
def index(self):
    """Recursively scan all registered search paths and (re)build the
    file/entity index from scratch."""
    self._reset_index()

    def _index_dir(dir_, domains):
        contents = [join(dir_, f) for f in self._get_files(dir_)]

        # Check for domain config file
        config_file = join(dir_, self.config_filename)
        if exists(config_file):
            new_dom = self._get_or_load_domain(config_file)
            if new_dom not in domains:
                domains.append(new_dom)
            # The config file itself is never indexed.
            contents.remove(config_file)

        # Drop anything excluded by layout/domain include/exclude rules.
        contents = filter(lambda x: self._check_inclusions(x, domains),
                          contents)

        # If the directory was explicitly passed in Layout init,
        # overwrite the current set of domains with what was passed
        domains = self._paths_to_index.get(dir_, domains)

        for f in contents:
            # f is already an absolute path here, so join() leaves it
            # unchanged — presumably kept for symmetry; TODO confirm.
            full_path = join(dir_, f)
            if isdir(full_path):
                if self._validate_dir(full_path):
                    # Recurse with a copy so sibling dirs don't share
                    # domain-list mutations.
                    _index_dir(full_path, list(domains))
            elif self._validate_file(full_path):
                _dir, _base = split(full_path)
                dom_names = [d.name for d in domains]
                self._index_file(_dir, _base, dom_names)

    # Index each directory
    for path, domains in self._paths_to_index.items():
        _index_dir(path, list(domains))
def save_index(self, filename):
    """Serialize the current index to a JSON file.

    Each tracked file is recorded as path -> {'domains': [...],
    'entities': {entity_id: value}}.

    Args:
        filename (str): Destination path for the JSON index.

    Note: At the moment, this won't serialize directory-specific config
    files, so reconstructed indexes only work properly when a project
    does not contain multiple layout specs.
    """
    index = {}
    for f in self.files.values():
        tag_map = {tag.entity.id: tag.value for tag in f.tags.values()}
        index[f.path] = {'domains': f.domains, 'entities': tag_map}
    with open(filename, 'w') as outfile:
        json.dump(index, outfile)
def load_index(self, filename, reindex=False):
    ''' Load the Layout's index from a plaintext file.

    Args:
        filename (str): Path to the plaintext index file.
        reindex (bool): If True, discards entity values provided in the
            loaded index and instead re-indexes every file in the loaded
            index against the entities defined in the config. Default is
            False, in which case it is assumed that all entity definitions
            in the loaded index are correct and do not need any further
            validation.

    Note: At the moment, directory-specific config files aren't serialized.
    This means reconstructed indexes will only work properly in cases
    where there aren't multiple layout specs within a project.
    '''
    self._reset_index()
    with open(filename, 'r') as fobj:
        data = json.load(fobj)

    for path, file in data.items():
        ents, domains = file['entities'], file['domains']
        root, f = dirname(path), basename(path)
        if reindex:
            # Re-run full entity matching instead of trusting the index.
            self._index_file(root, f, domains)
        else:
            # Rehydrate the File and its tags straight from the index.
            f = self._make_file_object(root, f)
            tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
            f.tags = tags
            self.files[f.path] = f
            for ent, val in f.entities.items():
                self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
    ''' Add a new Entity to tracking.

    Args:
        domain (Domain): The Domain the new Entity belongs to.
        kwargs: Keyword arguments forwarded to the Entity constructor
            (e.g., name, pattern, mandatory, directory, map_func).
    '''
    # Set the entity's mapping func if one was specified
    map_func = kwargs.get('map_func', None)
    if map_func is not None and not callable(kwargs['map_func']):
        # A non-callable map_func is a method name to resolve on the
        # entity_mapper object supplied at Layout construction.
        if self.entity_mapper is None:
            raise ValueError("Mapping function '%s' specified for Entity "
                             "'%s', but no entity mapper was passed when "
                             "initializing the current Layout. Please make"
                             " sure the 'entity_mapper' argument is set." %
                             (map_func, kwargs['name']))
        map_func = getattr(self.entity_mapper, kwargs['map_func'])
        kwargs['map_func'] = map_func

    ent = Entity(domain=domain, **kwargs)
    domain.add_entity(ent)

    if ent.mandatory:
        self.mandatory.add(ent.id)
    if ent.directory is not None:
        # Expand the {{root}} placeholder in directory templates.
        ent.directory = ent.directory.replace('{{root}}', self.root)

    # Register under the fully-qualified id plus any aliases.
    self.entities[ent.id] = ent
    for alias in ent.aliases:
        self.entities[alias] = ent

    if self.dynamic_getters:
        # Expose a get_<plural-name>() convenience wrapper around get().
        func = partial(getattr(self, 'get'), target=ent.name,
                       return_type='id')
        func_name = inflect.engine().plural(ent.name)
        setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
        domains=None, regex_search=None, **kwargs):
    """
    Retrieve files and/or metadata from the current Layout.

    Args:
        return_type (str): Type of result to return. Valid values:
            'tuple': returns a list of namedtuples containing file name as
                well as attribute/value pairs for all named entities.
            'file': returns a list of matching filenames.
            'dir': returns a list of directories.
            'id': returns a list of unique IDs. Must be used together with
                a valid target.
            'obj': returns a list of matching File objects.
        target (str): The name of the target entity to get results for
            (if return_type is 'dir' or 'id').
        extensions (str, list): One or more file extensions to filter on.
            Files with any other extensions will be excluded.
        domains (list): Optional list of domain names to scan for files.
            If None, all available domains are scanned.
        regex_search (bool or None): Whether to require exact matching
            (False) or regex search (True) when comparing the query string
            to each entity. If None (default), uses the value found in
            self.
        kwargs (dict): Any optional key/values to filter the entities on.
            Keys are entity names, values are regexes to filter on. For
            example, passing filter={ 'subject': 'sub-[12]'} would return
            only files that match the first two subjects.

    Returns:
        A named tuple (default) or a list (see return_type for details).
    """
    # Fall back to the instance-level matching mode.
    if regex_search is None:
        regex_search = self.regex_search

    result = []
    filters = {}
    filters.update(kwargs)

    # Scan every indexed file; File._matches applies all filters.
    for filename, file in self.files.items():
        if not file._matches(filters, extensions, domains, regex_search):
            continue
        result.append(file)

    # Convert to relative paths if needed
    if not self.absolute_paths:
        for i, f in enumerate(result):
            # Copy so the indexed File objects keep their absolute paths.
            f = copy(f)
            f.path = relpath(f.path, self.root)
            result[i] = f

    if return_type == 'file':
        return natural_sort([f.path for f in result])

    if return_type == 'tuple':
        result = [r.as_named_tuple() for r in result]
        return natural_sort(result, field='filename')

    if return_type.startswith('obj'):
        return result

    else:
        # 'id' and 'dir' modes both require a target entity.
        valid_entities = self.get_domain_entities(domains)

        if target is None:
            raise ValueError('If return_type is "id" or "dir", a valid '
                             'target entity must also be specified.')
        result = [x for x in result if target in x.entities]

        if return_type == 'id':
            result = list(set([x.entities[target] for x in result]))
            return natural_sort(result)

        elif return_type == 'dir':
            template = valid_entities[target].directory
            if template is None:
                raise ValueError('Return type set to directory, but no '
                                 'directory template is defined for the '
                                 'target entity (\"%s\").' % target)
            # Construct regex search pattern from target directory template
            to_rep = re.findall('\{(.*?)\}', template)
            for ent in to_rep:
                patt = valid_entities[ent].pattern
                template = template.replace('{%s}' % ent, patt)
            # Anchor the pattern at a directory boundary.
            template += '[^\%s]*$' % os.path.sep
            matches = [f.dirname for f in result
                       if re.search(template, f.dirname)]
            return natural_sort(list(set(matches)))

        else:
            raise ValueError("Invalid return_type specified (must be one "
                             "of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
    """List every distinct value recorded for the named entity.

    Args:
        entity (str): The entity whose unique values are returned.
    """
    return self._find_entity(entity).unique()
def count(self, entity, files=False):
    """Count the distinct values (or matching files) for an entity.

    Args:
        entity (str): The entity to count.
        files (bool): When True, counts files containing at least one
            value of the entity instead of distinct values.
    """
    return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
    """Build a pandas DataFrame of all tracked Files.

    One row per file; 'path' is the first column and each tracked entity
    gets its own column (NaN where a file lacks a value).

    Args:
        kwargs: Optional selectors forwarded to get() to restrict the
            exported files.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError("What are you doing trying to export a Layout "
                          "as a pandas DataFrame when you don't have "
                          "pandas installed? Eh? Eh?")
    if kwargs:
        selected = self.get(return_type='obj', **kwargs)
    else:
        selected = self.files.values()
    frame = pd.DataFrame.from_records([f.entities for f in selected])
    frame.insert(0, 'path', [f.path for f in selected])
    return frame
def get_file(self, f):
    """Look up and return the File object tracked under path *f*."""
    return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
                ignore_strict_entities=None, full_search=False, **kwargs):
    ''' Walk up the file tree from the specified path and return the
    nearest matching file(s).

    Args:
        path (str): The file to search from.
        return_type (str): What to return; must be one of 'file' (default)
            or 'tuple'.
        strict (bool): When True, all entities present in both the input
            path and the target file(s) must match perfectly. When False,
            files will be ordered by the number of matching entities, and
            partial matches will be allowed.
        all_ (bool): When True, returns all matching files. When False
            (default), only returns the first match.
        ignore_strict_entities (list): Optional list of entities to
            exclude from strict matching when strict is True. This allows
            one to search, e.g., for files of a different type while
            matching all other entities perfectly by passing
            ignore_strict_entities=['type'].
        full_search (bool): If True, searches all indexed files, even if
            they don't share a common root with the provided path. If
            False, only files that share a common root will be scanned.
        kwargs: Optional keywords to pass on to .get().
    '''
    # Extract every entity value embedded in the query path itself.
    entities = {}
    for ent in self.entities.values():
        m = ent.regex.search(path)
        if m:
            entities[ent.name] = ent._astype(m.group(1))

    # Remove any entities we want to ignore when strict matching is on
    if strict and ignore_strict_entities is not None:
        for k in ignore_strict_entities:
            entities.pop(k, None)

    # Group all candidate files by their containing directory.
    results = self.get(return_type='file', **kwargs)
    folders = defaultdict(list)
    for filename in results:
        f = self.get_file(filename)
        folders[f.dirname].append(f)

    def count_matches(f):
        # Returns [shared, matching]: the number of entities the file has
        # in common with the query path, and how many of those values are
        # equal.
        f_ents = f.entities
        keys = set(entities.keys()) & set(f_ents.keys())
        shared = len(keys)
        return [shared, sum([entities[k] == f_ents[k] for k in keys])]

    matches = []
    search_paths = []

    # Walk from the query path up to the filesystem root, collecting each
    # ancestor directory that holds candidate files (nearest first).
    while True:
        if path in folders and folders[path]:
            search_paths.append(path)
        parent = dirname(path)
        if parent == path:
            break
        path = parent

    if full_search:
        # Also scan directories off the ancestor chain (order is the
        # arbitrary set order, so these are tried after the chain).
        unchecked = set(folders.keys()) - set(search_paths)
        search_paths.extend(path for path in unchecked if folders[path])

    for path in search_paths:
        # Sort by number of matching entities. Also store number of
        # common entities, for filtering when strict=True.
        num_ents = [[f] + count_matches(f) for f in folders[path]]
        # Filter out imperfect matches (i.e., where number of common
        # entities does not equal number of matching entities).
        if strict:
            num_ents = [f for f in num_ents if f[1] == f[2]]
        num_ents.sort(key=lambda x: x[2], reverse=True)

        if num_ents:
            matches.append(num_ents[0][0])

        if not all_:
            break

    # Convert File objects to the requested representation.
    matches = [m.path if return_type == 'file' else m.as_named_tuple()
               for m in matches]
    return matches if all_ else matches[0] if matches else None
def clone(self):
    """Return a fully independent deep copy of the entire Layout."""
    return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
    """Parse entity values out of a filename without touching the index.

    Args:
        filename (str): The path to parse.
        domains (list): Domain names whose entities are applied; must be
            given explicitly when filename has no directory component.

    Returns:
        dict: Entity name -> extracted value.
    """
    parent, leaf = dirname(filename), basename(filename)
    if domains is None:
        if not parent:
            msg = ("If a relative path is provided as the filename "
                   "argument, you *must* specify the names of the "
                   "domains whose entities are to be extracted. "
                   "Available domains for the current layout are: %s"
                   % list(self.domains.keys()))
            raise ValueError(msg)
        domains = list(self.domains.keys())
    return self._index_file(parent, leaf, domains,
                            update_layout=False).entities
def build_path(self, source, path_patterns=None, strict=False,
               domains=None):
    """Build a target filename for a file or a dict of entity values.

    Args:
        source (str, File, dict): A File object, the path of a tracked
            File, or a dict mapping entity names to values.
        path_patterns (list): Optional patterns for constructing the new
            path; gathered from the scanned domains when None.
        strict (bool): When True, every entity must be consumed by a
            pattern; when False, extras are ignored as long as the
            mandatory entities are found.
        domains (str, list): Optional domain name(s) scanned for path
            patterns, earlier names taking precedence. All domains when
            None.
    """
    # Resolve a path string to its File object first.
    if isinstance(source, six.string_types):
        if source not in self.files:
            source = join(self.root, source)
        source = self.get_file(source)

    # A File is reduced to its entity dictionary.
    if isinstance(source, File):
        source = source.entities

    if path_patterns is None:
        chosen = list(self.domains.keys()) if domains is None else domains
        path_patterns = []
        for dom in listify(chosen):
            path_patterns.extend(self.domains[dom].path_patterns)

    return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.

    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when a output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix
            to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        _files = list(set(files).intersection(_files))

    # Bug fix: the root argument used to be silently ignored (self.root
    # was always passed). Honor an explicit root; fall back to the
    # Layout root when none is given, preserving prior behavior.
    if root is None:
        root = self.root

    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generated file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
link_to (str): Optional path of an existing file to link to instead
of writing contents; passed through to the module-level
write_contents_to_file helper.
content_mode (str): How contents are serialized ('text' by
default); passed through to the module-level helper.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
# Build the target filename from the entities and available patterns.
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
# Delegate the actual write/link to the module-level helper.
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.save_index | python | def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
# For each indexed file, record its domains and a mapping of
# entity id -> tagged value; this mirrors what load_index expects.
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile) | Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L613-L628 | null | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to use regex search (True) or to
require exact matching (False, default) when comparing the
query string to each entity in .get() calls. This sets a
default for the instance, but can be overridden in individual
.get() requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least one of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
# The special string 'self' means this (subclassed) Layout instance
# supplies the entity mapper methods itself.
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
# Plain strings are search paths with no associated domains (yet).
if isinstance(p, six.string_types):
add_path(p, [])
else:
# (paths, domains) tuples: resolve each domain spec exactly once.
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
# Either build the index by scanning, or restore a saved one.
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
# An unknown string is treated as a path to a JSON config file.
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
# Re-registering an existing name is a no-op (with a warning).
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
# Register the domain's entities on the Layout before storing it.
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
    """Return a dict of all Entities defined by the named Domains.

    Domains are merged in list order, so on key collisions a later
    domain's entity overwrites an earlier one's.
    """
    selected = self.domains if domains is None else domains
    merged = {}
    for name in list(selected):
        merged.update(self.domains[name].entities)
    return merged
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
# A domain with include rules rejects anything it doesn't match.
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
# NOTE(review): exclude searches pass flags=re.UNICODE while the
# include searches above do not -- confirm this asymmetry is
# intentional.
if re.search(regex, filename, flags=re.UNICODE):
return False
# NOTE(review): the original (indented) source places this final
# 'return True' after the domain loop, so a file is kept only when
# no domain excludes it; the flattened text here is ambiguous.
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
    """Build a File object for filename *f* under directory *root*.

    Factory hook: subclasses override this to return specialized File
    subtypes or to customize construction.
    """
    full_path = join(root, f)
    return File(full_path)
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
''' Index a single file against the entities of the given domain(s).
Args:
root (str): Directory containing the file.
f (str): Name of the file to index.
domains (str, list): Name(s) of the domain(s) whose entities are
matched against the file.
update_layout (bool): If True, register the file and its matched
entity values on the Layout, the Entities, and the Domains;
if False, just build and return the tagged File object.
Returns: the constructed File object.
'''
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
# Stop scanning this domain's entities as soon as a mandatory
# entity fails to match.
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
# Tag the file with each matched entity/value pair.
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
''' (Re)build the file index by recursively scanning every registered
search path. Directory-specific config files (self.config_filename)
found along the way add their Domains to the subtree being scanned.
'''
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
# Load the local config and apply its domain to this subtree;
# the config file itself is not indexed.
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
# f is already an absolute path here, so join() leaves it as-is.
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
# Recurse with a copy so sibling dirs don't share state.
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
# Reconstruct the File and its tags directly from the saved
# entity values, skipping re-matching.
# NOTE(review): the saved 'domains' list is never re-attached to
# the File in this branch, so a subsequent save_index() may not
# round-trip domain information -- confirm.
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking.
Args:
domain (Domain): The Domain the new Entity belongs to.
kwargs: Passed on to the Entity constructor (e.g., name, pattern,
mandatory, directory, aliases, map_func).
'''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
# A non-callable map_func is the *name* of a method on the
# entity_mapper object supplied at construction time.
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
# Track mandatory entity ids; these must match during file
# indexing (see _index_file).
self.mandatory.add(ent.id)
if ent.directory is not None:
# Expand the {{root}} placeholder in directory templates.
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
# e.g., an entity named 'subject' gains a get_subjects() method.
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
        domains=None, regex_search=None, **kwargs):
    """
    Retrieve files and/or metadata from the current Layout.
    Args:
        return_type (str): Type of result to return. Valid values:
            'tuple': returns a list of namedtuples containing file name as
                well as attribute/value pairs for all named entities.
            'file': returns a list of matching filenames.
            'dir': returns a list of directories.
            'id': returns a list of unique IDs. Must be used together with
                a valid target.
            'obj': returns a list of matching File objects.
        target (str): The name of the target entity to get results for
            (if return_type is 'dir' or 'id').
        extensions (str, list): One or more file extensions to filter on.
            Files with any other extensions will be excluded.
        domains (list): Optional list of domain names to scan for files.
            If None, all available domains are scanned.
        regex_search (bool or None): Whether to require exact matching
            (False) or regex search (True) when comparing the query string
            to each entity. If None (default), uses the value found in
            self.
        kwargs (dict): Any optional key/values to filter the entities on.
            Keys are entity names, values are regexes to filter on. For
            example, passing filter={'subject': 'sub-[12]'} would return
            only files that match the first two subjects.
    Returns:
        A named tuple (default) or a list (see return_type for details).
    """
    if regex_search is None:
        # Fall back to the instance-level default set at construction.
        regex_search = self.regex_search
    # Collect all indexed files that satisfy the entity filters.
    result = []
    filters = {}
    filters.update(kwargs)
    for filename, file in self.files.items():
        if not file._matches(filters, extensions, domains, regex_search):
            continue
        result.append(file)
    # Convert to relative paths if needed
    if not self.absolute_paths:
        for i, f in enumerate(result):
            # Copy so the indexed File objects keep their absolute paths.
            f = copy(f)
            f.path = relpath(f.path, self.root)
            result[i] = f
    if return_type == 'file':
        return natural_sort([f.path for f in result])
    if return_type == 'tuple':
        result = [r.as_named_tuple() for r in result]
        return natural_sort(result, field='filename')
    if return_type.startswith('obj'):
        return result
    else:
        valid_entities = self.get_domain_entities(domains)
        if target is None:
            raise ValueError('If return_type is "id" or "dir", a valid '
                             'target entity must also be specified.')
        # Only files that actually carry the target entity are relevant.
        result = [x for x in result if target in x.entities]
        if return_type == 'id':
            result = list(set([x.entities[target] for x in result]))
            return natural_sort(result)
        elif return_type == 'dir':
            template = valid_entities[target].directory
            if template is None:
                raise ValueError('Return type set to directory, but no '
                                 'directory template is defined for the '
                                 'target entity (\"%s\").' % target)
            # Construct regex search pattern from target directory template
            to_rep = re.findall('\{(.*?)\}', template)
            for ent in to_rep:
                patt = valid_entities[ent].pattern
                template = template.replace('{%s}' % ent, patt)
            template += '[^\%s]*$' % os.path.sep
            matches = [f.dirname for f in result
                       if re.search(template, f.dirname)]
            return natural_sort(list(set(matches)))
        else:
            # BUG FIX: the old message omitted the supported 'obj' option
            # and was missing its closing parenthesis.
            raise ValueError("Invalid return_type specified (must be one "
                             "of 'tuple', 'file', 'obj', 'id', or 'dir').")
def unique(self, entity):
    """
    Return a list of unique values for the named entity.
    Args:
        entity (str): The name of the entity to retrieve unique values of.
    """
    matched = self._find_entity(entity)
    return matched.unique()
def count(self, entity, files=False):
    """
    Return the count of unique values or files for the named entity.
    Args:
        entity (str): The name of the entity.
        files (bool): If True, counts the number of filenames that contain
            at least one value of the entity, rather than the number of
            unique values of the entity.
    """
    matched = self._find_entity(entity)
    return matched.count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
# pandas is imported lazily so it stays an optional dependency.
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
# One row per file; columns come from entity names, plus a leading
# 'path' column.
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
    """Return the File object indexed under path *f*.

    Raises a KeyError if the path has not been indexed.
    """
    file_index = self.files
    return file_index[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
# Extract entity values directly from the input path.
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
# Bucket candidate files by their containing directory.
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
# Returns [num shared entity keys, num keys whose values match].
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
# Walk up from path to the filesystem root, collecting ancestor
# directories that contain candidate files (nearest first).
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
# Append any remaining candidate folders after the ancestors.
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None
def clone(self):
''' Return a deep copy of the current Layout, including its full
file and entity index. '''
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
''' Extract entity values from a filename without modifying the index.
Args:
filename (str): The path to parse. If relative (no directory
component), domains must be explicitly provided.
domains (list): Names of the domains whose entities should be
used for extraction. Defaults to all registered domains.
Returns: A dict mapping entity names to extracted values.
'''
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
# update_layout=False: tag a transient File object without touching
# the Layout's index.
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
# Resolve a path string to an indexed File, retrying relative to
# the Layout root if the raw path isn't in the index.
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
# Collect patterns from the requested domains, in precedence order.
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.
    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when an output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        # Restrict to the intersection of the explicit list and the query.
        _files = list(set(files).intersection(_files))
    # BUG FIX: the `root` argument was previously ignored and self.root was
    # always passed to File.copy(); honor an explicitly provided root.
    if root is None:
        root = self.root
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generated file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
link_to (str): Optional path of an existing file to link to instead
of writing contents; passed through to the module-level
write_contents_to_file helper.
content_mode (str): How contents are serialized ('text' by
default); passed through to the module-level helper.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
# Build the target filename from the entities and available patterns.
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
# Delegate the actual write/link to the module-level helper.
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.load_index | python | def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
# Reconstruct the File and its tags directly from the saved
# entity values, skipping re-matching.
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val) | Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L630-L664 | [
"def _make_file_object(self, root, f):\n ''' Initialize a new File oject from a directory and filename. Extend\n in subclasses as needed. '''\n return File(join(root, f))\n",
"def _reset_index(self):\n # Reset indexes\n self.files = {}\n for ent in self.entities.values():\n ent.files = {}\n",
"def _index_file(self, root, f, domains, update_layout=True):\n\n # Create the file object--allows for subclassing\n f = self._make_file_object(root, f)\n\n for domain in listify(domains):\n domain = self.domains[domain]\n match_vals = {}\n for e in domain.entities.values():\n m = e.match_file(f)\n if m is None and e.mandatory:\n break\n if m is not None:\n match_vals[e.name] = (e, m)\n\n if match_vals:\n for k, (ent, val) in match_vals.items():\n f.tags[k] = Tag(ent, val)\n if update_layout:\n ent.add_file(f.path, val)\n\n if update_layout:\n domain.add_file(f)\n\n if update_layout:\n f.domains = domains\n\n self.files[f.path] = f\n\n return f\n",
"def _make_file_object(self, root, f):\n filepath = str(psp.join(root, f))\n with self._hdfs_client.read(filepath):\n return File(filepath)\n"
class Layout(object):
    """Indexes files under one or more directory trees and supports
    entity-based querying over them (see :meth:`get`)."""

    def __init__(self, paths, root=None, index=None,
                 dynamic_getters=False, absolute_paths=True,
                 regex_search=False, entity_mapper=None, path_patterns=None,
                 config_filename='layout.json', include=None, exclude=None):
        """
        A container for all the files and metadata found at the specified path.

        Args:
            paths (str, list): The path(s) where project files are located.
                Must be one of:

                - A path to a directory containing files to index
                - A list of paths to directories to index
                - A list of 2-tuples where each tuple encodes a mapping from
                  directories to domains. The first element is a string or
                  list giving the paths to one or more directories to index.
                  The second element specifies which domains to apply to the
                  specified files, and can be one of:

                  * A string giving the path to a JSON config file
                  * A dictionary containing config information
                  * A list of any combination of strings or dicts
            root (str): Optional directory that all other paths will be
                relative to. If set, every other path the Layout sees must be
                at this level or below. If None, filesystem root ('/') is
                used.
            index (str): Optional path to a saved index file. If a valid value
                is passed, this index is used to populate Files and Entities,
                and the normal indexing process (which requires scanning all
                files in the project) is skipped.
            dynamic_getters (bool): If True, a get_{entity_name}() method will
                be dynamically added to the Layout every time a new Entity is
                created. This is implemented by creating a partial function of
                the get() function that sets the target argument to the
                entity name.
            absolute_paths (bool): If True, grabbit uses absolute file paths
                everywhere (including when returning query results). If
                False, the input path will determine the behavior (i.e.,
                relative if a relative path was passed, absolute if an
                absolute path was passed).
            regex_search (bool): Whether to require exact matching (True)
                or regex search (False, default) when comparing the query
                string to each entity in .get() calls. This sets a default
                for the instance, but can be overridden in individual .get()
                requests.
            entity_mapper (object, str): An optional object containing methods
                for indexing specific entities. If passed, the object must
                contain a named method for every value that appears in the
                JSON config file under the "mapper" key of an Entity's entry.
                For example, if an entity "type" is defined that contains the
                key/value pair "mapper": "extract_type", then the passed
                object must contain an .extract_type() method.
                Alternatively, the special string "self" can be passed, in
                which case the current Layout instance will be used as the
                entity mapper (implying that the user has subclassed Layout).
            path_patterns (str, list): One or more filename patterns to use
                as a default path pattern for this layout's files. Can also
                be specified in the config file.
            config_filename (str): The name of directory-specific config
                files. Every directory will be scanned for this file, and if
                found, the config file will be read in and added to the list
                of configs.
            include (str, list): A string or list specifying regexes used to
                globally filter files when indexing. A file or directory
                *must* match at least one of the passed values in order to be
                retained in the index. Cannot be used together with
                'exclude'.
            exclude (str, list): A string or list specifying regexes used to
                globally filter files when indexing. If a file or directory
                matches any of the passed values, it will be dropped from
                indexing. Cannot be used together with 'include'.
        """
        # The two global filters are mutually exclusive by design.
        if include is not None and exclude is not None:
            raise ValueError("You cannot specify both the include and exclude"
                             " arguments. Please pass at most one of these.")
        self.entities = OrderedDict()  # entity id (and aliases) -> Entity
        self.files = {}  # file path -> File
        self.mandatory = set()  # ids of entities marked mandatory
        self.dynamic_getters = dynamic_getters
        self.regex_search = regex_search
        # 'self' lets a Layout subclass act as its own entity mapper.
        self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
        self.path_patterns = path_patterns if path_patterns else []
        self.config_filename = config_filename
        self.domains = OrderedDict()  # domain name -> Domain
        self.include = listify(include or [])
        self.exclude = listify(exclude or [])
        self.absolute_paths = absolute_paths
        if root is None:
            root = '/'
        self.root = abspath(root)
        # NOTE(review): _domain_map is set here but never read in this
        # class--possibly reserved for subclasses. TODO: confirm.
        self._domain_map = {}
        # Extract path --> domain mapping
        self._paths_to_index = {}

        def add_path(path, val):
            # Keys of _paths_to_index are always absolute paths.
            path = abspath(path)
            self._paths_to_index[path] = val
        for p in listify(paths, ignore=list):
            if isinstance(p, six.string_types):
                add_path(p, [])
            else:
                # (paths, domains) 2-tuple: resolve each domain spec first.
                doms = listify(p[1])
                doms = [self._get_or_load_domain(d) for d in doms]
                for elem in listify(p[0]):
                    add_path(elem, doms)
        # Verify existence of all paths
        for p in self._paths_to_index:
            if not exists(p):
                raise ValueError("Search path {} doesn't exist.".format(p))
        if index is None:
            self.index()
        else:
            # load_index is defined elsewhere in this class/file.
            self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
    def _index_file(self, root, f, domains, update_layout=True):
        """Match one file against each domain's entities and tag it.

        Args:
            root (str): Directory containing the file.
            f (str): Filename within ``root``.
            domains (str, list): Name(s) of domains to match against.
            update_layout (bool): If True, also register the File and its
                entity values in the Domain/Entity/Layout indexes; if False,
                the File is only constructed and tagged (as used by
                ``parse_file_entities``).

        Returns:
            The constructed File object.
        """
        # Create the file object--allows for subclassing
        f = self._make_file_object(root, f)

        for domain in listify(domains):
            domain = self.domains[domain]
            match_vals = {}
            for e in domain.entities.values():
                m = e.match_file(f)
                # A failed mandatory entity disqualifies the whole domain.
                if m is None and e.mandatory:
                    break
                if m is not None:
                    match_vals[e.name] = (e, m)

            # Only tag the file if at least one entity in the domain matched.
            if match_vals:
                for k, (ent, val) in match_vals.items():
                    f.tags[k] = Tag(ent, val)
                    if update_layout:
                        ent.add_file(f.path, val)

                if update_layout:
                    domain.add_file(f)

        if update_layout:
            f.domains = domains

        # NOTE(review): the file is stored in self.files even when
        # update_layout is False--confirm this is intentional.
        self.files[f.path] = f

        return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
    def index(self):
        """(Re)build the file index by recursively scanning every directory
        registered in ``_paths_to_index``."""
        self._reset_index()

        def _index_dir(dir_, domains):
            # _get_files returns bare names; contents holds joined paths.
            contents = [join(dir_, f) for f in self._get_files(dir_)]

            # Check for domain config file
            config_file = join(dir_, self.config_filename)
            if exists(config_file):
                new_dom = self._get_or_load_domain(config_file)
                if new_dom not in domains:
                    domains.append(new_dom)
                contents.remove(config_file)

            contents = filter(lambda x: self._check_inclusions(x, domains),
                              contents)

            # If the directory was explicitly passed in Layout init,
            # overwrite the current set of domains with what was passed
            domains = self._paths_to_index.get(dir_, domains)

            for f in contents:
                # NOTE(review): f was already joined with dir_ above, so
                # this second join is a no-op only when paths are absolute
                # (indexed roots are abspath'd in __init__)--confirm for
                # subclasses that override _get_files.
                full_path = join(dir_, f)
                if isdir(full_path):
                    if self._validate_dir(full_path):
                        # Copy the domain list so siblings aren't affected.
                        _index_dir(full_path, list(domains))
                elif self._validate_file(full_path):
                    _dir, _base = split(full_path)
                    dom_names = [d.name for d in domains]
                    self._index_file(_dir, _base, dom_names)

        # Index each directory
        for path, domains in self._paths_to_index.items():
            _index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
    def add_entity(self, domain, **kwargs):
        """Register a new Entity with this Layout and the given Domain.

        Args:
            domain (Domain): The Domain the new Entity belongs to.
            kwargs: Forwarded to the Entity constructor (e.g., name,
                mandatory, directory, aliases, map_func).
        """
        # Set the entity's mapping func if one was specified. A non-callable
        # value is taken as the name of a method on the entity mapper.
        map_func = kwargs.get('map_func', None)
        if map_func is not None and not callable(kwargs['map_func']):
            if self.entity_mapper is None:
                raise ValueError("Mapping function '%s' specified for Entity "
                                 "'%s', but no entity mapper was passed when "
                                 "initializing the current Layout. Please make"
                                 " sure the 'entity_mapper' argument is set." %
                                 (map_func, kwargs['name']))
            map_func = getattr(self.entity_mapper, kwargs['map_func'])
            kwargs['map_func'] = map_func

        ent = Entity(domain=domain, **kwargs)
        domain.add_entity(ent)

        if ent.mandatory:
            self.mandatory.add(ent.id)
        # Expand the {{root}} placeholder in entity directory templates.
        if ent.directory is not None:
            ent.directory = ent.directory.replace('{{root}}', self.root)

        # Index the entity under its id and all of its aliases.
        self.entities[ent.id] = ent
        for alias in ent.aliases:
            self.entities[alias] = ent

        if self.dynamic_getters:
            # E.g., an entity named 'subject' yields a get_subjects() method
            # that returns the unique ids for that entity.
            func = partial(getattr(self, 'get'), target=ent.name,
                           return_type='id')
            func_name = inflect.engine().plural(ent.name)
            setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
    def get_nearest(self, path, return_type='file', strict=True, all_=False,
                    ignore_strict_entities=None, full_search=False, **kwargs):
        ''' Walk up the file tree from the specified path and return the
        nearest matching file(s).

        Args:
            path (str): The file to search from.
            return_type (str): What to return; must be one of 'file'
                (default) or 'tuple'.
            strict (bool): When True, all entities present in both the input
                path and the target file(s) must match perfectly. When False,
                files will be ordered by the number of matching entities, and
                partial matches will be allowed.
            all_ (bool): When True, returns all matching files. When False
                (default), only returns the first match.
            ignore_strict_entities (list): Optional list of entities to
                exclude from strict matching when strict is True. This allows
                one to search, e.g., for files of a different type while
                matching all other entities perfectly by passing
                ignore_strict_entities=['type'].
            full_search (bool): If True, searches all indexed files, even if
                they don't share a common root with the provided path. If
                False, only files that share a common root will be scanned.
            kwargs: Optional keywords to pass on to .get().
        '''
        # Extract the entity values present in the query path itself.
        entities = {}
        for ent in self.entities.values():
            m = ent.regex.search(path)
            if m:
                entities[ent.name] = ent._astype(m.group(1))

        # Remove any entities we want to ignore when strict matching is on
        if strict and ignore_strict_entities is not None:
            for k in ignore_strict_entities:
                entities.pop(k, None)

        results = self.get(return_type='file', **kwargs)

        # Group candidate files by their containing directory.
        folders = defaultdict(list)

        for filename in results:
            f = self.get_file(filename)
            folders[f.dirname].append(f)

        def count_matches(f):
            # Returns [n shared entity names, n entities with equal values].
            f_ents = f.entities
            keys = set(entities.keys()) & set(f_ents.keys())
            shared = len(keys)
            return [shared, sum([entities[k] == f_ents[k] for k in keys])]

        matches = []

        # Collect candidate directories from path upward to the filesystem
        # root (the loop rebinds `path` to each successive parent).
        search_paths = []
        while True:
            if path in folders and folders[path]:
                search_paths.append(path)
            parent = dirname(path)
            if parent == path:
                break
            path = parent

        if full_search:
            unchecked = set(folders.keys()) - set(search_paths)
            search_paths.extend(path for path in unchecked if folders[path])

        for path in search_paths:
            # Sort by number of matching entities. Also store number of
            # common entities, for filtering when strict=True.
            num_ents = [[f] + count_matches(f) for f in folders[path]]
            # Filter out imperfect matches (i.e., where number of common
            # entities does not equal number of matching entities).
            if strict:
                num_ents = [f for f in num_ents if f[1] == f[2]]
            num_ents.sort(key=lambda x: x[2], reverse=True)

            if num_ents:
                matches.append(num_ents[0][0])

            if not all_:
                break

        matches = [m.path if return_type == 'file' else m.as_named_tuple()
                   for m in matches]

        return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
    def build_path(self, source, path_patterns=None, strict=False,
                   domains=None):
        ''' Constructs a target filename for a file or dictionary of entities.

        Args:
            source (str, File, dict): The source data to use to construct
                the new file path. Must be one of:

                - A File object
                - A string giving the path of a File contained within the
                  current Layout.
                - A dict of entities, with entity names in keys and values
                  in values

            path_patterns (list): Optional path patterns to use to construct
                the new file path. If None, the Layout-defined patterns will
                be used.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra
                entities will be ignored so long as all mandatory entities
                are found.
            domains (str, list): Optional name(s) of domain(s) to scan for
                path patterns. If None, all domains are scanned. If two or
                more domains are provided, the order determines the
                precedence of path patterns (i.e., earlier domains will have
                higher precedence).
        '''
        # Normalize a path string to its File's entity dict.
        if isinstance(source, six.string_types):
            if source not in self.files:
                source = join(self.root, source)
            source = self.get_file(source)

        if isinstance(source, File):
            source = source.entities

        # Fall back on the domains' registered patterns, in domain order.
        if path_patterns is None:
            if domains is None:
                domains = list(self.domains.keys())
            path_patterns = []
            for dom in listify(domains):
                path_patterns.extend(self.domains[dom].path_patterns)

        # NOTE: this invokes the module-level build_path() utility, which
        # this method name deliberately shadows as a class attribute.
        return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
    def write_contents_to_file(self, entities, path_patterns=None,
                               contents=None, link_to=None,
                               content_mode='text', conflicts='fail',
                               strict=False, domains=None, index=False,
                               index_domains=None):
        """
        Write arbitrary data to a file defined by the passed entities and
        path patterns.

        Args:
            entities (dict): A dictionary of entities, with Entity names in
                keys and values for the desired file in values.
            path_patterns (list): Optional path patterns to use when
                building the filename. If None, the Layout-defined patterns
                will be used.
            contents (object): Contents to write to the generate file path.
                Can be any object serializable as text or binary data (as
                defined in the content_mode argument).
            link_to (str): Optional path to link the new file to instead of
                writing contents directly.
            content_mode (str): Either 'text' or 'binary'; controls how
                ``contents`` is serialized.
            conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
                that defines the desired action when the output path already
                exists. 'fail' raises an exception; 'skip' does nothing;
                'overwrite' overwrites the existing file; 'append' adds a
                suffix to each file copy, starting with 1. Default is
                'fail'.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra
                entities will be ignored so long as all mandatory entities
                are found.
            domains (list): List of Domains to scan for path_patterns. Order
                determines precedence (i.e., earlier Domains will be scanned
                first). If None, all available domains are included.
            index (bool): If True, adds the generated file to the current
                index using the domains specified in index_domains.
            index_domains (list): List of domain names to attach the
                generated file to when indexing. Ignored if index == False.
                If None, all available domains are used.
        """
        path = self.build_path(entities, path_patterns, strict, domains)

        if path is None:
            raise ValueError("Cannot construct any valid filename for "
                             "the passed entities given available path "
                             "patterns.")

        # This calls the module-level write_contents_to_file() utility,
        # which this method name deliberately shadows.
        write_contents_to_file(path, contents=contents, link_to=link_to,
                               content_mode=content_mode, conflicts=conflicts,
                               root=self.root)

        if index:
            # TODO: Default to using only domains that have at least one
            # tagged entity in the generated file.
            if index_domains is None:
                index_domains = list(self.domains.keys())
            self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.add_entity | python | def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func) | Add a new Entity to tracking. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L666-L697 | [
"def plural(self, text, count=None):\n \"\"\"\n Return the plural of text.\n\n If count supplied, then return text if count is one of:\n 1, a, an, one, each, every, this, that\n otherwise return the plural.\n\n Whitespace at the start and end is preserved.\n\n \"\"\"\n pre, word, post = self.partition_word(text)\n if not word:\n return text\n plural = self.postprocess(\n word,\n self._pl_special_adjective(word, count)\n or self._pl_special_verb(word, count)\n or self._plnoun(word, count),\n )\n return \"{}{}{}\".format(pre, plural, post)\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
    def _validate_dir(self, d):
        ''' Extend this in subclasses to provide additional directory
        validation. Will be called the first time a directory is read in; if
        False is returned, the directory will be ignored and dropped from the
        layout.
        '''
        # By default, directories are validated with the same logic as files.
        return self._validate_file(d)
    def _validate_file(self, f):
        ''' Extend this in subclasses to provide additional file validation.
        Will be called the first time each file is read in; if False is
        returned, the file will be ignored and dropped from the layout. '''
        # Base implementation accepts every file; subclasses override this to
        # filter.
        return True
    def _get_files(self, root):
        ''' Returns all files in directory (non-recursively). '''
        # Note: os.listdir returns both files and subdirectories, in arbitrary
        # order; the caller (index()) is responsible for recursing/filtering.
        return os.listdir(root)
    def _make_file_object(self, root, f):
        ''' Initialize a new File object from a directory and filename. Extend
        in subclasses as needed. '''
        # 'root' is the containing directory; 'f' is the bare filename.
        return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
    def _index_file(self, root, f, domains, update_layout=True):
        """Build a File object for root/f and tag it with matching entities.

        Args:
            root (str): Directory containing the file.
            f (str): Bare filename within ``root``.
            domains (list): Names of the domains whose entities are matched
                against the file.
            update_layout (bool): If True, the file and its entity values are
                recorded in the Layout/Domain/Entity indexes; if False, the
                File is built and tagged but nothing is stored.

        Returns:
            The constructed (and possibly indexed) File object.
        """
        # Create the file object--allows for subclassing
        f = self._make_file_object(root, f)
        for domain in listify(domains):
            domain = self.domains[domain]
            match_vals = {}
            for e in domain.entities.values():
                m = e.match_file(f)
                # A failed match on a mandatory entity disqualifies the file
                # from this entire domain.
                if m is None and e.mandatory:
                    break
                if m is not None:
                    match_vals[e.name] = (e, m)
            if match_vals:
                for k, (ent, val) in match_vals.items():
                    f.tags[k] = Tag(ent, val)
                    if update_layout:
                        ent.add_file(f.path, val)
            if update_layout:
                domain.add_file(f)
        if update_layout:
            f.domains = domains
            self.files[f.path] = f
        return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
    def index(self):
        """(Re)build the file index by recursively scanning every root path
        that was registered at construction time."""
        self._reset_index()

        def _index_dir(dir_, domains):
            # List directory contents as full paths.
            contents = [join(dir_, f) for f in self._get_files(dir_)]
            # Check for domain config file; if found, load it and extend the
            # set of domains in effect from this directory downward.
            config_file = join(dir_, self.config_filename)
            if exists(config_file):
                new_dom = self._get_or_load_domain(config_file)
                if new_dom not in domains:
                    domains.append(new_dom)
                contents.remove(config_file)
            contents = filter(lambda x: self._check_inclusions(x, domains),
                              contents)
            # If the directory was explicitly passed in Layout init,
            # overwrite the current set of domains with what was passed
            domains = self._paths_to_index.get(dir_, domains)
            for f in contents:
                # 'f' is already a full path here, so join() leaves it
                # unchanged when it is absolute.
                full_path = join(dir_, f)
                if isdir(full_path):
                    if self._validate_dir(full_path):
                        # Recurse with a copy of the domain list so sibling
                        # directories are unaffected.
                        _index_dir(full_path, list(domains))
                elif self._validate_file(full_path):
                    _dir, _base = split(full_path)
                    dom_names = [d.name for d in domains]
                    self._index_file(_dir, _base, dom_names)

        # Index each directory
        for path, domains in self._paths_to_index.items():
            _index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
    def load_index(self, filename, reindex=False):
        ''' Load the Layout's index from a plaintext file.
        Args:
            filename (str): Path to the plaintext index file.
            reindex (bool): If True, discards entity values provided in the
                loaded index and instead re-indexes every file in the loaded
                index against the entities defined in the config. Default is
                False, in which case it is assumed that all entity definitions
                in the loaded index are correct and do not need any further
                validation.
        Note: At the moment, directory-specific config files aren't serialized.
        This means reconstructed indexes will only work properly in cases
        where there aren't multiple layout specs within a project.
        '''
        self._reset_index()
        with open(filename, 'r') as fobj:
            data = json.load(fobj)
        for path, file in data.items():
            ents, domains = file['entities'], file['domains']
            root, f = dirname(path), basename(path)
            if reindex:
                # Re-run the normal matching machinery from scratch.
                self._index_file(root, f, domains)
            else:
                # Trust the stored entity values: rebuild the File and its
                # tags directly, then register it with each Entity.
                f = self._make_file_object(root, f)
                tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
                f.tags = tags
                self.files[f.path] = f
                for ent, val in f.entities.items():
                    self.entities[ent].add_file(f.path, val)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
    def get_file(self, f):
        ''' Return File object for the specified path. '''
        # Raises KeyError if the path has not been indexed.
        return self.files[f]
    def get_nearest(self, path, return_type='file', strict=True, all_=False,
                    ignore_strict_entities=None, full_search=False, **kwargs):
        ''' Walk up the file tree from the specified path and return the
        nearest matching file(s).
        Args:
            path (str): The file to search from.
            return_type (str): What to return; must be one of 'file' (default)
                or 'tuple'.
            strict (bool): When True, all entities present in both the input
                path and the target file(s) must match perfectly. When False,
                files will be ordered by the number of matching entities, and
                partial matches will be allowed.
            all_ (bool): When True, returns all matching files. When False
                (default), only returns the first match.
            ignore_strict_entities (list): Optional list of entities to
                exclude from strict matching when strict is True. This allows
                one to search, e.g., for files of a different type while
                matching all other entities perfectly by passing
                ignore_strict_entities=['type'].
            full_search (bool): If True, searches all indexed files, even if
                they don't share a common root with the provided path. If
                False, only files that share a common root will be scanned.
            kwargs: Optional keywords to pass on to .get().
        '''
        # Extract the entity values encoded in the query path itself.
        entities = {}
        for ent in self.entities.values():
            m = ent.regex.search(path)
            if m:
                entities[ent.name] = ent._astype(m.group(1))
        # Remove any entities we want to ignore when strict matching is on
        if strict and ignore_strict_entities is not None:
            for k in ignore_strict_entities:
                entities.pop(k, None)
        results = self.get(return_type='file', **kwargs)
        # Group candidate files by containing directory.
        folders = defaultdict(list)
        for filename in results:
            f = self.get_file(filename)
            folders[f.dirname].append(f)
        def count_matches(f):
            # Returns [number of shared entity names, number of those whose
            # values actually agree with the query path].
            f_ents = f.entities
            keys = set(entities.keys()) & set(f_ents.keys())
            shared = len(keys)
            return [shared, sum([entities[k] == f_ents[k] for k in keys])]
        matches = []
        # Build the list of directories to search by walking up toward the
        # filesystem root from the query path.
        search_paths = []
        while True:
            if path in folders and folders[path]:
                search_paths.append(path)
            parent = dirname(path)
            if parent == path:
                break
            path = parent
        if full_search:
            unchecked = set(folders.keys()) - set(search_paths)
            search_paths.extend(path for path in unchecked if folders[path])
        for path in search_paths:
            # Sort by number of matching entities. Also store number of
            # common entities, for filtering when strict=True.
            num_ents = [[f] + count_matches(f) for f in folders[path]]
            # Filter out imperfect matches (i.e., where number of common
            # entities does not equal number of matching entities).
            if strict:
                num_ents = [f for f in num_ents if f[1] == f[2]]
            num_ents.sort(key=lambda x: x[2], reverse=True)
            if num_ents:
                matches.append(num_ents[0][0])
            if not all_:
                break
        matches = [m.path if return_type == 'file' else m.as_named_tuple()
                   for m in matches]
        return matches if all_ else matches[0] if matches else None
    def clone(self):
        """Return a deep copy of this Layout."""
        return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
    def build_path(self, source, path_patterns=None, strict=False,
                   domains=None):
        ''' Constructs a target filename for a file or dictionary of entities.
        Args:
            source (str, File, dict): The source data to use to construct the
                new file path. Must be one of:
                - A File object
                - A string giving the path of a File contained within the
                  current Layout.
                - A dict of entities, with entity names in keys and values in
                  values
            path_patterns (list): Optional path patterns to use to construct
                the new file path. If None, the Layout-defined patterns will
                be used.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
            domains (str, list): Optional name(s) of domain(s) to scan for
                path patterns. If None, all domains are scanned. If two or more
                domains are provided, the order determines the precedence of
                path patterns (i.e., earlier domains will have higher
                precedence).
        '''
        if isinstance(source, six.string_types):
            # Treat the string as the path of an already-indexed File;
            # relative paths are resolved against the Layout root.
            if source not in self.files:
                source = join(self.root, source)
            source = self.get_file(source)
        if isinstance(source, File):
            source = source.entities
        if path_patterns is None:
            if domains is None:
                domains = list(self.domains.keys())
            path_patterns = []
            # Earlier domains take precedence because their patterns come
            # first in the list.
            for dom in listify(domains):
                path_patterns.extend(self.domains[dom].path_patterns)
        # Delegates to the module-level build_path() helper (imported
        # elsewhere in this file).
        return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
    def write_contents_to_file(self, entities, path_patterns=None,
                               contents=None, link_to=None,
                               content_mode='text', conflicts='fail',
                               strict=False, domains=None, index=False,
                               index_domains=None):
        """
        Write arbitrary data to a file defined by the passed entities and
        path patterns.
        Args:
            entities (dict): A dictionary of entities, with Entity names in
                keys and values for the desired file in values.
            path_patterns (list): Optional path patterns to use when building
                the filename. If None, the Layout-defined patterns will be
                used.
            contents (object): Contents to write to the generate file path.
                Can be any object serializable as text or binary data (as
                defined in the content_mode argument).
            conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
                that defines the desired action when the output path already
                exists. 'fail' raises an exception; 'skip' does nothing;
                'overwrite' overwrites the existing file; 'append' adds a suffix
                to each file copy, starting with 1. Default is 'fail'.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
            domains (list): List of Domains to scan for path_patterns. Order
                determines precedence (i.e., earlier Domains will be scanned
                first). If None, all available domains are included.
            index (bool): If True, adds the generated file to the current
                index using the domains specified in index_domains.
            index_domains (list): List of domain names to attach the generated
                file to when indexing. Ignored if index == False. If None,
                All available domains are used.
        """
        path = self.build_path(entities, path_patterns, strict, domains)
        if path is None:
            raise ValueError("Cannot construct any valid filename for "
                             "the passed entities given available path "
                             "patterns.")
        # Delegates the actual write to the module-level helper of the same
        # name (resolved from module scope, not recursion into this method).
        write_contents_to_file(path, contents=contents, link_to=link_to,
                               content_mode=content_mode, conflicts=conflicts,
                               root=self.root)
        if index:
            # TODO: Default to using only domains that have at least one
            # tagged entity in the generated file.
            if index_domains is None:
                index_domains = list(self.domains.keys())
            self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.get | python | def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.") | Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details). | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L699-L791 | [
"def natural_sort(l, field=None):\n '''\n based on snippet found at http://stackoverflow.com/a/4836734/2445984\n '''\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n\n def alphanum_key(key):\n if field is not None:\n key = getattr(key, field)\n if not isinstance(key, str):\n key = str(key)\n return [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n",
"def get_domain_entities(self, domains=None):\n # Get all Entities included in the specified Domains, in the same\n # order as Domains in the list.\n if domains is None:\n domains = list(self.domains.keys())\n\n ents = {}\n for d in domains:\n ents.update(self.domains[d].entities)\n return ents\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
    def load_index(self, filename, reindex=False):
        ''' Load the Layout's index from a plaintext (JSON) file.

        Args:
            filename (str): Path to the plaintext index file.
            reindex (bool): If True, discards entity values provided in the
                loaded index and instead re-indexes every file in the loaded
                index against the entities defined in the config. Default is
                False, in which case it is assumed that all entity definitions
                in the loaded index are correct and do not need any further
                validation.

        Note: At the moment, directory-specific config files aren't serialized.
        This means reconstructed indexes will only work properly in cases
        where there aren't multiple layout specs within a project.
        '''
        self._reset_index()
        with open(filename, 'r') as fobj:
            data = json.load(fobj)
        for path, file in data.items():
            ents, domains = file['entities'], file['domains']
            root, f = dirname(path), basename(path)
            if reindex:
                # Re-extract entity values from scratch; stored values are
                # discarded in favor of the current config's entities.
                self._index_file(root, f, domains)
            else:
                # Trust the stored values: rebuild the File object and its
                # Tags directly, then register the file with each Entity so
                # subsequent lookups can see it.
                f = self._make_file_object(root, f)
                tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
                f.tags = tags
                self.files[f.path] = f
                for ent, val in f.entities.items():
                    self.entities[ent].add_file(f.path, val)
    def add_entity(self, domain, **kwargs):
        ''' Add a new Entity to tracking.

        Args:
            domain: The Domain object the new Entity belongs to.
            kwargs: Keyword arguments forwarded to the Entity constructor
                (e.g., name, pattern, mandatory, directory, aliases,
                map_func).
        '''
        # Set the entity's mapping func if one was specified
        map_func = kwargs.get('map_func', None)
        if map_func is not None and not callable(kwargs['map_func']):
            # A non-callable map_func is treated as the name of a method on
            # the entity_mapper object, which must therefore exist.
            if self.entity_mapper is None:
                raise ValueError("Mapping function '%s' specified for Entity "
                                 "'%s', but no entity mapper was passed when "
                                 "initializing the current Layout. Please make"
                                 " sure the 'entity_mapper' argument is set." %
                                 (map_func, kwargs['name']))
            map_func = getattr(self.entity_mapper, kwargs['map_func'])
            kwargs['map_func'] = map_func

        ent = Entity(domain=domain, **kwargs)
        domain.add_entity(ent)
        if ent.mandatory:
            self.mandatory.add(ent.id)
        if ent.directory is not None:
            # Expand the {{root}} placeholder with the Layout's root path.
            ent.directory = ent.directory.replace('{{root}}', self.root)
        # Register under the domain-qualified id, plus any aliases.
        self.entities[ent.id] = ent
        for alias in ent.aliases:
            self.entities[alias] = ent
        if self.dynamic_getters:
            # Expose e.g. get_subjects() as a shortcut for
            # get(target='subject', return_type='id').
            func = partial(getattr(self, 'get'), target=ent.name,
                           return_type='id')
            func_name = inflect.engine().plural(ent.name)
            setattr(self, 'get_%s' % func_name, func)
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
    def get_nearest(self, path, return_type='file', strict=True, all_=False,
                    ignore_strict_entities=None, full_search=False, **kwargs):
        ''' Walk up the file tree from the specified path and return the
        nearest matching file(s).

        Args:
            path (str): The file to search from.
            return_type (str): What to return; must be one of 'file' (default)
                or 'tuple'.
            strict (bool): When True, all entities present in both the input
                path and the target file(s) must match perfectly. When False,
                files will be ordered by the number of matching entities, and
                partial matches will be allowed.
            all_ (bool): When True, returns all matching files. When False
                (default), only returns the first match.
            ignore_strict_entities (list): Optional list of entities to
                exclude from strict matching when strict is True. This allows
                one to search, e.g., for files of a different type while
                matching all other entities perfectly by passing
                ignore_strict_entities=['type'].
            full_search (bool): If True, searches all indexed files, even if
                they don't share a common root with the provided path. If
                False, only files that share a common root will be scanned.
            kwargs: Optional keywords to pass on to .get().
        '''
        # Extract the entity values embedded in the query path itself.
        entities = {}
        for ent in self.entities.values():
            m = ent.regex.search(path)
            if m:
                entities[ent.name] = ent._astype(m.group(1))

        # Remove any entities we want to ignore when strict matching is on
        if strict and ignore_strict_entities is not None:
            for k in ignore_strict_entities:
                entities.pop(k, None)

        # Group all candidate files by their parent directory.
        results = self.get(return_type='file', **kwargs)
        folders = defaultdict(list)
        for filename in results:
            f = self.get_file(filename)
            folders[f.dirname].append(f)

        def count_matches(f):
            # Returns [num shared entity keys, num identically-valued keys].
            f_ents = f.entities
            keys = set(entities.keys()) & set(f_ents.keys())
            shared = len(keys)
            return [shared, sum([entities[k] == f_ents[k] for k in keys])]

        matches = []
        # Walk up from `path` toward the filesystem root, collecting every
        # ancestor directory that holds at least one candidate file.
        # NOTE: `path` is rebound in this loop and reused below.
        search_paths = []
        while True:
            if path in folders and folders[path]:
                search_paths.append(path)
            parent = dirname(path)
            if parent == path:
                break
            path = parent
        if full_search:
            # Append remaining (non-ancestor) folders at lower priority.
            unchecked = set(folders.keys()) - set(search_paths)
            search_paths.extend(path for path in unchecked if folders[path])
        for path in search_paths:
            # Sort by number of matching entities. Also store number of
            # common entities, for filtering when strict=True.
            num_ents = [[f] + count_matches(f) for f in folders[path]]
            # Filter out imperfect matches (i.e., where number of common
            # entities does not equal number of matching entities).
            if strict:
                num_ents = [f for f in num_ents if f[1] == f[2]]
            num_ents.sort(key=lambda x: x[2], reverse=True)
            if num_ents:
                matches.append(num_ents[0][0])
            if not all_:
                break
        matches = [m.path if return_type == 'file' else m.as_named_tuple()
                   for m in matches]
        return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
    def build_path(self, source, path_patterns=None, strict=False,
                   domains=None):
        ''' Constructs a target filename for a file or dictionary of entities.

        Args:
            source (str, File, dict): The source data to use to construct the
                new file path. Must be one of:
                - A File object
                - A string giving the path of a File contained within the
                  current Layout.
                - A dict of entities, with entity names in keys and values in
                  values
            path_patterns (list): Optional path patterns to use to construct
                the new file path. If None, the Layout-defined patterns will
                be used.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
            domains (str, list): Optional name(s) of domain(s) to scan for
                path patterns. If None, all domains are scanned. If two or more
                domains are provided, the order determines the precedence of
                path patterns (i.e., earlier domains will have higher
                precedence).
        '''
        if isinstance(source, six.string_types):
            # A path was passed; resolve it to an indexed File object,
            # trying the path as given first, then relative to root.
            if source not in self.files:
                source = join(self.root, source)
            source = self.get_file(source)
        if isinstance(source, File):
            # Use the File's extracted entity values as the source dict.
            source = source.entities
        if path_patterns is None:
            # Collect patterns from the requested domains, preserving the
            # precedence order of the domains list.
            if domains is None:
                domains = list(self.domains.keys())
            path_patterns = []
            for dom in listify(domains):
                path_patterns.extend(self.domains[dom].path_patterns)
        # Delegates to the module-level build_path() helper (the bare name
        # does not resolve to this method, so this is not recursion).
        return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
    def write_contents_to_file(self, entities, path_patterns=None,
                               contents=None, link_to=None,
                               content_mode='text', conflicts='fail',
                               strict=False, domains=None, index=False,
                               index_domains=None):
        """
        Write arbitrary data to a file defined by the passed entities and
        path patterns.

        Args:
            entities (dict): A dictionary of entities, with Entity names in
                keys and values for the desired file in values.
            path_patterns (list): Optional path patterns to use when building
                the filename. If None, the Layout-defined patterns will be
                used.
            contents (object): Contents to write to the generate file path.
                Can be any object serializable as text or binary data (as
                defined in the content_mode argument).
            link_to: Optional existing path to link the new file to
                (presumably passed through to the module-level helper —
                TODO confirm its exact semantics there).
            content_mode (str): How to write contents; 'text' by default
                (presumably 'binary' is the alternative — confirm against
                the module-level write_contents_to_file helper).
            conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
                that defines the desired action when the output path already
                exists. 'fail' raises an exception; 'skip' does nothing;
                'overwrite' overwrites the existing file; 'append' adds a suffix
                to each file copy, starting with 1. Default is 'fail'.
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
            domains (list): List of Domains to scan for path_patterns. Order
                determines precedence (i.e., earlier Domains will be scanned
                first). If None, all available domains are included.
            index (bool): If True, adds the generated file to the current
                index using the domains specified in index_domains.
            index_domains (list): List of domain names to attach the generated
                file to when indexing. Ignored if index == False. If None,
                All available domains are used.
        """
        path = self.build_path(entities, path_patterns, strict, domains)
        if path is None:
            raise ValueError("Cannot construct any valid filename for "
                             "the passed entities given available path "
                             "patterns.")
        # Delegates to the module-level write_contents_to_file() helper: the
        # bare name never resolves to this method, so this is not recursion.
        write_contents_to_file(path, contents=contents, link_to=link_to,
                               content_mode=content_mode, conflicts=conflicts,
                               root=self.root)
        if index:
            # TODO: Default to using only domains that have at least one
            # tagged entity in the generated file.
            if index_domains is None:
                index_domains = list(self.domains.keys())
            self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.count | python | def count(self, entity, files=False):
return self._find_entity(entity).count(files) | Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L802-L812 | [
"def _find_entity(self, entity):\n ''' Find an Entity instance by name. Checks both name and id fields.'''\n if entity in self.entities:\n return self.entities[entity]\n _ent = [e for e in self.entities.values() if e.name == entity]\n if len(_ent) > 1:\n raise ValueError(\"Entity name '%s' matches %d entities. To \"\n \"avoid ambiguity, please prefix the entity \"\n \"name with its domain (e.g., 'bids.%s'.\" %\n (entity, len(_ent), entity))\n if _ent:\n return _ent[0]\n\n raise ValueError(\"No entity '%s' found.\" % entity)\n"
] | class Layout(object):
    def __init__(self, paths, root=None, index=None,
                 dynamic_getters=False, absolute_paths=True,
                 regex_search=False, entity_mapper=None, path_patterns=None,
                 config_filename='layout.json', include=None, exclude=None):
        """
        A container for all the files and metadata found at the specified path.

        Args:
            paths (str, list): The path(s) where project files are located.
                Must be one of:
                - A path to a directory containing files to index
                - A list of paths to directories to index
                - A list of 2-tuples where each tuple encodes a mapping from
                  directories to domains. The first element is a string or
                  list giving the paths to one or more directories to index.
                  The second element specifies which domains to apply to the
                  specified files, and can be one of:
                    * A string giving the path to a JSON config file
                    * A dictionary containing config information
                    * A list of any combination of strings or dicts
            root (str): Optional directory that all other paths will be
                relative to. If set, every other path the Layout sees must be
                at this level or below. If None, filesystem root ('/') is used.
            index (str): Optional path to a saved index file. If a valid value
                is passed, this index is used to populate Files and Entities,
                and the normal indexing process (which requires scanning all
                files in the project) is skipped.
            dynamic_getters (bool): If True, a get_{entity_name}() method will
                be dynamically added to the Layout every time a new Entity is
                created. This is implemented by creating a partial function of
                the get() function that sets the target argument to the
                entity name.
            absolute_paths (bool): If True, grabbit uses absolute file paths
                everywhere (including when returning query results). If False,
                the input path will determine the behavior (i.e., relative if
                a relative path was passed, absolute if an absolute path was
                passed).
            regex_search (bool): Whether to require exact matching (True)
                or regex search (False, default) when comparing the query
                string to each entity in .get() calls. This sets a default for
                the instance, but can be overridden in individual .get()
                requests.
            entity_mapper (object, str): An optional object containing methods
                for indexing specific entities. If passed, the object must
                contain a named method for every value that appears in the
                JSON config file under the "mapper" key of an Entity's entry.
                For example, if an entity "type" is defined that contains the
                key/value pair "mapper": "extract_type", then the passed object
                must contain an .extract_type() method.
                Alternatively, the special string "self" can be passed, in
                which case the current Layout instance will be used as the
                entity mapper (implying that the user has subclassed Layout).
            path_patterns (str, list): One or more filename patterns to use
                as a default path pattern for this layout's files. Can also
                be specified in the config file.
            config_filename (str): The name of directory-specific config files.
                Every directory will be scanned for this file, and if found,
                the config file will be read in and added to the list of
                configs.
            include (str, list): A string or list specifying regexes used to
                globally filter files when indexing. A file or directory
                *must* match at least of the passed values in order to be
                retained in the index. Cannot be used together with 'exclude'.
            exclude (str, list): A string or list specifying regexes used to
                globally filter files when indexing. If a file or directory
                *must* matches any of the passed values, it will be dropped
                from indexing. Cannot be used together with 'include'.
        """
        if include is not None and exclude is not None:
            raise ValueError("You cannot specify both the include and exclude"
                             " arguments. Please pass at most one of these.")
        self.entities = OrderedDict()
        self.files = {}
        self.mandatory = set()
        self.dynamic_getters = dynamic_getters
        self.regex_search = regex_search
        # 'self' is a sentinel: the Layout subclass itself supplies mappers.
        self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
        self.path_patterns = path_patterns if path_patterns else []
        self.config_filename = config_filename
        self.domains = OrderedDict()
        self.include = listify(include or [])
        self.exclude = listify(exclude or [])
        self.absolute_paths = absolute_paths
        if root is None:
            root = '/'
        self.root = abspath(root)
        self._domain_map = {}
        # Extract path --> domain mapping
        self._paths_to_index = {}
        def add_path(path, val):
            # Keys are always absolute so index() lookups are consistent.
            path = abspath(path)
            self._paths_to_index[path] = val
        for p in listify(paths, ignore=list):
            if isinstance(p, six.string_types):
                # Bare path string: no explicitly attached domains.
                add_path(p, [])
            else:
                # (paths, domains) 2-tuple: resolve each domain spec first.
                doms = listify(p[1])
                doms = [self._get_or_load_domain(d) for d in doms]
                for elem in listify(p[0]):
                    add_path(elem, doms)
        # Verify existence of all paths
        for p in self._paths_to_index:
            if not exists(p):
                raise ValueError("Search path {} doesn't exist.".format(p))
        if index is None:
            self.index()
        else:
            self.load_index(index)
    def _get_or_load_domain(self, domain):
        ''' Return a domain if one already exists, or create a new one if not.

        Args:
            domain (str, dict): Can be one of:
                - The name of the Domain to return (fails if none exists)
                - A path to the Domain configuration file
                - A dictionary containing configuration information
        '''
        if isinstance(domain, six.string_types):
            # String input: try registered name first, then treat as a path.
            if domain in self.domains:
                return self.domains[domain]
            elif exists(domain):
                with open(domain, 'r') as fobj:
                    domain = json.load(fobj)
            else:
                raise ValueError("No domain could be found/loaded from input "
                                 "'{}'; value must be either the name of an "
                                 "existing Domain, or a valid path to a "
                                 "configuration file.".format(domain))
        # At this point, domain is a dict
        name = domain['name']
        if name in self.domains:
            # Duplicate definition: warn and keep the first registration.
            msg = ("Domain with name '{}' already exists; returning existing "
                   "Domain configuration.".format(name))
            warnings.warn(msg)
            return self.domains[name]
        entities = domain.get('entities', [])
        domain = Domain(domain)
        for e in entities:
            # Registers each entity on both the Domain and this Layout.
            self.add_entity(domain=domain, **e)
        self.domains[name] = domain
        return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
    def _check_inclusions(self, f, domains=None):
        ''' Check file or directory against regexes in config to determine if
        it should be included in the index.

        Args:
            f (str or File): Path string or File-like object with a .path.
            domains (list): Domain objects whose include/exclude filters
                apply; defaults to all registered domains. The Layout's own
                global filters are always consulted first.

        Returns:
            bool: True if the path should be indexed.
        '''
        filename = f if isinstance(f, six.string_types) else f.path
        if domains is None:
            domains = list(self.domains.values())
        # Inject the Layout at the first position for global include/exclude
        domains = list(domains)
        domains.insert(0, self)
        for dom in domains:
            # If file matches any include regex, then True
            if dom.include:
                for regex in dom.include:
                    if re.search(regex, filename):
                        return True
                # NOTE(review): the first domain that defines an `include`
                # list fully decides the outcome; later domains are never
                # consulted. Confirm this is intentional.
                return False
            else:
                # If file matches any exclude regex, then False
                for regex in dom.exclude:
                    if re.search(regex, filename, flags=re.UNICODE):
                        return False
        # No include list anywhere and no exclude regex matched.
        return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
    def _index_file(self, root, f, domains, update_layout=True):
        """Match a single file against each domain's entities and
        (optionally) register it with the Layout.

        Args:
            root (str): Directory containing the file.
            f (str): Filename (basename) to index.
            domains (list): Names of the domains whose entities to match.
            update_layout (bool): If True, the file and its entity values
                are added to the Layout/Domain/Entity indexes; if False the
                File is only constructed and tagged (used by
                parse_file_entities()).

        Returns:
            The constructed File object.
        """
        # Create the file object--allows for subclassing
        f = self._make_file_object(root, f)
        for domain in listify(domains):
            domain = self.domains[domain]
            match_vals = {}
            for e in domain.entities.values():
                m = e.match_file(f)
                if m is None and e.mandatory:
                    # A mandatory entity failed to match: this domain
                    # contributes no tags for this file at all.
                    break
                if m is not None:
                    match_vals[e.name] = (e, m)
            if match_vals:
                for k, (ent, val) in match_vals.items():
                    f.tags[k] = Tag(ent, val)
                    if update_layout:
                        ent.add_file(f.path, val)
                if update_layout:
                    domain.add_file(f)
        if update_layout:
            f.domains = domains
            self.files[f.path] = f
        return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
    def load_index(self, filename, reindex=False):
        ''' Load the Layout's index from a plaintext (JSON) file.

        Args:
            filename (str): Path to the plaintext index file.
            reindex (bool): If True, discards entity values provided in the
                loaded index and instead re-indexes every file in the loaded
                index against the entities defined in the config. Default is
                False, in which case it is assumed that all entity definitions
                in the loaded index are correct and do not need any further
                validation.

        Note: At the moment, directory-specific config files aren't serialized.
        This means reconstructed indexes will only work properly in cases
        where there aren't multiple layout specs within a project.
        '''
        self._reset_index()
        with open(filename, 'r') as fobj:
            data = json.load(fobj)
        for path, file in data.items():
            ents, domains = file['entities'], file['domains']
            root, f = dirname(path), basename(path)
            if reindex:
                # Re-extract entity values from scratch; stored values are
                # discarded in favor of the current config's entities.
                self._index_file(root, f, domains)
            else:
                # Trust the stored values: rebuild the File object and its
                # Tags directly, then register the file with each Entity so
                # subsequent lookups can see it.
                f = self._make_file_object(root, f)
                tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
                f.tags = tags
                self.files[f.path] = f
                for ent, val in f.entities.items():
                    self.entities[ent].add_file(f.path, val)
    def add_entity(self, domain, **kwargs):
        ''' Add a new Entity to tracking.

        Args:
            domain: The Domain object the new Entity belongs to.
            kwargs: Keyword arguments forwarded to the Entity constructor
                (e.g., name, pattern, mandatory, directory, aliases,
                map_func).
        '''
        # Set the entity's mapping func if one was specified
        map_func = kwargs.get('map_func', None)
        if map_func is not None and not callable(kwargs['map_func']):
            # A non-callable map_func is treated as the name of a method on
            # the entity_mapper object, which must therefore exist.
            if self.entity_mapper is None:
                raise ValueError("Mapping function '%s' specified for Entity "
                                 "'%s', but no entity mapper was passed when "
                                 "initializing the current Layout. Please make"
                                 " sure the 'entity_mapper' argument is set." %
                                 (map_func, kwargs['name']))
            map_func = getattr(self.entity_mapper, kwargs['map_func'])
            kwargs['map_func'] = map_func
        ent = Entity(domain=domain, **kwargs)
        domain.add_entity(ent)
        if ent.mandatory:
            self.mandatory.add(ent.id)
        if ent.directory is not None:
            # Expand the {{root}} placeholder with the Layout's root path.
            ent.directory = ent.directory.replace('{{root}}', self.root)
        # Register under the domain-qualified id, plus any aliases.
        self.entities[ent.id] = ent
        for alias in ent.aliases:
            self.entities[alias] = ent
        if self.dynamic_getters:
            # Expose e.g. get_subjects() as a shortcut for
            # get(target='subject', return_type='id').
            func = partial(getattr(self, 'get'), target=ent.name,
                           return_type='id')
            func_name = inflect.engine().plural(ent.name)
            setattr(self, 'get_%s' % func_name, func)
    def get(self, return_type='tuple', target=None, extensions=None,
            domains=None, regex_search=None, **kwargs):
        """
        Retrieve files and/or metadata from the current Layout.

        Args:
            return_type (str): Type of result to return. Valid values:
                'tuple': returns a list of namedtuples containing file name as
                    well as attribute/value pairs for all named entities.
                'file': returns a list of matching filenames.
                'dir': returns a list of directories.
                'id': returns a list of unique IDs. Must be used together with
                    a valid target.
                'obj': returns a list of matching File objects.
            target (str): The name of the target entity to get results for
                (if return_type is 'dir' or 'id').
            extensions (str, list): One or more file extensions to filter on.
                Files with any other extensions will be excluded.
            domains (list): Optional list of domain names to scan for files.
                If None, all available domains are scanned.
            regex_search (bool or None): Whether to require exact matching
                (False) or regex search (True) when comparing the query string
                to each entity. If None (default), uses the value found in
                self.
            kwargs (dict): Any optional key/values to filter the entities on.
                Keys are entity names, values are regexes to filter on. For
                example, passing filter={ 'subject': 'sub-[12]'} would return
                only files that match the first two subjects.

        Returns:
            A named tuple (default) or a list (see return_type for details).
        """
        if regex_search is None:
            regex_search = self.regex_search
        # Collect every indexed file that satisfies the filters.
        result = []
        filters = {}
        filters.update(kwargs)
        for filename, file in self.files.items():
            if not file._matches(filters, extensions, domains, regex_search):
                continue
            result.append(file)
        # Convert to relative paths if needed
        if not self.absolute_paths:
            for i, f in enumerate(result):
                # Copy before mutating path so the indexed File is untouched.
                f = copy(f)
                f.path = relpath(f.path, self.root)
                result[i] = f
        if return_type == 'file':
            return natural_sort([f.path for f in result])
        if return_type == 'tuple':
            result = [r.as_named_tuple() for r in result]
            return natural_sort(result, field='filename')
        if return_type.startswith('obj'):
            return result
        else:
            # Remaining return types ('id', 'dir') require a target entity.
            valid_entities = self.get_domain_entities(domains)
            if target is None:
                raise ValueError('If return_type is "id" or "dir", a valid '
                                 'target entity must also be specified.')
            result = [x for x in result if target in x.entities]
            if return_type == 'id':
                result = list(set([x.entities[target] for x in result]))
                return natural_sort(result)
            elif return_type == 'dir':
                template = valid_entities[target].directory
                if template is None:
                    raise ValueError('Return type set to directory, but no '
                                     'directory template is defined for the '
                                     'target entity (\"%s\").' % target)
                # Construct regex search pattern from target directory template
                to_rep = re.findall('\{(.*?)\}', template)
                for ent in to_rep:
                    patt = valid_entities[ent].pattern
                    template = template.replace('{%s}' % ent, patt)
                # Anchor: no path separators may follow the matched dir.
                template += '[^\%s]*$' % os.path.sep
                matches = [f.dirname for f in result
                           if re.search(template, f.dirname)]
                return natural_sort(list(set(matches)))
            else:
                raise ValueError("Invalid return_type specified (must be one "
                                 "of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
    def get_nearest(self, path, return_type='file', strict=True, all_=False,
                    ignore_strict_entities=None, full_search=False, **kwargs):
        ''' Walk up the file tree from the specified path and return the
        nearest matching file(s).

        Args:
            path (str): The file to search from.
            return_type (str): What to return; must be one of 'file' (default)
                or 'tuple'.
            strict (bool): When True, all entities present in both the input
                path and the target file(s) must match perfectly. When False,
                files will be ordered by the number of matching entities, and
                partial matches will be allowed.
            all_ (bool): When True, returns all matching files. When False
                (default), only returns the first match.
            ignore_strict_entities (list): Optional list of entities to
                exclude from strict matching when strict is True. This allows
                one to search, e.g., for files of a different type while
                matching all other entities perfectly by passing
                ignore_strict_entities=['type'].
            full_search (bool): If True, searches all indexed files, even if
                they don't share a common root with the provided path. If
                False, only files that share a common root will be scanned.
            kwargs: Optional keywords to pass on to .get().
        '''
        # Extract the entity values embedded in the query path itself.
        entities = {}
        for ent in self.entities.values():
            m = ent.regex.search(path)
            if m:
                entities[ent.name] = ent._astype(m.group(1))

        # Remove any entities we want to ignore when strict matching is on
        if strict and ignore_strict_entities is not None:
            for k in ignore_strict_entities:
                entities.pop(k, None)

        # Group all candidate files by their parent directory.
        results = self.get(return_type='file', **kwargs)
        folders = defaultdict(list)
        for filename in results:
            f = self.get_file(filename)
            folders[f.dirname].append(f)

        def count_matches(f):
            # Returns [num shared entity keys, num identically-valued keys].
            f_ents = f.entities
            keys = set(entities.keys()) & set(f_ents.keys())
            shared = len(keys)
            return [shared, sum([entities[k] == f_ents[k] for k in keys])]

        matches = []
        # Walk up from `path` toward the filesystem root, collecting every
        # ancestor directory that holds at least one candidate file.
        # NOTE: `path` is rebound in this loop and reused below.
        search_paths = []
        while True:
            if path in folders and folders[path]:
                search_paths.append(path)
            parent = dirname(path)
            if parent == path:
                break
            path = parent
        if full_search:
            # Append remaining (non-ancestor) folders at lower priority.
            unchecked = set(folders.keys()) - set(search_paths)
            search_paths.extend(path for path in unchecked if folders[path])
        for path in search_paths:
            # Sort by number of matching entities. Also store number of
            # common entities, for filtering when strict=True.
            num_ents = [[f] + count_matches(f) for f in folders[path]]
            # Filter out imperfect matches (i.e., where number of common
            # entities does not equal number of matching entities).
            if strict:
                num_ents = [f for f in num_ents if f[1] == f[2]]
            num_ents.sort(key=lambda x: x[2], reverse=True)
            if num_ents:
                matches.append(num_ents[0][0])
            if not all_:
                break
        matches = [m.path if return_type == 'file' else m.as_named_tuple()
                   for m in matches]
        return matches if all_ else matches[0] if matches else None
def clone(self):
    """Return a fully independent deep copy of this Layout."""
    duplicate = deepcopy(self)
    return duplicate
def parse_file_entities(self, filename, domains=None):
    """Extract entity key/value pairs from a filename without modifying
    the Layout's index.

    Args:
        filename (str): Path to parse. If it has no directory component,
            ``domains`` must be provided explicitly.
        domains (list): Optional list of domain names whose entities are
            matched against the filename. Defaults to all known domains.

    Returns:
        dict: Mapping of entity names to extracted values, as produced by
        the transient File object built by _index_file.

    Raises:
        ValueError: If ``filename`` is a bare name (no directory part) and
            no domains were specified.
    """
    root, f = dirname(filename), basename(filename)
    if domains is None:
        # A bare filename gives no directory context, so the caller must
        # say which domains' entity patterns should be applied.
        if not root:
            msg = ("If a relative path is provided as the filename "
                   "argument, you *must* specify the names of the "
                   "domains whose entities are to be extracted. "
                   "Available domains for the current layout are: %s"
                   % list(self.domains.keys()))
            raise ValueError(msg)
        domains = list(self.domains.keys())
    # update_layout=False keeps this a read-only parse (no index mutation).
    result = self._index_file(root, f, domains, update_layout=False)
    return result.entities
def build_path(self, source, path_patterns=None, strict=False,
               domains=None):
    ''' Constructs a target filename for a file or dictionary of entities.

    Args:
        source (str, File, dict): The source data used to construct the
            new file path: a File object, the path of an indexed File, or
            a dict mapping entity names to values.
        path_patterns (list): Optional path patterns to use. If None, the
            patterns defined on the selected domains are used.
        strict (bool): If True, every entity must be consumed by a pattern
            for it to count as a match; if False, extra entities are
            ignored as long as all mandatory ones are present.
        domains (str, list): Optional domain name(s) whose patterns are
            scanned, in precedence order. Defaults to all domains.
    '''
    # A string is treated as the path of an already-indexed File.
    if isinstance(source, six.string_types):
        if source not in self.files:
            source = join(self.root, source)
        source = self.get_file(source)

    # Reduce a File down to its entity dictionary.
    if isinstance(source, File):
        source = source.entities

    if path_patterns is None:
        selected = list(self.domains.keys()) if domains is None else domains
        path_patterns = []
        for dom_name in listify(selected):
            path_patterns.extend(self.domains[dom_name].path_patterns)

    # Delegate to the module-level build_path helper.
    return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.

    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            copy method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout root.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when an output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        _files = list(set(files).intersection(_files))

    # Fix: the ``root`` argument was previously accepted but silently
    # ignored in favor of self.root. Honor it, defaulting to self.root to
    # preserve the old behavior when it is not given.
    if root is None:
        root = self.root
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
                           contents=None, link_to=None,
                           content_mode='text', conflicts='fail',
                           strict=False, domains=None, index=False,
                           index_domains=None):
    """
    Write arbitrary data to a file defined by the passed entities and
    path patterns.

    Args:
        entities (dict): A dictionary of entities, with Entity names in
            keys and values for the desired file in values.
        path_patterns (list): Optional path patterns to use when building
            the filename. If None, the Layout-defined patterns will be
            used.
        contents (object): Contents to write to the generate file path.
            Can be any object serializable as text or binary data (as
            defined in the content_mode argument).
        link_to (str): Forwarded to the module-level write_contents_to_file
            helper; presumably a path to link to instead of writing
            contents — confirm semantics against that helper.
        content_mode (str): 'text' or binary mode indicator; forwarded to
            the module-level writer.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when the output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a suffix
            to each file copy, starting with 1. Default is 'fail'.
        strict (bool): If True, all entities must be matched inside a
            pattern in order to be a valid match. If False, extra entities
            will be ignored so long as all mandatory entities are found.
        domains (list): List of Domains to scan for path_patterns. Order
            determines precedence (i.e., earlier Domains will be scanned
            first). If None, all available domains are included.
        index (bool): If True, adds the generated file to the current
            index using the domains specified in index_domains.
        index_domains (list): List of domain names to attach the generated
            file to when indexing. Ignored if index == False. If None,
            All available domains are used.

    Raises:
        ValueError: If no valid path can be built from the entities.
    """
    path = self.build_path(entities, path_patterns, strict, domains)
    if path is None:
        raise ValueError("Cannot construct any valid filename for "
                         "the passed entities given available path "
                         "patterns.")
    # Bare name resolves to the module-level write_contents_to_file helper,
    # not this method (no recursion).
    write_contents_to_file(path, contents=contents, link_to=link_to,
                           content_mode=content_mode, conflicts=conflicts,
                           root=self.root)
    if index:
        # TODO: Default to using only domains that have at least one
        # tagged entity in the generated file.
        if index_domains is None:
            index_domains = list(self.domains.keys())
        self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.as_data_frame | python | def as_data_frame(self, **kwargs):
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data | Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L814-L839 | [
"def get(self, return_type='tuple', target=None, extensions=None,\n domains=None, regex_search=None, **kwargs):\n \"\"\"\n Retrieve files and/or metadata from the current Layout.\n\n Args:\n return_type (str): Type of result to return. Valid values:\n 'tuple': returns a list of namedtuples containing file name as\n well as attribute/value pairs for all named entities.\n 'file': returns a list of matching filenames.\n 'dir': returns a list of directories.\n 'id': returns a list of unique IDs. Must be used together with\n a valid target.\n 'obj': returns a list of matching File objects.\n target (str): The name of the target entity to get results for\n (if return_type is 'dir' or 'id').\n extensions (str, list): One or more file extensions to filter on.\n Files with any other extensions will be excluded.\n domains (list): Optional list of domain names to scan for files.\n If None, all available domains are scanned.\n regex_search (bool or None): Whether to require exact matching\n (False) or regex search (True) when comparing the query string\n to each entity. If None (default), uses the value found in\n self.\n kwargs (dict): Any optional key/values to filter the entities on.\n Keys are entity names, values are regexes to filter on. 
For\n example, passing filter={ 'subject': 'sub-[12]'} would return\n only files that match the first two subjects.\n\n Returns:\n A named tuple (default) or a list (see return_type for details).\n \"\"\"\n\n if regex_search is None:\n regex_search = self.regex_search\n\n result = []\n filters = {}\n filters.update(kwargs)\n\n for filename, file in self.files.items():\n if not file._matches(filters, extensions, domains, regex_search):\n continue\n result.append(file)\n\n # Convert to relative paths if needed\n if not self.absolute_paths:\n for i, f in enumerate(result):\n f = copy(f)\n f.path = relpath(f.path, self.root)\n result[i] = f\n\n if return_type == 'file':\n return natural_sort([f.path for f in result])\n\n if return_type == 'tuple':\n result = [r.as_named_tuple() for r in result]\n return natural_sort(result, field='filename')\n\n if return_type.startswith('obj'):\n return result\n\n else:\n valid_entities = self.get_domain_entities(domains)\n\n if target is None:\n raise ValueError('If return_type is \"id\" or \"dir\", a valid '\n 'target entity must also be specified.')\n result = [x for x in result if target in x.entities]\n\n if return_type == 'id':\n result = list(set([x.entities[target] for x in result]))\n return natural_sort(result)\n\n elif return_type == 'dir':\n template = valid_entities[target].directory\n if template is None:\n raise ValueError('Return type set to directory, but no '\n 'directory template is defined for the '\n 'target entity (\\\"%s\\\").' 
% target)\n # Construct regex search pattern from target directory template\n to_rep = re.findall('\\{(.*?)\\}', template)\n for ent in to_rep:\n patt = valid_entities[ent].pattern\n template = template.replace('{%s}' % ent, patt)\n template += '[^\\%s]*$' % os.path.sep\n matches = [f.dirname for f in result\n if re.search(template, f.dirname)]\n return natural_sort(list(set(matches)))\n\n else:\n raise ValueError(\"Invalid return_type specified (must be one \"\n \"of 'tuple', 'file', 'id', or 'dir'.\")\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
    ''' Return a domain if one already exists, or create a new one if not.

    Args:
        domain (str, dict): Can be one of:
            - The name of the Domain to return (fails if none exists)
            - A path to the Domain configuration file
            - A dictionary containing configuration information

    Returns:
        Domain: The existing or newly registered Domain instance.

    Raises:
        ValueError: If a string is neither a known Domain name nor an
            existing config-file path.
    '''
    if isinstance(domain, six.string_types):
        # Known name wins over filesystem lookup.
        if domain in self.domains:
            return self.domains[domain]
        elif exists(domain):
            with open(domain, 'r') as fobj:
                domain = json.load(fobj)
        else:
            raise ValueError("No domain could be found/loaded from input "
                             "'{}'; value must be either the name of an "
                             "existing Domain, or a valid path to a "
                             "configuration file.".format(domain))

    # At this point, domain is a dict
    name = domain['name']
    # Re-registering an existing name is a no-op with a warning, not an
    # error; the previously loaded configuration is kept.
    if name in self.domains:
        msg = ("Domain with name '{}' already exists; returning existing "
               "Domain configuration.".format(name))
        warnings.warn(msg)
        return self.domains[name]

    # Register the Domain and all Entities declared in its config.
    entities = domain.get('entities', [])
    domain = Domain(domain)
    for e in entities:
        self.add_entity(domain=domain, **e)
    self.domains[name] = domain
    return self.domains[name]
def get_domain_entities(self, domains=None):
    """Merge and return the Entities of the named Domains.

    Later domains in the list override earlier ones on name collisions;
    when ``domains`` is None, all domains are included in insertion order.
    """
    names = list(self.domains.keys()) if domains is None else domains
    merged = {}
    for name in names:
        merged.update(self.domains[name].entities)
    return merged
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
    ''' Build a new File object from a directory and filename. Override
    in subclasses to customize File construction. '''
    full_path = join(root, f)
    return File(full_path)
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
    """Build a File for root/f, tag it with matching entities from each
    domain, and (optionally) register it in the Layout's index.

    Args:
        root (str): Directory containing the file.
        f (str): Filename within ``root``.
        domains (str, list): Domain name(s) whose entities are matched.
        update_layout (bool): When False, the File is tagged but neither
            the Layout nor any Entity/Domain index is mutated (used by
            parse_file_entities).

    Returns:
        File: The constructed (and possibly indexed) File object.
    """
    # Create the file object--allows for subclassing
    f = self._make_file_object(root, f)

    for domain in listify(domains):
        domain = self.domains[domain]
        match_vals = {}
        for e in domain.entities.values():
            m = e.match_file(f)
            # A missing mandatory entity stops scanning this domain's
            # remaining entities. NOTE(review): entities matched *before*
            # the break are still applied below — confirm this partial
            # tagging is intended.
            if m is None and e.mandatory:
                break
            if m is not None:
                match_vals[e.name] = (e, m)
        if match_vals:
            for k, (ent, val) in match_vals.items():
                f.tags[k] = Tag(ent, val)
                if update_layout:
                    ent.add_file(f.path, val)
        if update_layout:
            domain.add_file(f)

    if update_layout:
        f.domains = domains
        self.files[f.path] = f

    return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
    """Rebuild the full file index by recursively scanning every path
    registered at construction time. Clears any existing index first.
    """
    self._reset_index()

    def _index_dir(dir_, domains):
        # Recursive worker: index one directory's contents under the
        # given (mutable copy of the) active domain list.
        contents = [join(dir_, f) for f in self._get_files(dir_)]

        # Check for domain config file
        config_file = join(dir_, self.config_filename)
        if exists(config_file):
            new_dom = self._get_or_load_domain(config_file)
            if new_dom not in domains:
                domains.append(new_dom)
            contents.remove(config_file)

        contents = filter(lambda x: self._check_inclusions(x, domains),
                          contents)

        # If the directory was explicitly passed in Layout init,
        # overwrite the current set of domains with what was passed
        domains = self._paths_to_index.get(dir_, domains)

        for f in contents:
            # NOTE(review): ``f`` is already a full path built above, so
            # this join is redundant for the absolute dir_ values used
            # here (join returns an absolute second component unchanged);
            # confirm before relying on relative roots.
            full_path = join(dir_, f)
            if isdir(full_path):
                if self._validate_dir(full_path):
                    # Recurse with a copy so siblings aren't affected by
                    # domains added deeper in the tree.
                    _index_dir(full_path, list(domains))
            elif self._validate_file(full_path):
                _dir, _base = split(full_path)
                dom_names = [d.name for d in domains]
                self._index_file(_dir, _base, dom_names)

    # Index each directory
    for path, domains in self._paths_to_index.items():
        _index_dir(path, list(domains))
def save_index(self, filename):
    ''' Save the current Layout's index to a .json file.

    Args:
        filename (str): Filename to write to.

    Note: At the moment, this won't serialize directory-specific config
    files. This means reconstructed indexes will only work properly in
    cases where there aren't multiple layout specs within a project.
    '''
    # Serialize each file as its domain list plus an entity-id -> value
    # mapping (Tag objects are flattened; only id and value survive).
    data = {}
    for f in self.files.values():
        entities = {v.entity.id: v.value for k, v in f.tags.items()}
        data[f.path] = {'domains': f.domains, 'entities': entities}
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
    """
    Return a list of unique values for the named entity.

    Args:
        entity (str): The name of the entity to retrieve unique values of.
    """
    ent = self._find_entity(entity)
    return ent.unique()
def count(self, entity, files=False):
    """
    Return the count of unique values or files for the named entity.

    Args:
        entity (str): The name of the entity.
        files (bool): If True, counts the number of filenames that contain
            at least one value of the entity, rather than the number of
            unique values of the entity.
    """
    ent = self._find_entity(entity)
    return ent.count(files)
def get_file(self, f):
    """Return the indexed File object registered under path ``f``.

    Raises KeyError for paths that were never indexed.
    """
    return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None
def clone(self):
    """Produce and return a deep copy of this Layout instance."""
    return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.
    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when a output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix
            to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        # Restrict to the explicitly requested files while preserving the
        # ordering returned by get(); a raw set intersection would yield
        # the survivors in arbitrary order.
        requested = set(files)
        _files = [f for f in _files if f in requested]
    # Bug fix: the `root` argument was previously accepted and documented
    # but silently ignored (self.root was always passed). Honor it when
    # given; fall back to self.root to preserve the old default behavior.
    if root is None:
        root = self.root
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
# Resolve the target path from the entity dict first; None means no
# pattern could consume the provided entities.
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
# NOTE: this calls the module-level write_contents_to_file() helper,
# which the method name shadows within the class namespace.
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.get_nearest | python | def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None | Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get(). | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L845-L929 | [
"def get(self, return_type='tuple', target=None, extensions=None,\n domains=None, regex_search=None, **kwargs):\n \"\"\"\n Retrieve files and/or metadata from the current Layout.\n\n Args:\n return_type (str): Type of result to return. Valid values:\n 'tuple': returns a list of namedtuples containing file name as\n well as attribute/value pairs for all named entities.\n 'file': returns a list of matching filenames.\n 'dir': returns a list of directories.\n 'id': returns a list of unique IDs. Must be used together with\n a valid target.\n 'obj': returns a list of matching File objects.\n target (str): The name of the target entity to get results for\n (if return_type is 'dir' or 'id').\n extensions (str, list): One or more file extensions to filter on.\n Files with any other extensions will be excluded.\n domains (list): Optional list of domain names to scan for files.\n If None, all available domains are scanned.\n regex_search (bool or None): Whether to require exact matching\n (False) or regex search (True) when comparing the query string\n to each entity. If None (default), uses the value found in\n self.\n kwargs (dict): Any optional key/values to filter the entities on.\n Keys are entity names, values are regexes to filter on. 
For\n example, passing filter={ 'subject': 'sub-[12]'} would return\n only files that match the first two subjects.\n\n Returns:\n A named tuple (default) or a list (see return_type for details).\n \"\"\"\n\n if regex_search is None:\n regex_search = self.regex_search\n\n result = []\n filters = {}\n filters.update(kwargs)\n\n for filename, file in self.files.items():\n if not file._matches(filters, extensions, domains, regex_search):\n continue\n result.append(file)\n\n # Convert to relative paths if needed\n if not self.absolute_paths:\n for i, f in enumerate(result):\n f = copy(f)\n f.path = relpath(f.path, self.root)\n result[i] = f\n\n if return_type == 'file':\n return natural_sort([f.path for f in result])\n\n if return_type == 'tuple':\n result = [r.as_named_tuple() for r in result]\n return natural_sort(result, field='filename')\n\n if return_type.startswith('obj'):\n return result\n\n else:\n valid_entities = self.get_domain_entities(domains)\n\n if target is None:\n raise ValueError('If return_type is \"id\" or \"dir\", a valid '\n 'target entity must also be specified.')\n result = [x for x in result if target in x.entities]\n\n if return_type == 'id':\n result = list(set([x.entities[target] for x in result]))\n return natural_sort(result)\n\n elif return_type == 'dir':\n template = valid_entities[target].directory\n if template is None:\n raise ValueError('Return type set to directory, but no '\n 'directory template is defined for the '\n 'target entity (\\\"%s\\\").' 
% target)\n # Construct regex search pattern from target directory template\n to_rep = re.findall('\\{(.*?)\\}', template)\n for ent in to_rep:\n patt = valid_entities[ent].pattern\n template = template.replace('{%s}' % ent, patt)\n template += '[^\\%s]*$' % os.path.sep\n matches = [f.dirname for f in result\n if re.search(template, f.dirname)]\n return natural_sort(list(set(matches)))\n\n else:\n raise ValueError(\"Invalid return_type specified (must be one \"\n \"of 'tuple', 'file', 'id', or 'dir'.\")\n",
"def get_file(self, f):\n ''' Return File object for the specified path. '''\n return self.files[f]\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
# include and exclude are mutually exclusive filters.
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
# 'self' is a sentinel meaning "this (subclassed) Layout maps entities".
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
# Bare directory: no explicitly attached domains.
add_path(p, [])
else:
# (paths, domains) 2-tuple: resolve/load each domain up front.
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
# Either build the index by walking the filesystem, or restore it
# from a previously saved index file.
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
# String input: existing domain name takes priority over a path.
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
# Duplicate registration is tolerated: warn and reuse the original.
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
# Register the domain's entities on this Layout before storing it.
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
    """Return a dict of all Entities defined by the given Domains.

    Domains are merged in list order, so when two domains define an
    entity with the same key, the later domain's entry wins. If domains
    is None, every registered domain is included, in registration order.
    """
    names = list(self.domains.keys()) if domains is None else domains
    merged = {}
    for name in names:
        merged.update(self.domains[name].entities)
    return merged
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
# Accept either a raw path string or a File-like object with .path.
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
# NOTE(review): the first domain in this list with a non-empty
# include list fully decides the outcome (both return paths below are
# inside the loop), so later domains are never consulted in that
# case -- confirm this short-circuit is intentional.
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
# No include list anywhere matched/applied and no exclude hit: keep it.
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
# Default behavior: apply the same validation used for files.
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
# Base implementation accepts every file unconditionally.
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
# Subclass hook: override to filter or virtualize directory listings.
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
''' Clear the file index on this Layout and on every Entity, leaving
entity/domain definitions intact. '''
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
''' Match a single file against every entity of the given domains and
tag it accordingly. Returns the created File object. When
update_layout is False, the file is only tagged (used by
parse_file_entities) and the Layout/Entity indexes are untouched. '''
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
# A missing mandatory entity disqualifies the remaining
# entities of this domain.
# NOTE(review): matches collected before the break are still
# applied below -- confirm partial tagging is intended.
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
# Fast path: exact id (or alias) lookup.
if entity in self.entities:
return self.entities[entity]
# Fall back to matching on the bare entity name; this is ambiguous
# when multiple domains define the same name.
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
''' (Re)build the file index by recursively walking every registered
search path, picking up directory-local config files along the way. '''
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
# A local config adds a new domain for this subtree only; the
# config file itself is not indexed.
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
# Recurse with a copy so sibling dirs don't share
# mutations of the domain list.
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
# Persist each file as its domain names plus {entity_id: value}.
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
# Re-run entity matching from scratch for this file.
self._index_file(root, f, domains)
else:
# Trust the saved entity values; rebuild tags directly.
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
# A non-callable map_func is a method name to resolve on the
# entity_mapper object supplied at construction time.
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
# Expand the {{root}} placeholder in directory templates.
ent.directory = ent.directory.replace('{{root}}', self.root)
# Entities are registered under their id and under every alias.
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
# Expose a convenience get_<plural>() accessor, e.g. get_subjects().
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
# First pass: collect every indexed File that satisfies the filters.
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
# Work on copies so the indexed File objects keep absolute paths.
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
# 'obj'/'objects' both accepted via the prefix check.
if return_type.startswith('obj'):
return result
else:
# Remaining return types ('id', 'dir') require a target entity.
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
# Delegates to the Entity instance, which owns the value/file maps.
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
# pandas is deliberately imported lazily: it is an optional
# dependency used only by this method.
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
# Prepend the file path as the first column.
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
# Raises KeyError if the path was never indexed.
return self.files[f]
def clone(self):
''' Return an independent deep copy of this Layout. '''
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
''' Return the {entity: value} dict extracted from filename, without
modifying the Layout's index. Relative filenames require an explicit
domains list because they carry no directory context. '''
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
# update_layout=False keeps this a pure (read-only) parse.
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
# Try the path as given, then relative to the Layout root.
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
# Delegates to the module-level build_path() helper (shadowed by
# this method's name inside the class).
return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.
    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **kwargs.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when a output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix
            to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional key word arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        # Filter to the explicitly supplied files while keeping the
        # deterministic ordering produced by get().
        requested = set(files)
        _files = [f for f in _files if f in requested]
    # Bug fix: `root` was accepted and documented but never used -- the
    # call below always passed self.root. Use the caller's value when
    # provided; default to self.root for backward compatibility.
    if root is None:
        root = self.root
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.build_path | python | def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
return build_path(source, path_patterns, strict) | Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence). | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L948-L989 | [
"def build_path(entities, path_patterns, strict=False):\n \"\"\"\n Constructs a path given a set of entities and a list of potential\n filename patterns to use.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n path_patterns (str, list): One or more filename patterns to write\n the file to. Entities should be represented by the name\n surrounded by curly braces. Optional portions of the patterns\n should be denoted by square brackets. Entities that require a\n specific value for the pattern to match can pass them inside\n carets. Default values can be assigned by specifying a string after\n the pipe operator. E.g., (e.g., {type<image>|bold} would only match\n the pattern if the entity 'type' was passed and its value is\n \"image\", otherwise the default value \"bold\" will be used).\n Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'\n Result 2: 'sub-01/var-SES/1045.csv'\n strict (bool): If True, all passed entities must be matched inside a\n pattern in order to be a valid match. 
If False, extra entities will\n be ignored so long as all mandatory entities are found.\n\n Returns:\n A constructed path for this file based on the provided patterns.\n \"\"\"\n if isinstance(path_patterns, string_types):\n path_patterns = [path_patterns]\n\n # Loop over available patherns, return first one that matches all\n for pattern in path_patterns:\n # If strict, all entities must be contained in the pattern\n if strict:\n defined = re.findall('\\{(.*?)(?:<[^>]+>)?\\}', pattern)\n if set(entities.keys()) - set(defined):\n continue\n # Iterate through the provided path patterns\n new_path = pattern\n optional_patterns = re.findall('\\[(.*?)\\]', pattern)\n # First build from optional patterns if possible\n for optional_pattern in optional_patterns:\n optional_chunk = replace_entities(entities, optional_pattern) or ''\n new_path = new_path.replace('[%s]' % optional_pattern,\n optional_chunk)\n # Replace remaining entities\n new_path = replace_entities(entities, new_path)\n\n if new_path:\n return new_path\n\n return None\n",
"def listify(obj, ignore=(list, tuple, type(None))):\n ''' Wraps all non-list or tuple objects in a list; provides a simple way\n to accept flexible arguments. '''\n return obj if isinstance(obj, ignore) else [obj]\n",
"def get_file(self, f):\n ''' Return File object for the specified path. '''\n return self.files[f]\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
# Reset indexes
self.files = {}
for ent in self.entities.values():
ent.files = {}
def _index_file(self, root, f, domains, update_layout=True):
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
"""
Return a list of unique values for the named entity.
Args:
entity (str): The name of the entity to retrieve unique values of.
"""
return self._find_entity(entity).unique()
def count(self, entity, files=False):
"""
Return the count of unique values or files for the named entity.
Args:
entity (str): The name of the entity.
files (bool): If True, counts the number of filenames that contain
at least one value of the entity, rather than the number of
unique values of the entity.
"""
return self._find_entity(entity).count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
root=None, conflicts='fail', **get_selectors):
"""
Copies one or more Files to new locations defined by each File's
entities and the specified path_patterns.
Args:
files (list): Optional list of File objects to write out. If none
provided, use files from running a get() query using remaining
**kwargs.
path_patterns (str, list): Write patterns to pass to each file's
write_file method.
symbolic_links (bool): Whether to copy each file as a symbolic link
or a deep copy.
root (str): Optional root directory that all patterns are relative
to. Defaults to current working directory.
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when a output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a
suffix
to each file copy, starting with 0. Default is 'fail'.
**get_selectors (kwargs): Optional key word arguments to pass into
a get() query.
"""
_files = self.get(return_type='objects', **get_selectors)
if files:
_files = list(set(files).intersection(_files))
for f in _files:
f.copy(path_patterns, symbolic_link=symbolic_links,
root=self.root, conflicts=conflicts)
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
|
grabbles/grabbit | grabbit/core.py | Layout.write_contents_to_file | python | def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains) | Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
all available domains are used. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L1024-L1076
"def write_contents_to_file(path, contents=None, link_to=None,\n content_mode='text', root=None, conflicts='fail'):\n \"\"\"\n Uses provided filename patterns to write contents to a new path, given\n a corresponding entity map.\n\n Args:\n path (str): Destination path of the desired contents.\n contents (str): Raw text or binary encoded string of contents to write\n to the new path.\n link_to (str): Optional path with which to create a symbolic link to.\n Used as an alternative to and takes priority over the contents\n argument.\n content_mode (str): Either 'text' or 'binary' to indicate the writing\n mode for the new file. Only relevant if contents is provided.\n root (str): Optional root directory that all patterns are relative\n to. Defaults to current working directory.\n conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'\n that defines the desired action when the output path already\n exists. 'fail' raises an exception; 'skip' does nothing;\n 'overwrite' overwrites the existing file; 'append' adds a suffix\n to each file copy, starting with 1. 
Default is 'fail'.\n \"\"\"\n\n if root is None and not isabs(path):\n root = os.getcwd()\n\n if root:\n path = join(root, path)\n\n if exists(path) or islink(path):\n if conflicts == 'fail':\n msg = 'A file at path {} already exists.'\n raise ValueError(msg.format(path))\n elif conflicts == 'skip':\n msg = 'A file at path {} already exists, skipping writing file.'\n logging.warn(msg.format(path))\n return\n elif conflicts == 'overwrite':\n if isdir(path):\n logging.warn('New path is a directory, not going to '\n 'overwrite it, skipping instead.')\n return\n os.remove(path)\n elif conflicts == 'append':\n i = 1\n while i < sys.maxsize:\n path_splits = splitext(path)\n path_splits[0] = path_splits[0] + '_%d' % i\n appended_filename = os.extsep.join(path_splits)\n if not exists(appended_filename) and \\\n not islink(appended_filename):\n path = appended_filename\n break\n i += 1\n else:\n raise ValueError('Did not provide a valid conflicts parameter')\n\n if not exists(dirname(path)):\n os.makedirs(dirname(path))\n\n if link_to:\n os.symlink(link_to, path)\n elif contents:\n mode = 'wb' if content_mode == 'binary' else 'w'\n with open(path, mode) as f:\n f.write(contents)\n else:\n raise ValueError('One of contents or link_to must be provided.')\n",
"def _index_file(self, root, f, domains, update_layout=True):\n\n # Create the file object--allows for subclassing\n f = self._make_file_object(root, f)\n\n for domain in listify(domains):\n domain = self.domains[domain]\n match_vals = {}\n for e in domain.entities.values():\n m = e.match_file(f)\n if m is None and e.mandatory:\n break\n if m is not None:\n match_vals[e.name] = (e, m)\n\n if match_vals:\n for k, (ent, val) in match_vals.items():\n f.tags[k] = Tag(ent, val)\n if update_layout:\n ent.add_file(f.path, val)\n\n if update_layout:\n domain.add_file(f)\n\n if update_layout:\n f.domains = domains\n\n self.files[f.path] = f\n\n return f\n",
"def build_path(self, source, path_patterns=None, strict=False,\n domains=None):\n ''' Constructs a target filename for a file or dictionary of entities.\n\n Args:\n source (str, File, dict): The source data to use to construct the\n new file path. Must be one of:\n - A File object\n - A string giving the path of a File contained within the\n current Layout.\n - A dict of entities, with entity names in keys and values in\n values\n path_patterns (list): Optional path patterns to use to construct\n the new file path. If None, the Layout-defined patterns will\n be used.\n strict (bool): If True, all entities must be matched inside a\n pattern in order to be a valid match. If False, extra entities\n will be ignored so long as all mandatory entities are found.\n domains (str, list): Optional name(s) of domain(s) to scan for\n path patterns. If None, all domains are scanned. If two or more\n domains are provided, the order determines the precedence of\n path patterns (i.e., earlier domains will have higher\n precedence).\n '''\n\n if isinstance(source, six.string_types):\n if source not in self.files:\n source = join(self.root, source)\n\n source = self.get_file(source)\n\n if isinstance(source, File):\n source = source.entities\n\n if path_patterns is None:\n if domains is None:\n domains = list(self.domains.keys())\n path_patterns = []\n for dom in listify(domains):\n path_patterns.extend(self.domains[dom].path_patterns)\n\n return build_path(source, path_patterns, strict)\n"
] | class Layout(object):
def __init__(self, paths, root=None, index=None,
dynamic_getters=False, absolute_paths=True,
regex_search=False, entity_mapper=None, path_patterns=None,
config_filename='layout.json', include=None, exclude=None):
"""
A container for all the files and metadata found at the specified path.
Args:
paths (str, list): The path(s) where project files are located.
Must be one of:
- A path to a directory containing files to index
- A list of paths to directories to index
- A list of 2-tuples where each tuple encodes a mapping from
directories to domains. The first element is a string or
list giving the paths to one or more directories to index.
The second element specifies which domains to apply to the
specified files, and can be one of:
* A string giving the path to a JSON config file
* A dictionary containing config information
* A list of any combination of strings or dicts
root (str): Optional directory that all other paths will be
relative to. If set, every other path the Layout sees must be
at this level or below. If None, filesystem root ('/') is used.
index (str): Optional path to a saved index file. If a valid value
is passed, this index is used to populate Files and Entities,
and the normal indexing process (which requires scanning all
files in the project) is skipped.
dynamic_getters (bool): If True, a get_{entity_name}() method will
be dynamically added to the Layout every time a new Entity is
created. This is implemented by creating a partial function of
the get() function that sets the target argument to the
entity name.
absolute_paths (bool): If True, grabbit uses absolute file paths
everywhere (including when returning query results). If False,
the input path will determine the behavior (i.e., relative if
a relative path was passed, absolute if an absolute path was
passed).
regex_search (bool): Whether to require exact matching (True)
or regex search (False, default) when comparing the query
string to each entity in .get() calls. This sets a default for
the instance, but can be overridden in individual .get()
requests.
entity_mapper (object, str): An optional object containing methods
for indexing specific entities. If passed, the object must
contain a named method for every value that appears in the
JSON config file under the "mapper" key of an Entity's entry.
For example, if an entity "type" is defined that contains the
key/value pair "mapper": "extract_type", then the passed object
must contain an .extract_type() method.
Alternatively, the special string "self" can be passed, in
which case the current Layout instance will be used as the
entity mapper (implying that the user has subclassed Layout).
path_patterns (str, list): One or more filename patterns to use
as a default path pattern for this layout's files. Can also
be specified in the config file.
config_filename (str): The name of directory-specific config files.
Every directory will be scanned for this file, and if found,
the config file will be read in and added to the list of
configs.
include (str, list): A string or list specifying regexes used to
globally filter files when indexing. A file or directory
*must* match at least of the passed values in order to be
retained in the index. Cannot be used together with 'exclude'.
exclude (str, list): A string or list specifying regexes used to
globally filter files when indexing. If a file or directory
*must* matches any of the passed values, it will be dropped
from indexing. Cannot be used together with 'include'.
"""
if include is not None and exclude is not None:
raise ValueError("You cannot specify both the include and exclude"
" arguments. Please pass at most one of these.")
self.entities = OrderedDict()
self.files = {}
self.mandatory = set()
self.dynamic_getters = dynamic_getters
self.regex_search = regex_search
self.entity_mapper = self if entity_mapper == 'self' else entity_mapper
self.path_patterns = path_patterns if path_patterns else []
self.config_filename = config_filename
self.domains = OrderedDict()
self.include = listify(include or [])
self.exclude = listify(exclude or [])
self.absolute_paths = absolute_paths
if root is None:
root = '/'
self.root = abspath(root)
self._domain_map = {}
# Extract path --> domain mapping
self._paths_to_index = {}
def add_path(path, val):
path = abspath(path)
self._paths_to_index[path] = val
for p in listify(paths, ignore=list):
if isinstance(p, six.string_types):
add_path(p, [])
else:
doms = listify(p[1])
doms = [self._get_or_load_domain(d) for d in doms]
for elem in listify(p[0]):
add_path(elem, doms)
# Verify existence of all paths
for p in self._paths_to_index:
if not exists(p):
raise ValueError("Search path {} doesn't exist.".format(p))
if index is None:
self.index()
else:
self.load_index(index)
def _get_or_load_domain(self, domain):
''' Return a domain if one already exists, or create a new one if not.
Args:
domain (str, dict): Can be one of:
- The name of the Domain to return (fails if none exists)
- A path to the Domain configuration file
- A dictionary containing configuration information
'''
if isinstance(domain, six.string_types):
if domain in self.domains:
return self.domains[domain]
elif exists(domain):
with open(domain, 'r') as fobj:
domain = json.load(fobj)
else:
raise ValueError("No domain could be found/loaded from input "
"'{}'; value must be either the name of an "
"existing Domain, or a valid path to a "
"configuration file.".format(domain))
# At this point, domain is a dict
name = domain['name']
if name in self.domains:
msg = ("Domain with name '{}' already exists; returning existing "
"Domain configuration.".format(name))
warnings.warn(msg)
return self.domains[name]
entities = domain.get('entities', [])
domain = Domain(domain)
for e in entities:
self.add_entity(domain=domain, **e)
self.domains[name] = domain
return self.domains[name]
def get_domain_entities(self, domains=None):
# Get all Entities included in the specified Domains, in the same
# order as Domains in the list.
if domains is None:
domains = list(self.domains.keys())
ents = {}
for d in domains:
ents.update(self.domains[d].entities)
return ents
def _check_inclusions(self, f, domains=None):
''' Check file or directory against regexes in config to determine if
it should be included in the index '''
filename = f if isinstance(f, six.string_types) else f.path
if domains is None:
domains = list(self.domains.values())
# Inject the Layout at the first position for global include/exclude
domains = list(domains)
domains.insert(0, self)
for dom in domains:
# If file matches any include regex, then True
if dom.include:
for regex in dom.include:
if re.search(regex, filename):
return True
return False
else:
# If file matches any exclude regex, then False
for regex in dom.exclude:
if re.search(regex, filename, flags=re.UNICODE):
return False
return True
def _validate_dir(self, d):
''' Extend this in subclasses to provide additional directory
validation. Will be called the first time a directory is read in; if
False is returned, the directory will be ignored and dropped from the
layout.
'''
return self._validate_file(d)
def _validate_file(self, f):
''' Extend this in subclasses to provide additional file validation.
Will be called the first time each file is read in; if False is
returned, the file will be ignored and dropped from the layout. '''
return True
def _get_files(self, root):
''' Returns all files in directory (non-recursively). '''
return os.listdir(root)
def _make_file_object(self, root, f):
''' Initialize a new File oject from a directory and filename. Extend
in subclasses as needed. '''
return File(join(root, f))
def _reset_index(self):
    """Clear the Layout's file index and every tracked Entity's file map."""
    self.files = {}
    for entity in self.entities.values():
        entity.files = {}
def _index_file(self, root, f, domains, update_layout=True):
# Create the file object--allows for subclassing
f = self._make_file_object(root, f)
for domain in listify(domains):
domain = self.domains[domain]
match_vals = {}
for e in domain.entities.values():
m = e.match_file(f)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for k, (ent, val) in match_vals.items():
f.tags[k] = Tag(ent, val)
if update_layout:
ent.add_file(f.path, val)
if update_layout:
domain.add_file(f)
if update_layout:
f.domains = domains
self.files[f.path] = f
return f
def _find_entity(self, entity):
''' Find an Entity instance by name. Checks both name and id fields.'''
if entity in self.entities:
return self.entities[entity]
_ent = [e for e in self.entities.values() if e.name == entity]
if len(_ent) > 1:
raise ValueError("Entity name '%s' matches %d entities. To "
"avoid ambiguity, please prefix the entity "
"name with its domain (e.g., 'bids.%s'." %
(entity, len(_ent), entity))
if _ent:
return _ent[0]
raise ValueError("No entity '%s' found." % entity)
def index(self):
self._reset_index()
def _index_dir(dir_, domains):
contents = [join(dir_, f) for f in self._get_files(dir_)]
# Check for domain config file
config_file = join(dir_, self.config_filename)
if exists(config_file):
new_dom = self._get_or_load_domain(config_file)
if new_dom not in domains:
domains.append(new_dom)
contents.remove(config_file)
contents = filter(lambda x: self._check_inclusions(x, domains),
contents)
# If the directory was explicitly passed in Layout init,
# overwrite the current set of domains with what was passed
domains = self._paths_to_index.get(dir_, domains)
for f in contents:
full_path = join(dir_, f)
if isdir(full_path):
if self._validate_dir(full_path):
_index_dir(full_path, list(domains))
elif self._validate_file(full_path):
_dir, _base = split(full_path)
dom_names = [d.name for d in domains]
self._index_file(_dir, _base, dom_names)
# Index each directory
for path, domains in self._paths_to_index.items():
_index_dir(path, list(domains))
def save_index(self, filename):
''' Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
'''
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val)
def add_entity(self, domain, **kwargs):
''' Add a new Entity to tracking. '''
# Set the entity's mapping func if one was specified
map_func = kwargs.get('map_func', None)
if map_func is not None and not callable(kwargs['map_func']):
if self.entity_mapper is None:
raise ValueError("Mapping function '%s' specified for Entity "
"'%s', but no entity mapper was passed when "
"initializing the current Layout. Please make"
" sure the 'entity_mapper' argument is set." %
(map_func, kwargs['name']))
map_func = getattr(self.entity_mapper, kwargs['map_func'])
kwargs['map_func'] = map_func
ent = Entity(domain=domain, **kwargs)
domain.add_entity(ent)
if ent.mandatory:
self.mandatory.add(ent.id)
if ent.directory is not None:
ent.directory = ent.directory.replace('{{root}}', self.root)
self.entities[ent.id] = ent
for alias in ent.aliases:
self.entities[alias] = ent
if self.dynamic_getters:
func = partial(getattr(self, 'get'), target=ent.name,
return_type='id')
func_name = inflect.engine().plural(ent.name)
setattr(self, 'get_%s' % func_name, func)
def get(self, return_type='tuple', target=None, extensions=None,
domains=None, regex_search=None, **kwargs):
"""
Retrieve files and/or metadata from the current Layout.
Args:
return_type (str): Type of result to return. Valid values:
'tuple': returns a list of namedtuples containing file name as
well as attribute/value pairs for all named entities.
'file': returns a list of matching filenames.
'dir': returns a list of directories.
'id': returns a list of unique IDs. Must be used together with
a valid target.
'obj': returns a list of matching File objects.
target (str): The name of the target entity to get results for
(if return_type is 'dir' or 'id').
extensions (str, list): One or more file extensions to filter on.
Files with any other extensions will be excluded.
domains (list): Optional list of domain names to scan for files.
If None, all available domains are scanned.
regex_search (bool or None): Whether to require exact matching
(False) or regex search (True) when comparing the query string
to each entity. If None (default), uses the value found in
self.
kwargs (dict): Any optional key/values to filter the entities on.
Keys are entity names, values are regexes to filter on. For
example, passing filter={ 'subject': 'sub-[12]'} would return
only files that match the first two subjects.
Returns:
A named tuple (default) or a list (see return_type for details).
"""
if regex_search is None:
regex_search = self.regex_search
result = []
filters = {}
filters.update(kwargs)
for filename, file in self.files.items():
if not file._matches(filters, extensions, domains, regex_search):
continue
result.append(file)
# Convert to relative paths if needed
if not self.absolute_paths:
for i, f in enumerate(result):
f = copy(f)
f.path = relpath(f.path, self.root)
result[i] = f
if return_type == 'file':
return natural_sort([f.path for f in result])
if return_type == 'tuple':
result = [r.as_named_tuple() for r in result]
return natural_sort(result, field='filename')
if return_type.startswith('obj'):
return result
else:
valid_entities = self.get_domain_entities(domains)
if target is None:
raise ValueError('If return_type is "id" or "dir", a valid '
'target entity must also be specified.')
result = [x for x in result if target in x.entities]
if return_type == 'id':
result = list(set([x.entities[target] for x in result]))
return natural_sort(result)
elif return_type == 'dir':
template = valid_entities[target].directory
if template is None:
raise ValueError('Return type set to directory, but no '
'directory template is defined for the '
'target entity (\"%s\").' % target)
# Construct regex search pattern from target directory template
to_rep = re.findall('\{(.*?)\}', template)
for ent in to_rep:
patt = valid_entities[ent].pattern
template = template.replace('{%s}' % ent, patt)
template += '[^\%s]*$' % os.path.sep
matches = [f.dirname for f in result
if re.search(template, f.dirname)]
return natural_sort(list(set(matches)))
else:
raise ValueError("Invalid return_type specified (must be one "
"of 'tuple', 'file', 'id', or 'dir'.")
def unique(self, entity):
    """Return a list of unique values for the named entity.

    Args:
        entity (str): The name of the entity to retrieve unique values of.
    """
    ent = self._find_entity(entity)
    return ent.unique()
def count(self, entity, files=False):
    """Return the count of unique values or files for the named entity.

    Args:
        entity (str): The name of the entity.
        files (bool): If True, counts the number of filenames that contain
            at least one value of the entity, rather than the number of
            unique values of the entity.
    """
    target = self._find_entity(entity)
    return target.count(files)
def as_data_frame(self, **kwargs):
"""
Return information for all Files tracked in the Layout as a pandas
DataFrame.
Args:
kwargs: Optional keyword arguments passed on to get(). This allows
one to easily select only a subset of files for export.
Returns:
A pandas DataFrame, where each row is a file, and each column is
a tracked entity. NaNs are injected whenever a file has no
value for a given attribute.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("What are you doing trying to export a Layout "
"as a pandas DataFrame when you don't have "
"pandas installed? Eh? Eh?")
if kwargs:
files = self.get(return_type='obj', **kwargs)
else:
files = self.files.values()
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
return data
def get_file(self, f):
''' Return File object for the specified path. '''
return self.files[f]
def get_nearest(self, path, return_type='file', strict=True, all_=False,
ignore_strict_entities=None, full_search=False, **kwargs):
''' Walk up the file tree from the specified path and return the
nearest matching file(s).
Args:
path (str): The file to search from.
return_type (str): What to return; must be one of 'file' (default)
or 'tuple'.
strict (bool): When True, all entities present in both the input
path and the target file(s) must match perfectly. When False,
files will be ordered by the number of matching entities, and
partial matches will be allowed.
all_ (bool): When True, returns all matching files. When False
(default), only returns the first match.
ignore_strict_entities (list): Optional list of entities to
exclude from strict matching when strict is True. This allows
one to search, e.g., for files of a different type while
matching all other entities perfectly by passing
ignore_strict_entities=['type'].
full_search (bool): If True, searches all indexed files, even if
they don't share a common root with the provided path. If
False, only files that share a common root will be scanned.
kwargs: Optional keywords to pass on to .get().
'''
entities = {}
for ent in self.entities.values():
m = ent.regex.search(path)
if m:
entities[ent.name] = ent._astype(m.group(1))
# Remove any entities we want to ignore when strict matching is on
if strict and ignore_strict_entities is not None:
for k in ignore_strict_entities:
entities.pop(k, None)
results = self.get(return_type='file', **kwargs)
folders = defaultdict(list)
for filename in results:
f = self.get_file(filename)
folders[f.dirname].append(f)
def count_matches(f):
f_ents = f.entities
keys = set(entities.keys()) & set(f_ents.keys())
shared = len(keys)
return [shared, sum([entities[k] == f_ents[k] for k in keys])]
matches = []
search_paths = []
while True:
if path in folders and folders[path]:
search_paths.append(path)
parent = dirname(path)
if parent == path:
break
path = parent
if full_search:
unchecked = set(folders.keys()) - set(search_paths)
search_paths.extend(path for path in unchecked if folders[path])
for path in search_paths:
# Sort by number of matching entities. Also store number of
# common entities, for filtering when strict=True.
num_ents = [[f] + count_matches(f) for f in folders[path]]
# Filter out imperfect matches (i.e., where number of common
# entities does not equal number of matching entities).
if strict:
num_ents = [f for f in num_ents if f[1] == f[2]]
num_ents.sort(key=lambda x: x[2], reverse=True)
if num_ents:
matches.append(num_ents[0][0])
if not all_:
break
matches = [m.path if return_type == 'file' else m.as_named_tuple()
for m in matches]
return matches if all_ else matches[0] if matches else None
def clone(self):
return deepcopy(self)
def parse_file_entities(self, filename, domains=None):
root, f = dirname(filename), basename(filename)
if domains is None:
if not root:
msg = ("If a relative path is provided as the filename "
"argument, you *must* specify the names of the "
"domains whose entities are to be extracted. "
"Available domains for the current layout are: %s"
% list(self.domains.keys()))
raise ValueError(msg)
domains = list(self.domains.keys())
result = self._index_file(root, f, domains, update_layout=False)
return result.entities
def build_path(self, source, path_patterns=None, strict=False,
domains=None):
''' Constructs a target filename for a file or dictionary of entities.
Args:
source (str, File, dict): The source data to use to construct the
new file path. Must be one of:
- A File object
- A string giving the path of a File contained within the
current Layout.
- A dict of entities, with entity names in keys and values in
values
path_patterns (list): Optional path patterns to use to construct
the new file path. If None, the Layout-defined patterns will
be used.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (str, list): Optional name(s) of domain(s) to scan for
path patterns. If None, all domains are scanned. If two or more
domains are provided, the order determines the precedence of
path patterns (i.e., earlier domains will have higher
precedence).
'''
if isinstance(source, six.string_types):
if source not in self.files:
source = join(self.root, source)
source = self.get_file(source)
if isinstance(source, File):
source = source.entities
if path_patterns is None:
if domains is None:
domains = list(self.domains.keys())
path_patterns = []
for dom in listify(domains):
path_patterns.extend(self.domains[dom].path_patterns)
return build_path(source, path_patterns, strict)
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **get_selectors):
    """
    Copies one or more Files to new locations defined by each File's
    entities and the specified path_patterns.

    Args:
        files (list): Optional list of File objects to write out. If none
            provided, use files from running a get() query using remaining
            **get_selectors.
        path_patterns (str, list): Write patterns to pass to each file's
            write_file method.
        symbolic_links (bool): Whether to copy each file as a symbolic link
            or a deep copy.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to the Layout's root.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when an output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 0. Default is 'fail'.
        **get_selectors (kwargs): Optional keyword arguments to pass into
            a get() query.
    """
    _files = self.get(return_type='objects', **get_selectors)
    if files:
        _files = list(set(files).intersection(_files))

    # Bug fix: honor a caller-supplied root. Previously the 'root'
    # argument was accepted but silently ignored in favor of self.root.
    if root is None:
        root = self.root

    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
|
def listify(obj, ignore=(list, tuple, type(None))):
    """Wrap ``obj`` in a list unless it is already an instance of one of
    the types in ``ignore`` (by default: list, tuple, or NoneType).

    Provides a simple way to accept flexible arguments.
    """
    if isinstance(obj, ignore):
        return obj
    return [obj]
to accept flexible arguments. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/utils.py#L34-L37 | null | import os
import re
from os.path import join, dirname, basename
def natural_sort(l, field=None):
    """Sort *l* in natural (human) order, e.g. ['a10', 'a2'] -> ['a2', 'a10'].

    Numeric runs embedded in each key are compared as integers rather than
    lexicographically, and alphabetic text is compared case-insensitively.

    Args:
        l (iterable): The items to sort.
        field (str): Optional attribute name; when given, each item's
            ``field`` attribute (rather than the item itself) is used as
            the sort key.

    Returns:
        A new, naturally-sorted list.

    Based on the snippet found at http://stackoverflow.com/a/4836734/2445984
    """
    def convert(text):
        # Compare digit runs numerically, everything else case-insensitively.
        return int(text) if text.isdigit() else text.lower()

    def alphanum_key(key):
        if field is not None:
            key = getattr(key, field)
        if not isinstance(key, str):
            key = str(key)
        # Split into alternating non-digit/digit chunks, e.g.
        # 'a10b' -> ['a', '10', 'b'].
        return [convert(c) for c in re.split(r'([0-9]+)', key)]

    return sorted(l, key=alphanum_key)
def splitext(path):
    """Split a path into its stem plus every extension component.

    Unlike ``os.path.splitext``, this correctly handles directory names
    that contain dots and returns *all* extensions of the basename, e.g.
    ``'a/b.tar.gz'`` -> ``['a/b', 'tar', 'gz']``.

    From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module
    """
    stem, *extensions = basename(path).split(os.extsep)
    return [join(dirname(path), stem)] + extensions
|
def build_path(entities, path_patterns, strict=False):
    """
    Constructs a path given a set of entities and a list of potential
    filename patterns to use.

    Args:
        entities (dict): A dictionary mapping entity names to entity values.
        path_patterns (str, list): One or more filename patterns to try.
            Entities are denoted by curly braces, optional portions by
            square brackets, accepted values by angle brackets, and
            defaults after a pipe (e.g. '{type<image>|bold}').
        strict (bool): If True, all passed entities must be matched inside a
            pattern in order to be a valid match. If False, extra entities
            will be ignored so long as all mandatory entities are found.

    Returns:
        A constructed path built from the first pattern that matches, or
        None if no pattern could be filled in.
    """
    if isinstance(path_patterns, string_types):
        path_patterns = [path_patterns]

    # Loop over available patterns; return the first one that matches all.
    # NOTE: regex literals are raw strings; the original used plain strings
    # with invalid escape sequences ('\{', '\['), which raise SyntaxWarning
    # on modern Python.
    for pattern in path_patterns:
        # If strict, all entities must be contained in the pattern
        if strict:
            defined = re.findall(r'\{(.*?)(?:<[^>]+>)?\}', pattern)
            if set(entities.keys()) - set(defined):
                continue
        new_path = pattern
        optional_patterns = re.findall(r'\[(.*?)\]', pattern)
        # First fill in optional chunks; unmatched optional chunks are
        # simply dropped rather than failing the whole pattern.
        for optional_pattern in optional_patterns:
            optional_chunk = replace_entities(entities, optional_pattern) or ''
            new_path = new_path.replace('[%s]' % optional_pattern,
                                        optional_chunk)
        # Replace remaining (mandatory) entities
        new_path = replace_entities(entities, new_path)
        if new_path:
            return new_path

    return None
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
the pipe operator. E.g., (e.g., {type<image>|bold} would only match
the pattern if the entity 'type' was passed and its value is
"image", otherwise the default value "bold" will be used).
Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
Result 2: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns. | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/extensions/writable.py#L55-L104 | [
"def replace_entities(entities, pattern):\n \"\"\"\n Replaces all entity names in a given pattern with the corresponding\n values provided by entities.\n\n Args:\n entities (dict): A dictionary mapping entity names to entity values.\n pattern (str): A path pattern that contains entity names denoted\n by curly braces. Optional portions denoted by square braces.\n For example: 'sub-{subject}/[var-{name}/]{id}.csv'\n Accepted entity values, using regex matching, denoted within angle\n brackets.\n For example: 'sub-{subject<01|02>}/{task}.csv'\n\n Returns:\n A new string with the entity values inserted where entity names\n were denoted in the provided pattern.\n \"\"\"\n ents = re.findall('\\{(.*?)\\}', pattern)\n new_path = pattern\n for ent in ents:\n match = re.search('([^|<]+)(<.*?>)?(\\|.*)?', ent)\n if match is None:\n return None\n name, valid, default = match.groups()\n default = default[1:] if default is not None else default\n\n if name in entities:\n if valid is not None:\n ent_val = str(entities[name])\n if not re.match(valid[1:-1], ent_val):\n if default is None:\n return None\n entities[name] = default\n\n ent_val = entities.get(name, default)\n if ent_val is None:\n return None\n new_path = new_path.replace('{%s}' % ent, str(ent_val))\n\n return new_path\n"
] | import logging
import os
import re
import sys
from grabbit.utils import splitext
from os.path import join, dirname, exists, islink, isabs, isdir
from six import string_types
__all__ = ['replace_entities', 'build_path', 'write_contents_to_file']
def replace_entities(entities, pattern):
    """
    Replaces all entity names in a given pattern with the corresponding
    values provided by entities.

    Note: may mutate *entities* in place — an entity whose value fails its
    inline regex check is overwritten with the pattern's default value.

    Args:
        entities (dict): A dictionary mapping entity names to entity values.
        pattern (str): A path pattern that contains entity names denoted
            by curly braces. Optional portions denoted by square braces.
            For example: 'sub-{subject}/[var-{name}/]{id}.csv'
            Accepted entity values, using regex matching, denoted within
            angle brackets. For example: 'sub-{subject<01|02>}/{task}.csv'

    Returns:
        A new string with the entity values inserted where entity names
        were denoted in the provided pattern, or None when an entity is
        missing with no default or fails validation with no default.
    """
    # Raw strings: '\{' is not a recognized escape sequence and raises a
    # SyntaxWarning from Python 3.12 (DeprecationWarning before that).
    ents = re.findall(r'\{(.*?)\}', pattern)
    new_path = pattern
    for ent in ents:
        # Each entity spec is: name[<valid-regex>][|default]
        match = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', ent)
        if match is None:
            return None
        name, valid, default = match.groups()
        default = default[1:] if default is not None else default

        if name in entities:
            if valid is not None:
                ent_val = str(entities[name])
                # Value fails its inline validation -> fall back to default
                if not re.match(valid[1:-1], ent_val):
                    if default is None:
                        return None
                    entities[name] = default

        ent_val = entities.get(name, default)
        if ent_val is None:
            return None
        new_path = new_path.replace('{%s}' % ent, str(ent_val))

    return new_path
def write_contents_to_file(path, contents=None, link_to=None,
                           content_mode='text', root=None, conflicts='fail'):
    """
    Writes provided contents (or creates a symlink) at a destination path.

    Args:
        path (str): Destination path of the desired contents.
        contents (str): Raw text or binary encoded string of contents to
            write to the new path. An explicitly empty string creates an
            empty file.
        link_to (str): Optional path with which to create a symbolic link
            to. Used as an alternative to, and takes priority over, the
            contents argument.
        content_mode (str): Either 'text' or 'binary' to indicate the
            writing mode for the new file. Only relevant if contents is
            provided.
        root (str): Optional root directory that all patterns are relative
            to. Defaults to current working directory.
        conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
            that defines the desired action when the output path already
            exists. 'fail' raises an exception; 'skip' does nothing;
            'overwrite' overwrites the existing file; 'append' adds a
            suffix to each file copy, starting with 1. Default is 'fail'.

    Raises:
        ValueError: on an existing path with conflicts='fail', an invalid
            conflicts value, or when neither contents nor link_to is given.
    """
    if root is None and not isabs(path):
        root = os.getcwd()

    if root:
        path = join(root, path)

    if exists(path) or islink(path):
        if conflicts == 'fail':
            msg = 'A file at path {} already exists.'
            raise ValueError(msg.format(path))
        elif conflicts == 'skip':
            msg = 'A file at path {} already exists, skipping writing file.'
            # logging.warn is a deprecated alias for logging.warning
            logging.warning(msg.format(path))
            return
        elif conflicts == 'overwrite':
            if isdir(path):
                logging.warning('New path is a directory, not going to '
                                'overwrite it, skipping instead.')
                return
            os.remove(path)
        elif conflicts == 'append':
            i = 1
            while i < sys.maxsize:
                path_splits = splitext(path)
                path_splits[0] = path_splits[0] + '_%d' % i
                appended_filename = os.extsep.join(path_splits)
                if not exists(appended_filename) and \
                        not islink(appended_filename):
                    path = appended_filename
                    break
                i += 1
        else:
            raise ValueError('Did not provide a valid conflicts parameter')

    target_dir = dirname(path)
    # Guard against dirname == '' (file in the current directory), which
    # would make os.makedirs raise.
    if target_dir and not exists(target_dir):
        os.makedirs(target_dir)

    if link_to:
        os.symlink(link_to, path)
    elif contents is not None:
        # 'is not None' rather than truthiness, so an explicitly empty
        # string still creates the (empty) file instead of raising.
        mode = 'wb' if content_mode == 'binary' else 'w'
        with open(path, mode) as f:
            f.write(contents)
    else:
        raise ValueError('One of contents or link_to must be provided.')
|
alpha-xone/xone | xone/utils.py | trade_day | python | def trade_day(dt, cal='US'):
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1] | Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24' | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L52-L70 | [
"def trading_dates(start, end, calendar='US'):\n \"\"\"\n Trading dates for given exchange\n\n Args:\n start: start date\n end: end date\n calendar: exchange as string\n\n Returns:\n pd.DatetimeIndex: datetime index\n\n Examples:\n >>> bus_dates = ['2018-12-24', '2018-12-26', '2018-12-27']\n >>> trd_dates = trading_dates(start='2018-12-23', end='2018-12-27')\n >>> assert len(trd_dates) == len(bus_dates)\n >>> assert pd.Series(trd_dates == pd.DatetimeIndex(bus_dates)).all()\n \"\"\"\n kw = dict(start=pd.Timestamp(start, tz='UTC').date(), end=pd.Timestamp(end, tz='UTC').date())\n us_cal = getattr(sys.modules[__name__], f'{calendar}TradingCalendar')()\n return pd.bdate_range(**kw).drop(us_cal.holidays(**kw))\n"
] | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
    """
    Collapse a scalar or array-like into a list of unique values,
    preserving first-seen order.

    Args:
        iterable: any array or value

    Returns:
        list: list of unique values

    Examples:
        >>> tolist('xyz')
        ['xyz']
        >>> tolist(['ab', 'cd', 'xy', 'ab'])
        ['ab', 'cd', 'xy']
    """
    series = pd.Series(iterable)
    deduped = series.drop_duplicates()
    return deduped.tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
    """
    Render any date-like value as a formatted string.

    Args:
        dt: any date-like input accepted by pd.Timestamp
        fmt: strftime output format

    Returns:
        str: formatted date

    Examples:
        >>> fmt_dt(dt='2018-12')
        '2018-12-01'
        >>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
        '20181231'
    """
    stamp = pd.Timestamp(dt)
    return stamp.strftime(fmt)
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
    """
    Current time or date in the requested representation.

    Args:
        typ: one of ['date', 'time', 'time_path', 'raw', '']
        tz: timezone of the returned value
        trading: snap dates back to the latest trading day
        cal: trading calendar passed through to trade_day

    Returns:
        str for 'date' / 'time' / 'time_path', pd.Timestamp for 'raw',
        datetime.date otherwise
    """
    dt = pd.Timestamp('now', tz=tz)

    if typ == 'date':
        if trading:
            return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
        return dt.strftime('%Y-%m-%d')

    if typ == 'time':
        return dt.strftime('%Y-%m-%d %H:%M:%S')

    if typ == 'time_path':
        return dt.strftime('%Y-%m-%d/%H-%M-%S')

    if typ == 'raw':
        return dt

    # BUG FIX: the fallback branch previously called trade_day(dt) without
    # cal, silently ignoring a non-default trading calendar.
    return trade_day(dt=dt, cal=cal).date() if trading else dt.date()
def align_data(*args):
    """
    Align several DataFrames side by side on their (shared) index.

    Columns of the i-th input are renamed with an '_{i+1}' suffix, e.g.
    'price' of the first frame becomes 'price_1'. Columns of every frame
    after the first are forward-filled, and rows where the first frame has
    no data are dropped.

    Args:
        *args: DataFrames to be aligned

    Returns:
        pd.DataFrame: aligned data with renamed columns
    """
    res = pd.DataFrame(pd.concat([
        # Drop duplicated index entries before aligning, keeping the first
        d.loc[~d.index.duplicated(keep='first')].rename(
            columns=lambda vv: '%s_%d' % (vv, i + 1)
        ) for i, d in enumerate(args)
    ], axis=1))
    data_cols = [col for col in res.columns if col[-2:] == '_1']
    other_cols = [col for col in res.columns if col[-2:] != '_1']
    # .ffill() replaces fillna(method='pad'), which is deprecated in
    # pandas 2.x; behavior is identical.
    res.loc[:, other_cols] = res.loc[:, other_cols].ffill()
    return res.dropna(subset=data_cols)
def cat_data(data_kw):
    """
    Concatenate per-ticker DataFrames with the ticker as the top level of
    a column MultiIndex.

    Args:
        data_kw: dict mapping ticker -> pd.DataFrame

    Returns:
        pd.DataFrame with (ticker, column) MultiIndex columns; an empty
        DataFrame when data_kw is empty
    """
    if len(data_kw) == 0:
        return pd.DataFrame()

    pieces = []
    for ticker, data in data_kw.items():
        # Tag rows with the ticker, then pivot it into the column index
        tagged = data.assign(ticker=ticker).set_index('ticker', append=True)
        pieces.append(tagged.unstack('ticker').swaplevel(0, 1, axis=1))
    return pd.DataFrame(pd.concat(pieces, axis=1))
def flatten(iterable, maps=None, unique=False):
    """
    Flatten an arbitrarily nested iterable into a flat list.

    Args:
        iterable: any array or scalar value
        maps: optional mapping applied to each leaf item
        unique: drop duplicates (order not preserved)

    Returns:
        list: flattened list

    References:
        https://stackoverflow.com/a/40857703/1332656
    """
    if iterable is None:
        return []
    if maps is None:
        maps = dict()

    # Scalars are wrapped into a single-element list
    if isinstance(iterable, (str, int, float)):
        return [maps.get(iterable, iterable)]

    mapped = [maps.get(item, item) for item in _to_gen_(iterable)]
    if unique:
        return list(set(mapped))
    return mapped
def _to_gen_(iterable):
    """
    Recursively iterate lists and tuples, yielding leaf items.
    """
    # `Iterable` must come from collections.abc: the plain `collections`
    # alias was deprecated since Python 3.3 and removed in Python 3.10.
    from collections.abc import Iterable

    for elm in iterable:
        if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
            # Nested containers are flattened via the public helper
            yield from flatten(elm)
        else:
            yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
    """
    Build a DataFrame from a list of dicts while preserving key order.

    Dict in Python 3.6 keeps insertion order, but could not be relied
    upon; from Python 3.7 this helper is redundant.

    Args:
        data_list: list of dict
        exc_cols: columns to exclude
        **kwargs: forwarded to pd.DataFrame

    Returns:
        pd.DataFrame
    """
    from collections import OrderedDict

    ordered_rows = pd.Series(data_list).apply(OrderedDict).tolist()
    frame = pd.DataFrame(ordered_rows, **kwargs)
    drop_cols = [] if exc_cols is None else exc_cols
    return frame.drop(columns=drop_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
    """
    Fit an interpolated curve through (x, y) and resample it on a finer
    grid.

    Args:
        x: x-values
        y: y-values; a pd.DataFrame is fitted column by column
        step: step size of the resampled grid
        val_min: lower clip bound of the result
        val_max: upper clip bound of the result
        kind: interpolation kind passed to scipy.interpolate.interp1d
        **kwargs: additional parameters forwarded to interp1d

    Returns:
        pd.Series (or pd.DataFrame) of interpolated, clipped values,
        indexed by the new grid
    """
    from scipy.interpolate import interp1d
    from collections import OrderedDict

    if isinstance(y, pd.DataFrame):
        # Fit each column independently, preserving column order
        per_col = OrderedDict()
        for col in y.columns:
            per_col[col] = spline_curve(
                x, y.loc[:, col], step=step,
                val_min=val_min, val_max=val_max, kind=kind,
            )
        return pd.DataFrame(per_col)

    curve = interp1d(x, y, kind=kind, **kwargs)
    # + step / 2 so the endpoint x.max() is included in the grid
    grid = np.arange(x.min(), x.max() + step / 2., step=step)
    series = pd.Series(grid, index=grid, name=getattr(y, 'name', None))
    return series.apply(curve).clip(val_min, val_max)
def func_scope(func):
    """
    Fully qualified name of *func* as '<module>.<name>'.

    Args:
        func: python function

    Returns:
        str: module_name.func_name

    Examples:
        >>> func_scope(json.dump)
        'json.dump'
    """
    module = sys.modules[func.__module__]
    return '{}.{}'.format(module.__name__, func.__name__)
def format_float(digit=0, is_pct=False):
    """
    Build a float formatter suitable for pandas display options.

    Args:
        digit: number of digits to keep; a negative value adds a leading
            space in front of positive percentages
        is_pct: format as a percentage

    Returns:
        callable mapping a float to its display string ('NaN' for NaN;
        '-' padding for zero in the non-percentage case)

    Examples:
        >>> format_float(0)(1e5)
        '100,000'
        >>> format_float(-1, True)(.2)
        ' 20.0%'
    """
    if is_pct:
        lead = ' ' if digit < 0 else ''
        pct_fmt = '{:' + lead + '.' + str(abs(int(digit))) + '%}'

        def _fmt_pct(vv):
            return 'NaN' if np.isnan(vv) else pct_fmt.format(vv)

        return _fmt_pct

    num_fmt = '{:,.' + str(digit) + 'f}'
    zero_pad = '-' + ' ' * abs(digit)

    def _fmt_num(vv):
        if np.isnan(vv):
            return 'NaN'
        return num_fmt.format(vv) if vv else zero_pad

    return _fmt_num
class FString(object):
    """
    Lazily-formatted string: formatting happens in __str__, resolving
    names against the *caller's* scope via frame introspection.
    """

    def __init__(self, str_fmt):
        # str_fmt: a str.format-style template, e.g. '{path}/{file}.parq'
        self.str_fmt = str_fmt

    def __str__(self):
        # Build the format namespace from the caller's globals, overridden
        # by its locals. NOTE(review): this depends on being evaluated
        # exactly one stack level below the scope whose variables should
        # be substituted — fragile if wrapped in another helper.
        kwargs = inspect.currentframe().f_back.f_globals.copy()
        kwargs.update(inspect.currentframe().f_back.f_locals)
        return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
    """
    Delayed evaluation of f-strings

    Args:
        fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
        **kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'

    Returns:
        FString object

    References:
        https://stackoverflow.com/a/42497694/1332656
        https://stackoverflow.com/a/4014070/1332656

    Examples:
        >>> fmt = '{data_path}/{data_file}.parq'
        >>> fstr(fmt, data_path='your/data/path', data_file='sample')
        'your/data/path/sample.parq'
    """
    # Inject kwargs into this frame's locals so FString.__str__ (which
    # inspects the caller's frame) can resolve them by name.
    # NOTE(review): mutating locals() inside a function is CPython-specific
    # behavior relying on the frame's f_locals cache; PEP 667 (Python 3.13)
    # changes these semantics — verify on newer interpreters.
    locals().update(kwargs)
    return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
    """
    Render a (possibly nested) dict as a single brace-wrapped string.

    Args:
        data: dict to render
        fmt: template applied to each key/value pair
        sep: separator between rendered pairs
        public_only: skip keys starting with an underscore

    Returns:
        str: string representation of dict

    Examples:
        >>> to_str(dict(b=1, a=0, c=2, _d=3))
        '{b=1, a=0, c=2}'
    """
    if public_only:
        visible = list(filter(lambda kk: kk[0] != '_', data.keys()))
    else:
        visible = list(data.keys())

    rendered = []
    for key, value in data.items():
        if key not in visible:
            continue
        if isinstance(value, dict):
            # Nested dicts are rendered recursively with the same settings
            rendered.append(to_str(data=value, fmt=fmt, sep=sep))
        else:
            rendered.append(fstr(fmt=fmt, key=key, value=value))
    return '{' + sep.join(rendered) + '}'
def inst_repr(instance, fmt='str', public_only=True):
    """
    Signature of a class instance built from its __dict__.

    From Python 3.6, dict is ordered, so the attribute order is preserved
    automatically.

    Args:
        instance: class instance
        fmt: 'json' or 'str'
        public_only: skip attributes starting with an underscore

    Returns:
        str: string or json representation; '' when the instance has no
        __dict__ or fmt is not recognized
    """
    if not hasattr(instance, '__dict__'):
        return ''

    attrs = instance.__dict__
    if public_only:
        attrs = {k: v for k, v in attrs.items() if k[0] != '_'}

    if fmt == 'json':
        return json.dumps(attrs, indent=2)
    if fmt == 'str':
        return to_str(attrs, public_only=public_only)
    return ''
def load_module(full_path):
    """
    Import and return a module from an explicit file-system path.

    Args:
        full_path: full path (forward or backward slashes) to a .py file

    Returns:
        the loaded python module

    Raises:
        ImportError: when the path does not end in '.py'

    References:
        https://stackoverflow.com/a/67692/1332656
    """
    from importlib import util

    normalized = full_path.replace('\\', '/')
    file_name = normalized.split('/')[-1]
    if file_name[-3:] != '.py':
        raise ImportError(f'not a python file: {file_name}')

    module_name = file_name[:-3]
    spec = util.spec_from_file_location(name=module_name, location=full_path)
    module = util.module_from_spec(spec=spec)
    # Execute the module body so its attributes are populated
    spec.loader.exec_module(module=module)
    return module
class AttributeDict(dict):
    """
    dict subclass whose keys are also reachable as attributes.

    Missing attribute reads raise KeyError (delegated to dict item
    access), matching the mapping semantics.

    References:
        https://stackoverflow.com/a/5021467/1332656
    """

    def __getattr__(self, name):
        # Attribute reads delegate to item lookup
        return dict.__getitem__(self, name)

    def __setattr__(self, name, value):
        # Attribute writes become item writes
        dict.__setitem__(self, name, value)
|
alpha-xone/xone | xone/utils.py | cur_time | python | def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date() | Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L73-L111 | [
"def trade_day(dt, cal='US'):\n \"\"\"\n Latest trading day w.r.t given dt\n\n Args:\n dt: date of reference\n cal: trading calendar\n\n Returns:\n pd.Timestamp: last trading day\n\n Examples:\n >>> trade_day('2018-12-25').strftime('%Y-%m-%d')\n '2018-12-24'\n \"\"\"\n from xone import calendar\n\n dt = pd.Timestamp(dt).date()\n return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]\n"
] | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
    """
    Latest trading day on or before the given date.

    Args:
        dt: date of reference
        cal: trading calendar

    Returns:
        pd.Timestamp: last trading day

    Examples:
        >>> trade_day('2018-12-25').strftime('%Y-%m-%d')
        '2018-12-24'
    """
    from xone import calendar

    ref = pd.Timestamp(dt).date()
    # A 10-day lookback window is wide enough to always contain at least
    # one trading day; take the latest one.
    window_start = ref - pd.Timedelta('10D')
    return calendar.trading_dates(start=window_start, end=ref, calendar=cal)[-1]
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
    """
    Recursively iterate lists and tuples, yielding leaf items.
    """
    # `Iterable` must come from collections.abc: the plain `collections`
    # alias was deprecated since Python 3.3 and removed in Python 3.10.
    from collections.abc import Iterable

    for elm in iterable:
        if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
            # Nested containers are flattened via the public helper
            yield from flatten(elm)
        else:
            yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | align_data | python | def align_data(*args):
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols) | Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791 | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L114-L167 | null | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | cat_data | python | def cat_data(data_kw):
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1)) | Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00 | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L170-L209 | null | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | to_frame | python | def to_frame(data_list, exc_cols=None, **kwargs):
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols) | Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L262-L293 | null | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | spline_curve | python | def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max) | Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00 | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L296-L346 | null | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | format_float | python | def format_float(digit=0, is_pct=False):
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
) | Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2) | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L369-L400 | null | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return ''
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/utils.py | inst_repr | python | def inst_repr(instance, fmt='str', public_only=True):
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return '' | Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
'' | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L469-L509 | [
"def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):\n \"\"\"\n Convert dict to string\n\n Args:\n data: dict\n fmt: how key and value being represented\n sep: how pairs of key and value are seperated\n public_only: if display public members only\n\n Returns:\n str: string representation of dict\n\n Examples:\n >>> test_dict = dict(b=1, a=0, c=2, _d=3)\n >>> to_str(test_dict)\n '{b=1, a=0, c=2}'\n >>> to_str(test_dict, sep='|')\n '{b=1|a=0|c=2}'\n >>> to_str(test_dict, public_only=False)\n '{b=1, a=0, c=2, _d=3}'\n \"\"\"\n if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))\n else: keys = list(data.keys())\n return '{' + sep.join([\n to_str(data=v, fmt=fmt, sep=sep)\n if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)\n for k, v in data.items() if k in keys\n ]) + '}'\n"
] | import numpy as np
import pandas as pd
import json
import time
import pytz
import inspect
import sys
DEFAULT_TZ = pytz.FixedOffset(-time.timezone / 60)
def tolist(iterable):
"""
Simpler implementation of flatten method
Args:
iterable: any array or value
Returns:
list: list of unique values
Examples:
>>> tolist('xyz')
['xyz']
>>> tolist(['ab', 'cd', 'xy', 'ab'])
['ab', 'cd', 'xy']
"""
return pd.Series(iterable).drop_duplicates().tolist()
def fmt_dt(dt, fmt='%Y-%m-%d'):
"""
Format date string
Args:
dt: any date format
fmt: output date format
Returns:
str: date format
Examples:
>>> fmt_dt(dt='2018-12')
'2018-12-01'
>>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')
'20181231'
"""
return pd.Timestamp(dt).strftime(fmt)
def trade_day(dt, cal='US'):
"""
Latest trading day w.r.t given dt
Args:
dt: date of reference
cal: trading calendar
Returns:
pd.Timestamp: last trading day
Examples:
>>> trade_day('2018-12-25').strftime('%Y-%m-%d')
'2018-12-24'
"""
from xone import calendar
dt = pd.Timestamp(dt).date()
return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date':
if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
else: return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return trade_day(dt).date() if trading else dt.date()
def align_data(*args):
"""
Resample and aligh data for defined frequency
Args:
*args: DataFrame of data to be aligned
Returns:
pd.DataFrame: aligned data with renamed columns
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> d1
price volume
2018-09-10 10:10:00+10:00 31.08 10166
2018-09-10 10:11:00+10:00 31.10 69981
2018-09-10 10:12:00+10:00 31.11 14343
2018-09-10 10:13:00+10:00 31.07 10096
2018-09-10 10:14:00+10:00 31.04 11506
2018-09-10 10:15:00+10:00 31.04 9718
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> d2
price volume
2018-09-10 10:10:00+10:00 70.81 4749
2018-09-10 10:11:00+10:00 70.78 6762
2018-09-10 10:12:00+10:00 70.85 4908
2018-09-10 10:13:00+10:00 70.79 2002
2018-09-10 10:14:00+10:00 70.79 9170
2018-09-10 10:15:00+10:00 70.79 9791
>>> align_data(d1, d2)
price_1 volume_1 price_2 volume_2
2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749
2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762
2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908
2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002
2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170
2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
"""
res = pd.DataFrame(pd.concat([
d.loc[~d.index.duplicated(keep='first')].rename(
columns=lambda vv: '%s_%d' % (vv, i + 1)
) for i, d in enumerate(args)
], axis=1))
data_cols = [col for col in res.columns if col[-2:] == '_1']
other_cols = [col for col in res.columns if col[-2:] != '_1']
res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')
return res.dropna(subset=data_cols)
def cat_data(data_kw):
"""
Concatenate data with ticker as sub column index
Args:
data_kw: key = ticker, value = pd.DataFrame
Returns:
pd.DataFrame
Examples:
>>> start = '2018-09-10T10:10:00'
>>> tz = 'Australia/Sydney'
>>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)
>>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]
>>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]
>>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)
>>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]
>>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]
>>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)
>>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2})
>>> sample.columns
MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['ticker', None])
>>> r = sample.transpose().iloc[:, :2]
>>> r.index.names = (None, None)
>>> r
2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00
BHP AU price 31.08 31.10
volume 10,166.00 69,981.00
RIO AU price 70.81 70.78
volume 4,749.00 6,762.00
"""
if len(data_kw) == 0: return pd.DataFrame()
return pd.DataFrame(pd.concat([
data.assign(ticker=ticker).set_index('ticker', append=True)
.unstack('ticker').swaplevel(0, 1, axis=1)
for ticker, data in data_kw.items()
], axis=1))
def flatten(iterable, maps=None, unique=False):
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
def to_frame(data_list, exc_cols=None, **kwargs):
"""
Dict in Python 3.6 keeps insertion order, but cannot be relied upon
This method is to keep column names in order
In Python 3.7 this method is redundant
Args:
data_list: list of dict
exc_cols: exclude columns
Returns:
pd.DataFrame
Example:
>>> d_list = [
... dict(sid=1, symbol='1 HK', price=89),
... dict(sid=700, symbol='700 HK', price=350)
... ]
>>> to_frame(d_list)
sid symbol price
0 1 1 HK 89
1 700 700 HK 350
>>> to_frame(d_list, exc_cols=['price'])
sid symbol
0 1 1 HK
1 700 700 HK
"""
from collections import OrderedDict
return pd.DataFrame(
pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs
).drop(columns=[] if exc_cols is None else exc_cols)
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs):
"""
Fit spline curve for given x, y values
Args:
x: x-values
y: y-values
step: step size for interpolation
val_min: minimum value of result
val_max: maximum value of result
kind: for scipy.interpolate.interp1d
Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’,
‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’
refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and
‘next’ simply return the previous or next value of the point) or as an integer specifying
the order of the spline interpolator to use. Default is ‘linear’.
**kwargs: additional parameters for interp1d
Returns:
pd.Series: fitted curve
Examples:
>>> x = pd.Series([1, 2, 3])
>>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)])
>>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate')
>>> r.round(2).index.tolist()
[1.0, 1.5, 2.0, 2.5, 3.0]
>>> r.round(2).tolist()
[3.0, 4.05, 7.39, 12.73, 18.0]
>>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4]))
>>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate')
>>> r_df.round(2)
a b
1.00 3.00 3.00
1.50 4.05 3.00
2.00 7.39 3.00
2.50 12.73 3.50
3.00 20.09 4.00
"""
from scipy.interpolate import interp1d
from collections import OrderedDict
if isinstance(y, pd.DataFrame):
return pd.DataFrame(OrderedDict([(col, spline_curve(
x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind
)) for col in y.columns]))
fitted_curve = interp1d(x, y, kind=kind, **kwargs)
new_x = np.arange(x.min(), x.max() + step / 2., step=step)
return pd.Series(
new_x, index=new_x, name=y.name if hasattr(y, 'name') else None
).apply(fitted_curve).clip(val_min, val_max)
def func_scope(func):
"""
Function scope name
Args:
func: python function
Returns:
str: module_name.func_name
Examples:
>>> func_scope(flatten)
'xone.utils.flatten'
>>> func_scope(json.dump)
'json.dump'
"""
cur_mod = sys.modules[func.__module__]
return f'{cur_mod.__name__}.{func.__name__}'
def format_float(digit=0, is_pct=False):
"""
Number display format for pandas
Args:
digit: number of digits to keep
if negative, add one space in front of positive pct
is_pct: % display
Returns:
lambda function to format floats
Examples:
>>> format_float(0)(1e5)
'100,000'
>>> format_float(1)(1e5)
'100,000.0'
>>> format_float(-1, True)(.2)
' 20.0%'
>>> format_float(-1, True)(-.2)
'-20.0%'
>>> pd.options.display.float_format = format_float(2)
"""
if is_pct:
space = ' ' if digit < 0 else ''
fmt = f'{{:{space}.{abs(int(digit))}%}}'
return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv)
else:
return lambda vv: 'NaN' if np.isnan(vv) else (
f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit)
)
class FString(object):
def __init__(self, str_fmt):
self.str_fmt = str_fmt
def __str__(self):
kwargs = inspect.currentframe().f_back.f_globals.copy()
kwargs.update(inspect.currentframe().f_back.f_locals)
return self.str_fmt.format(**kwargs)
def fstr(fmt, **kwargs):
"""
Delayed evaluation of f-strings
Args:
fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'
**kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'
Returns:
FString object
References:
https://stackoverflow.com/a/42497694/1332656
https://stackoverflow.com/a/4014070/1332656
Examples:
>>> fmt = '{data_path}/{data_file}.parq'
>>> fstr(fmt, data_path='your/data/path', data_file='sample')
'your/data/path/sample.parq'
"""
locals().update(kwargs)
return f'{FString(str_fmt=fmt)}'
def to_str(data: dict, fmt='{key}={value}', sep=', ', public_only=True):
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are seperated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/files.py').__name__
'files'
>>> load_module(f'{cur_path}/files.pyc')
Traceback (most recent call last):
ImportError: not a python file: files.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
class AttributeDict(dict):
"""
Dot access support for dict attributes
References:
https://stackoverflow.com/a/5021467/1332656
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
|
alpha-xone/xone | xone/profile.py | profile | python | def profile(func):
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
res = func(*args, **kwargs)
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return res
return inner | Decorator to profile functions with cProfile
Args:
func: python function
Returns:
profile report
References:
https://osf.io/upav8/ | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/profile.py#L6-L31 | null | import cProfile
import io
import pstats
|
alpha-xone/xone | xone/procs.py | run | python | def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):
if max_procs is None: max_procs = cpu_count()
kw_arr = saturate_kwargs(keys=keys, **kwargs)
if len(kw_arr) == 0: return
if isinstance(affinity, int):
win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)
task_queue = queue.Queue()
while len(kw_arr) > 0:
for _ in range(max_procs):
if len(kw_arr) == 0: break
kw = kw_arr.pop(0)
p = Process(target=func, kwargs=kw)
p.start()
sys.stdout.flush()
task_queue.put(p)
if show_proc:
signature = ', '.join([f'{k}={v}' for k, v in kw.items()])
print(f'[{func.__name__}] ({signature})')
while not task_queue.empty():
p = task_queue.get()
p.join() | Provide interface for multiprocessing
Args:
func: callable functions
keys: keys in kwargs that want to use process
max_procs: max number of processes
show_proc: whether to show process
affinity: CPU affinity
**kwargs: kwargs for func | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/procs.py#L16-L49 | [
"def saturate_kwargs(keys, **kwargs):\n \"\"\"\n Saturate all combinations of kwargs\n\n Args:\n keys: keys in kwargs that want to use process\n **kwargs: kwargs for func\n \"\"\"\n # Validate if keys are in kwargs and if they are iterable\n if isinstance(keys, str): keys = [keys]\n keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')]\n if len(keys) == 0: return []\n\n # Saturate coordinates of kwargs\n kw_corr = list(product(*(range(len(kwargs[k])) for k in keys)))\n\n # Append all possible values\n kw_arr = []\n for corr in kw_corr: kw_arr.append(\n dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))]))\n )\n\n # All combinations of kwargs of inputs\n for k in keys: kwargs.pop(k, None)\n kw_arr = [{**k, **kwargs} for k in kw_arr]\n\n return kw_arr\n"
] | import sys
import queue
import pytest
from multiprocessing import Process, cpu_count
from itertools import product
try:
import win32process
import win32api
except ImportError:
pytest.skip()
sys.exit(1)
def saturate_kwargs(keys, **kwargs):
"""
Saturate all combinations of kwargs
Args:
keys: keys in kwargs that want to use process
**kwargs: kwargs for func
"""
# Validate if keys are in kwargs and if they are iterable
if isinstance(keys, str): keys = [keys]
keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')]
if len(keys) == 0: return []
# Saturate coordinates of kwargs
kw_corr = list(product(*(range(len(kwargs[k])) for k in keys)))
# Append all possible values
kw_arr = []
for corr in kw_corr: kw_arr.append(
dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))]))
)
# All combinations of kwargs of inputs
for k in keys: kwargs.pop(k, None)
kw_arr = [{**k, **kwargs} for k in kw_arr]
return kw_arr
|
alpha-xone/xone | xone/procs.py | saturate_kwargs | python | def saturate_kwargs(keys, **kwargs):
# Validate if keys are in kwargs and if they are iterable
if isinstance(keys, str): keys = [keys]
keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')]
if len(keys) == 0: return []
# Saturate coordinates of kwargs
kw_corr = list(product(*(range(len(kwargs[k])) for k in keys)))
# Append all possible values
kw_arr = []
for corr in kw_corr: kw_arr.append(
dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))]))
)
# All combinations of kwargs of inputs
for k in keys: kwargs.pop(k, None)
kw_arr = [{**k, **kwargs} for k in kw_arr]
return kw_arr | Saturate all combinations of kwargs
Args:
keys: keys in kwargs that want to use process
**kwargs: kwargs for func | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/procs.py#L52-L78 | null | import sys
import queue
import pytest
from multiprocessing import Process, cpu_count
from itertools import product
try:
import win32process
import win32api
except ImportError:
pytest.skip()
sys.exit(1)
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):
"""
Provide interface for multiprocessing
Args:
func: callable functions
keys: keys in kwargs that want to use process
max_procs: max number of processes
show_proc: whether to show process
affinity: CPU affinity
**kwargs: kwargs for func
"""
if max_procs is None: max_procs = cpu_count()
kw_arr = saturate_kwargs(keys=keys, **kwargs)
if len(kw_arr) == 0: return
if isinstance(affinity, int):
win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)
task_queue = queue.Queue()
while len(kw_arr) > 0:
for _ in range(max_procs):
if len(kw_arr) == 0: break
kw = kw_arr.pop(0)
p = Process(target=func, kwargs=kw)
p.start()
sys.stdout.flush()
task_queue.put(p)
if show_proc:
signature = ', '.join([f'{k}={v}' for k, v in kw.items()])
print(f'[{func.__name__}] ({signature})')
while not task_queue.empty():
p = task_queue.get()
p.join()
|
alpha-xone/xone | xone/calendar.py | trading_dates | python | def trading_dates(start, end, calendar='US'):
kw = dict(start=pd.Timestamp(start, tz='UTC').date(), end=pd.Timestamp(end, tz='UTC').date())
us_cal = getattr(sys.modules[__name__], f'{calendar}TradingCalendar')()
return pd.bdate_range(**kw).drop(us_cal.holidays(**kw)) | Trading dates for given exchange
Args:
start: start date
end: end date
calendar: exchange as string
Returns:
pd.DatetimeIndex: datetime index
Examples:
>>> bus_dates = ['2018-12-24', '2018-12-26', '2018-12-27']
>>> trd_dates = trading_dates(start='2018-12-23', end='2018-12-27')
>>> assert len(trd_dates) == len(bus_dates)
>>> assert pd.Series(trd_dates == pd.DatetimeIndex(bus_dates)).all() | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/calendar.py#L22-L42 | null | import pandas as pd
import sys
from pandas.tseries import holiday
class USTradingCalendar(holiday.AbstractHolidayCalendar):
rules = [
holiday.Holiday('NewYearsDay', month=1, day=1, observance=holiday.nearest_workday),
holiday.USMartinLutherKingJr,
holiday.USPresidentsDay,
holiday.GoodFriday,
holiday.USMemorialDay,
holiday.Holiday('USIndependenceDay', month=7, day=4, observance=holiday.nearest_workday),
holiday.USLaborDay,
holiday.USThanksgivingDay,
holiday.Holiday('Christmas', month=12, day=25, observance=holiday.nearest_workday)
]
|
alpha-xone/xone | xone/logs.py | get_logger | python | def get_logger(
name_or_func, log_file='', level=logging.INFO, types='stream', **kwargs
):
if isinstance(level, str): level = getattr(logging, level.upper())
log_name = name_or_func if isinstance(name_or_func, str) else utils.func_scope(name_or_func)
logger = logging.getLogger(name=log_name)
logger.setLevel(level=level)
if not len(logger.handlers):
formatter = logging.Formatter(fmt=kwargs.get('fmt', LOG_FMT))
if 'file' in types:
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(fmt=formatter)
logger.addHandler(file_handler)
if 'stream' in types:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt=formatter)
logger.addHandler(stream_handler)
return logger | Generate logger
Args:
name_or_func: logger name or current running function
log_file: logger file
level: level of logs - debug, info, error
types: file or stream, or both
Returns:
logger
Examples:
>>> get_logger(name_or_func='download_data', level='debug', types='stream')
<Logger download_data (DEBUG)>
>>> get_logger(name_or_func='preprocess', log_file='pre.log', types='file|stream')
<Logger preprocess (INFO)> | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/logs.py#L8-L47 | [
"def func_scope(func):\n \"\"\"\n Function scope name\n\n Args:\n func: python function\n\n Returns:\n str: module_name.func_name\n\n Examples:\n >>> func_scope(flatten)\n 'xone.utils.flatten'\n >>> func_scope(json.dump)\n 'json.dump'\n \"\"\"\n cur_mod = sys.modules[func.__module__]\n return f'{cur_mod.__name__}.{func.__name__}'\n"
] | import logging
from xone import utils
LOG_FMT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
|
alpha-xone/xone | xone/cache.py | cache_file | python | def cache_file(symbol, func, has_date, root, date_type='date'):
cur_mod = sys.modules[func.__module__]
data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC'
cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)
if has_date:
if hasattr(cur_mod, 'FILE_WITH_DATE'):
file_fmt = getattr(cur_mod, 'FILE_WITH_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq'
else:
if hasattr(cur_mod, 'FILE_NO_DATE'):
file_fmt = getattr(cur_mod, 'FILE_NO_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}.parq'
return data_file(
file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol
) | Data file
Args:
symbol: symbol
func: use function to categorize data
has_date: contains date in data file
root: root path
date_type: parameters pass to utils.cur_time, [date, time, time_path, ...]
Returns:
str: date file | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/cache.py#L12-L43 | [
"def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):\n \"\"\"\n Current time\n\n Args:\n typ: one of ['date', 'time', 'time_path', 'raw', '']\n tz: timezone\n trading: check if current date is trading day\n cal: trading calendar\n\n Returns:\n relevant current time or date\n\n Examples:\n >>> cur_dt = pd.Timestamp('now')\n >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')\n True\n >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')\n True\n >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')\n True\n >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)\n True\n >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)\n True\n >>> cur_time(typ='', trading=False) == cur_dt.date()\n True\n \"\"\"\n dt = pd.Timestamp('now', tz=tz)\n\n if typ == 'date':\n if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')\n else: return dt.strftime('%Y-%m-%d')\n\n if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')\n if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')\n if typ == 'raw': return dt\n\n return trade_day(dt).date() if trading else dt.date()\n",
"def data_file(file_fmt, info=None, **kwargs):\n \"\"\"\n Data file name for given infomation\n\n Args:\n file_fmt: file format in terms of f-strings\n info: dict, to be hashed and then pass to f-string using 'hash_key'\n these info will also be passed to f-strings\n **kwargs: arguments for f-strings\n\n Returns:\n str: data file name\n \"\"\"\n if isinstance(info, dict):\n kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()\n kwargs.update(info)\n\n return utils.fstr(fmt=file_fmt, **kwargs)\n"
] | import hashlib
import json
import pandas as pd
import sys
import inspect
from functools import wraps
from xone import utils, files, logs
def update_data(func):
"""
Decorator to save data more easily. Use parquet as data format
Args:
func: function to load data from data source
Returns:
wrapped function
"""
default = dict([
(param.name, param.default)
for param in inspect.signature(func).parameters.values()
if param.default != getattr(inspect, '_empty')
])
@wraps(func)
def wrapper(*args, **kwargs):
default.update(kwargs)
kwargs.update(default)
cur_mod = sys.modules[func.__module__]
logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')
root_path = cur_mod.DATA_PATH
date_type = kwargs.pop('date_type', 'date')
save_static = kwargs.pop('save_static', True)
save_dynamic = kwargs.pop('save_dynamic', True)
symbol = kwargs.get('symbol')
file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)
d_file = cache_file(has_date=True, **file_kw)
s_file = cache_file(has_date=False, **file_kw)
cached = kwargs.pop('cached', False)
if cached and save_static and files.exists(s_file):
logger.info(f'Reading data from {s_file} ...')
return pd.read_parquet(s_file)
data = func(*args, **kwargs)
if save_static:
files.create_folder(s_file, is_file=True)
save_data(data=data, file_fmt=s_file, append=False)
logger.info(f'Saved data file to {s_file} ...')
if save_dynamic:
drop_dups = kwargs.pop('drop_dups', None)
files.create_folder(d_file, is_file=True)
save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)
logger.info(f'Saved data file to {d_file} ...')
return data
return wrapper
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
"""
Save data to file
Args:
data: pd.DataFrame
file_fmt: data file format in terms of f-strings
append: if append data to existing data
drop_dups: list, drop duplicates in columns
info: dict, infomation to be hashed and passed to f-strings
**kwargs: additional parameters for f-strings
Examples:
>>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> # save_data(
>>> # data, '{ROOT}/daily/{typ}.parq',
>>> # ROOT='tests/data', typ='earnings'
>>> # )
"""
d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)
if append and files.exists(d_file):
data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))
if drop_dups is not None:
data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)
if not data.empty: data.to_parquet(d_file)
return data
def data_file(file_fmt, info=None, **kwargs):
"""
Data file name for given infomation
Args:
file_fmt: file format in terms of f-strings
info: dict, to be hashed and then pass to f-string using 'hash_key'
these info will also be passed to f-strings
**kwargs: arguments for f-strings
Returns:
str: data file name
"""
if isinstance(info, dict):
kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()
kwargs.update(info)
return utils.fstr(fmt=file_fmt, **kwargs)
|
alpha-xone/xone | xone/cache.py | update_data | python | def update_data(func):
default = dict([
(param.name, param.default)
for param in inspect.signature(func).parameters.values()
if param.default != getattr(inspect, '_empty')
])
@wraps(func)
def wrapper(*args, **kwargs):
default.update(kwargs)
kwargs.update(default)
cur_mod = sys.modules[func.__module__]
logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')
root_path = cur_mod.DATA_PATH
date_type = kwargs.pop('date_type', 'date')
save_static = kwargs.pop('save_static', True)
save_dynamic = kwargs.pop('save_dynamic', True)
symbol = kwargs.get('symbol')
file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)
d_file = cache_file(has_date=True, **file_kw)
s_file = cache_file(has_date=False, **file_kw)
cached = kwargs.pop('cached', False)
if cached and save_static and files.exists(s_file):
logger.info(f'Reading data from {s_file} ...')
return pd.read_parquet(s_file)
data = func(*args, **kwargs)
if save_static:
files.create_folder(s_file, is_file=True)
save_data(data=data, file_fmt=s_file, append=False)
logger.info(f'Saved data file to {s_file} ...')
if save_dynamic:
drop_dups = kwargs.pop('drop_dups', None)
files.create_folder(d_file, is_file=True)
save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)
logger.info(f'Saved data file to {d_file} ...')
return data
return wrapper | Decorator to save data more easily. Use parquet as data format
Args:
func: function to load data from data source
Returns:
wrapped function | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/cache.py#L46-L99 | null | import hashlib
import json
import pandas as pd
import sys
import inspect
from functools import wraps
from xone import utils, files, logs
def cache_file(symbol, func, has_date, root, date_type='date'):
"""
Data file
Args:
symbol: symbol
func: use function to categorize data
has_date: contains date in data file
root: root path
date_type: parameters pass to utils.cur_time, [date, time, time_path, ...]
Returns:
str: date file
"""
cur_mod = sys.modules[func.__module__]
data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC'
cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)
if has_date:
if hasattr(cur_mod, 'FILE_WITH_DATE'):
file_fmt = getattr(cur_mod, 'FILE_WITH_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq'
else:
if hasattr(cur_mod, 'FILE_NO_DATE'):
file_fmt = getattr(cur_mod, 'FILE_NO_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}.parq'
return data_file(
file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol
)
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
"""
Save data to file
Args:
data: pd.DataFrame
file_fmt: data file format in terms of f-strings
append: if append data to existing data
drop_dups: list, drop duplicates in columns
info: dict, infomation to be hashed and passed to f-strings
**kwargs: additional parameters for f-strings
Examples:
>>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> # save_data(
>>> # data, '{ROOT}/daily/{typ}.parq',
>>> # ROOT='tests/data', typ='earnings'
>>> # )
"""
d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)
if append and files.exists(d_file):
data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))
if drop_dups is not None:
data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)
if not data.empty: data.to_parquet(d_file)
return data
def data_file(file_fmt, info=None, **kwargs):
"""
Data file name for given infomation
Args:
file_fmt: file format in terms of f-strings
info: dict, to be hashed and then pass to f-string using 'hash_key'
these info will also be passed to f-strings
**kwargs: arguments for f-strings
Returns:
str: data file name
"""
if isinstance(info, dict):
kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()
kwargs.update(info)
return utils.fstr(fmt=file_fmt, **kwargs)
|
alpha-xone/xone | xone/cache.py | save_data | python | def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)
if append and files.exists(d_file):
data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))
if drop_dups is not None:
data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)
if not data.empty: data.to_parquet(d_file)
return data | Save data to file
Args:
data: pd.DataFrame
file_fmt: data file format in terms of f-strings
append: if append data to existing data
drop_dups: list, drop duplicates in columns
info: dict, infomation to be hashed and passed to f-strings
**kwargs: additional parameters for f-strings
Examples:
>>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> # save_data(
>>> # data, '{ROOT}/daily/{typ}.parq',
>>> # ROOT='tests/data', typ='earnings'
>>> # ) | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/cache.py#L102-L128 | [
"def tolist(iterable):\n \"\"\"\n Simpler implementation of flatten method\n\n Args:\n iterable: any array or value\n\n Returns:\n list: list of unique values\n\n Examples:\n >>> tolist('xyz')\n ['xyz']\n >>> tolist(['ab', 'cd', 'xy', 'ab'])\n ['ab', 'cd', 'xy']\n \"\"\"\n return pd.Series(iterable).drop_duplicates().tolist()\n",
"def exists(path) -> bool:\n \"\"\"\n Check path or file exists (use os.path.exists)\n\n Args:\n path: path or file\n\n Examples\n >>> exists(f'{abspath(__file__, 1)}/xone/tests/files/test_1.json')\n True\n >>> exists(f'{abspath(__file__)}/tests/files/notfound.yml')\n False\n \"\"\"\n return os.path.exists(path=path)\n",
"def data_file(file_fmt, info=None, **kwargs):\n \"\"\"\n Data file name for given infomation\n\n Args:\n file_fmt: file format in terms of f-strings\n info: dict, to be hashed and then pass to f-string using 'hash_key'\n these info will also be passed to f-strings\n **kwargs: arguments for f-strings\n\n Returns:\n str: data file name\n \"\"\"\n if isinstance(info, dict):\n kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()\n kwargs.update(info)\n\n return utils.fstr(fmt=file_fmt, **kwargs)\n"
] | import hashlib
import json
import pandas as pd
import sys
import inspect
from functools import wraps
from xone import utils, files, logs
def cache_file(symbol, func, has_date, root, date_type='date'):
"""
Data file
Args:
symbol: symbol
func: use function to categorize data
has_date: contains date in data file
root: root path
date_type: parameters pass to utils.cur_time, [date, time, time_path, ...]
Returns:
str: date file
"""
cur_mod = sys.modules[func.__module__]
data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC'
cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)
if has_date:
if hasattr(cur_mod, 'FILE_WITH_DATE'):
file_fmt = getattr(cur_mod, 'FILE_WITH_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq'
else:
if hasattr(cur_mod, 'FILE_NO_DATE'):
file_fmt = getattr(cur_mod, 'FILE_NO_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}.parq'
return data_file(
file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol
)
def update_data(func):
"""
Decorator to save data more easily. Use parquet as data format
Args:
func: function to load data from data source
Returns:
wrapped function
"""
default = dict([
(param.name, param.default)
for param in inspect.signature(func).parameters.values()
if param.default != getattr(inspect, '_empty')
])
@wraps(func)
def wrapper(*args, **kwargs):
default.update(kwargs)
kwargs.update(default)
cur_mod = sys.modules[func.__module__]
logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')
root_path = cur_mod.DATA_PATH
date_type = kwargs.pop('date_type', 'date')
save_static = kwargs.pop('save_static', True)
save_dynamic = kwargs.pop('save_dynamic', True)
symbol = kwargs.get('symbol')
file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)
d_file = cache_file(has_date=True, **file_kw)
s_file = cache_file(has_date=False, **file_kw)
cached = kwargs.pop('cached', False)
if cached and save_static and files.exists(s_file):
logger.info(f'Reading data from {s_file} ...')
return pd.read_parquet(s_file)
data = func(*args, **kwargs)
if save_static:
files.create_folder(s_file, is_file=True)
save_data(data=data, file_fmt=s_file, append=False)
logger.info(f'Saved data file to {s_file} ...')
if save_dynamic:
drop_dups = kwargs.pop('drop_dups', None)
files.create_folder(d_file, is_file=True)
save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)
logger.info(f'Saved data file to {d_file} ...')
return data
return wrapper
def data_file(file_fmt, info=None, **kwargs):
"""
Data file name for given infomation
Args:
file_fmt: file format in terms of f-strings
info: dict, to be hashed and then pass to f-string using 'hash_key'
these info will also be passed to f-strings
**kwargs: arguments for f-strings
Returns:
str: data file name
"""
if isinstance(info, dict):
kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()
kwargs.update(info)
return utils.fstr(fmt=file_fmt, **kwargs)
|
alpha-xone/xone | xone/cache.py | data_file | python | def data_file(file_fmt, info=None, **kwargs):
if isinstance(info, dict):
kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()
kwargs.update(info)
return utils.fstr(fmt=file_fmt, **kwargs) | Data file name for given infomation
Args:
file_fmt: file format in terms of f-strings
info: dict, to be hashed and then pass to f-string using 'hash_key'
these info will also be passed to f-strings
**kwargs: arguments for f-strings
Returns:
str: data file name | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/cache.py#L131-L148 | [
"def fstr(fmt, **kwargs):\n \"\"\"\n Delayed evaluation of f-strings\n\n Args:\n fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'\n **kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'\n\n Returns:\n FString object\n\n References:\n https://stackoverflow.com/a/42497694/1332656\n https://stackoverflow.com/a/4014070/1332656\n\n Examples:\n >>> fmt = '{data_path}/{data_file}.parq'\n >>> fstr(fmt, data_path='your/data/path', data_file='sample')\n 'your/data/path/sample.parq'\n \"\"\"\n locals().update(kwargs)\n return f'{FString(str_fmt=fmt)}'\n"
] | import hashlib
import json
import pandas as pd
import sys
import inspect
from functools import wraps
from xone import utils, files, logs
def cache_file(symbol, func, has_date, root, date_type='date'):
"""
Data file
Args:
symbol: symbol
func: use function to categorize data
has_date: contains date in data file
root: root path
date_type: parameters pass to utils.cur_time, [date, time, time_path, ...]
Returns:
str: date file
"""
cur_mod = sys.modules[func.__module__]
data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC'
cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)
if has_date:
if hasattr(cur_mod, 'FILE_WITH_DATE'):
file_fmt = getattr(cur_mod, 'FILE_WITH_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq'
else:
if hasattr(cur_mod, 'FILE_NO_DATE'):
file_fmt = getattr(cur_mod, 'FILE_NO_DATE')
else:
file_fmt = '{root}/{typ}/{symbol}.parq'
return data_file(
file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol
)
def update_data(func):
"""
Decorator to save data more easily. Use parquet as data format
Args:
func: function to load data from data source
Returns:
wrapped function
"""
default = dict([
(param.name, param.default)
for param in inspect.signature(func).parameters.values()
if param.default != getattr(inspect, '_empty')
])
@wraps(func)
def wrapper(*args, **kwargs):
default.update(kwargs)
kwargs.update(default)
cur_mod = sys.modules[func.__module__]
logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')
root_path = cur_mod.DATA_PATH
date_type = kwargs.pop('date_type', 'date')
save_static = kwargs.pop('save_static', True)
save_dynamic = kwargs.pop('save_dynamic', True)
symbol = kwargs.get('symbol')
file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)
d_file = cache_file(has_date=True, **file_kw)
s_file = cache_file(has_date=False, **file_kw)
cached = kwargs.pop('cached', False)
if cached and save_static and files.exists(s_file):
logger.info(f'Reading data from {s_file} ...')
return pd.read_parquet(s_file)
data = func(*args, **kwargs)
if save_static:
files.create_folder(s_file, is_file=True)
save_data(data=data, file_fmt=s_file, append=False)
logger.info(f'Saved data file to {s_file} ...')
if save_dynamic:
drop_dups = kwargs.pop('drop_dups', None)
files.create_folder(d_file, is_file=True)
save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)
logger.info(f'Saved data file to {d_file} ...')
return data
return wrapper
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
"""
Save data to file
Args:
data: pd.DataFrame
file_fmt: data file format in terms of f-strings
append: if append data to existing data
drop_dups: list, drop duplicates in columns
info: dict, infomation to be hashed and passed to f-strings
**kwargs: additional parameters for f-strings
Examples:
>>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> # save_data(
>>> # data, '{ROOT}/daily/{typ}.parq',
>>> # ROOT='tests/data', typ='earnings'
>>> # )
"""
d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)
if append and files.exists(d_file):
data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))
if drop_dups is not None:
data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)
if not data.empty: data.to_parquet(d_file)
return data
|
alpha-xone/xone | xone/plots.py | plot_multi | python | def plot_multi(data, cols=None, spacing=.06, color_map=None, plot_kw=None, **kwargs):
import matplotlib.pyplot as plt
from pandas import plotting
if cols is None: cols = data.columns
if plot_kw is None: plot_kw = [{}] * len(cols)
if len(cols) == 0: return
num_colors = len(utils.flatten(cols))
# Get default color style from pandas
colors = getattr(getattr(plotting, '_style'), '_get_standard_colors')(num_colors=num_colors)
if color_map is None: color_map = dict()
fig = plt.figure()
ax, lines, labels, c_idx = None, [], [], 0
for n, col in enumerate(cols):
if isinstance(col, (list, tuple)):
ylabel = ' / '.join(cols[n])
color = [
color_map.get(cols[n][_ - c_idx], colors[_ % len(colors)])
for _ in range(c_idx, c_idx + len(cols[n]))
]
c_idx += len(col)
else:
ylabel = col
color = color_map.get(col, colors[c_idx % len(colors)])
c_idx += 1
if 'color' in plot_kw[n]: color = plot_kw[n].pop('color')
if ax is None:
# First y-axes
legend = plot_kw[0].pop('legend', kwargs.pop('legend', False))
ax = data.loc[:, col].plot(
label=col, color=color, legend=legend, zorder=n, **plot_kw[0], **kwargs
)
ax.set_ylabel(ylabel=ylabel)
line, label = ax.get_legend_handles_labels()
ax.spines['left'].set_edgecolor('#D5C4A1')
ax.spines['left'].set_alpha(.5)
else:
# Multiple y-axes
legend = plot_kw[n].pop('legend', False)
ax_new = ax.twinx()
ax_new.spines['right'].set_position(('axes', 1 + spacing * (n - 1)))
data.loc[:, col].plot(
ax=ax_new, label=col, color=color, legend=legend, zorder=n, **plot_kw[n]
)
ax_new.set_ylabel(ylabel=ylabel)
line, label = ax_new.get_legend_handles_labels()
ax_new.spines['right'].set_edgecolor('#D5C4A1')
ax_new.spines['right'].set_alpha(.5)
ax_new.grid(False)
# Proper legend position
lines += line
labels += label
fig.legend(lines, labels, loc=8, prop=dict(), ncol=num_colors).set_zorder(len(cols))
ax.set_xlabel(' \n ')
return ax | Plot data with multiple scaels together
Args:
data: DataFrame of data
cols: columns to be plotted
spacing: spacing between legends
color_map: customized colors in map
plot_kw: kwargs for each plot
**kwargs: kwargs for the first plot
Returns:
ax for plot
Examples:
>>> import pandas as pd
>>> import numpy as np
>>>
>>> idx = range(5)
>>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
>>> # plot_multi(data=data, cols=['a', 'b'], plot_kw=[dict(style='.-'), dict()]) | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/plots.py#L4-L87 | [
"def flatten(iterable, maps=None, unique=False):\n \"\"\"\n Flatten any array of items to list\n\n Args:\n iterable: any array or value\n maps: map items to values\n unique: drop duplicates\n\n Returns:\n list: flattened list\n\n References:\n https://stackoverflow.com/a/40857703/1332656\n\n Examples:\n >>> flatten('abc')\n ['abc']\n >>> flatten(1)\n [1]\n >>> flatten(1.)\n [1.0]\n >>> flatten(['ab', 'cd', ['xy', 'zz']])\n ['ab', 'cd', 'xy', 'zz']\n >>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})\n ['ab', '0x', 'zz']\n \"\"\"\n if iterable is None: return []\n if maps is None: maps = dict()\n\n if isinstance(iterable, (str, int, float)):\n return [maps.get(iterable, iterable)]\n\n else:\n x = [maps.get(item, item) for item in _to_gen_(iterable)]\n return list(set(x)) if unique else x\n"
] | from xone import utils
def plot_h(data, cols, wspace=.1, plot_kw=None, **kwargs):
"""
Plot horizontally
Args:
data: DataFrame of data
cols: columns to be plotted
wspace: spacing between plots
plot_kw: kwargs for each plot
**kwargs: kwargs for the whole plot
Returns:
axes for plots
Examples:
>>> import pandas as pd
>>> import numpy as np
>>>
>>> idx = range(5)
>>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
>>> # plot_h(data=data, cols=['a', 'b'], wspace=.2, plot_kw=[dict(style='.-'), dict()])
"""
import matplotlib.pyplot as plt
if plot_kw is None: plot_kw = [dict()] * len(cols)
_, axes = plt.subplots(nrows=1, ncols=len(cols), **kwargs)
plt.subplots_adjust(wspace=wspace)
for n, col in enumerate(cols):
data.loc[:, col].plot(ax=axes[n], **plot_kw[n])
return axes
|
alpha-xone/xone | xone/plots.py | plot_h | python | def plot_h(data, cols, wspace=.1, plot_kw=None, **kwargs):
import matplotlib.pyplot as plt
if plot_kw is None: plot_kw = [dict()] * len(cols)
_, axes = plt.subplots(nrows=1, ncols=len(cols), **kwargs)
plt.subplots_adjust(wspace=wspace)
for n, col in enumerate(cols):
data.loc[:, col].plot(ax=axes[n], **plot_kw[n])
return axes | Plot horizontally
Args:
data: DataFrame of data
cols: columns to be plotted
wspace: spacing between plots
plot_kw: kwargs for each plot
**kwargs: kwargs for the whole plot
Returns:
axes for plots
Examples:
>>> import pandas as pd
>>> import numpy as np
>>>
>>> idx = range(5)
>>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
>>> # plot_h(data=data, cols=['a', 'b'], wspace=.2, plot_kw=[dict(style='.-'), dict()]) | train | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/plots.py#L90-L121 | null | from xone import utils
def plot_multi(data, cols=None, spacing=.06, color_map=None, plot_kw=None, **kwargs):
"""
Plot data with multiple scaels together
Args:
data: DataFrame of data
cols: columns to be plotted
spacing: spacing between legends
color_map: customized colors in map
plot_kw: kwargs for each plot
**kwargs: kwargs for the first plot
Returns:
ax for plot
Examples:
>>> import pandas as pd
>>> import numpy as np
>>>
>>> idx = range(5)
>>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
>>> # plot_multi(data=data, cols=['a', 'b'], plot_kw=[dict(style='.-'), dict()])
"""
import matplotlib.pyplot as plt
from pandas import plotting
if cols is None: cols = data.columns
if plot_kw is None: plot_kw = [{}] * len(cols)
if len(cols) == 0: return
num_colors = len(utils.flatten(cols))
# Get default color style from pandas
colors = getattr(getattr(plotting, '_style'), '_get_standard_colors')(num_colors=num_colors)
if color_map is None: color_map = dict()
fig = plt.figure()
ax, lines, labels, c_idx = None, [], [], 0
for n, col in enumerate(cols):
if isinstance(col, (list, tuple)):
ylabel = ' / '.join(cols[n])
color = [
color_map.get(cols[n][_ - c_idx], colors[_ % len(colors)])
for _ in range(c_idx, c_idx + len(cols[n]))
]
c_idx += len(col)
else:
ylabel = col
color = color_map.get(col, colors[c_idx % len(colors)])
c_idx += 1
if 'color' in plot_kw[n]: color = plot_kw[n].pop('color')
if ax is None:
# First y-axes
legend = plot_kw[0].pop('legend', kwargs.pop('legend', False))
ax = data.loc[:, col].plot(
label=col, color=color, legend=legend, zorder=n, **plot_kw[0], **kwargs
)
ax.set_ylabel(ylabel=ylabel)
line, label = ax.get_legend_handles_labels()
ax.spines['left'].set_edgecolor('#D5C4A1')
ax.spines['left'].set_alpha(.5)
else:
# Multiple y-axes
legend = plot_kw[n].pop('legend', False)
ax_new = ax.twinx()
ax_new.spines['right'].set_position(('axes', 1 + spacing * (n - 1)))
data.loc[:, col].plot(
ax=ax_new, label=col, color=color, legend=legend, zorder=n, **plot_kw[n]
)
ax_new.set_ylabel(ylabel=ylabel)
line, label = ax_new.get_legend_handles_labels()
ax_new.spines['right'].set_edgecolor('#D5C4A1')
ax_new.spines['right'].set_alpha(.5)
ax_new.grid(False)
# Proper legend position
lines += line
labels += label
fig.legend(lines, labels, loc=8, prop=dict(), ncol=num_colors).set_zorder(len(cols))
ax.set_xlabel(' \n ')
return ax
|
mozilla-releng/signtool | signtool/signing/client.py | uploadfile | python | def uploadfile(baseurl, filename, format_, token, nonce, cert, method=requests.post):
filehash = sha1sum(filename)
files = {'filedata': open(filename, 'rb')}
payload = {
'sha1': filehash,
'filename': os.path.basename(filename),
'token': token,
'nonce': nonce,
}
return method("%s/sign/%s" % (baseurl, format_), files=files, data=payload, verify=cert) | Uploads file (given by `filename`) to server at `baseurl`.
`sesson_key` and `nonce` are string values that get passed as POST
parameters. | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/signing/client.py#L161-L177 | [
"def sha1sum(f):\n \"\"\"Return the SHA-1 hash of the contents of file `f`, in hex format\"\"\"\n h = hashlib.sha1()\n fp = open(f, 'rb')\n while True:\n block = fp.read(512 * 1024)\n if not block:\n break\n h.update(block)\n return h.hexdigest()\n"
] | import os
import requests
import six
from subprocess import check_call
import time
from signtool.util.file import sha1sum, safe_copyfile
import logging
log = logging.getLogger(__name__)
def getfile(baseurl, filehash, format_, cert, method=requests.get):
url = "%s/sign/%s/%s" % (baseurl, format_, filehash)
log.debug("%s: GET %s", filehash, url)
return method(url, verify=cert)
def overwrite_file(path1, path2):
log.debug("overwrite %s with %s", path2, path1)
if os.path.exists(path2):
os.unlink(path2)
os.rename(path1, path2)
def check_cached_fn(options, cached_fn, filehash, filename, dest):
log.debug("%s: checking cache", filehash)
if os.path.exists(cached_fn):
log.info("%s: exists in the cache; copying to %s", filehash, dest)
tmpfile = dest + '.tmp'
safe_copyfile(cached_fn, tmpfile)
newhash = sha1sum(tmpfile)
overwrite_file(tmpfile, dest)
log.info("%s: OK", filehash)
# See if we should re-sign NSS
if options.nsscmd and filehash != newhash and \
os.path.exists(os.path.splitext(filename)[0] + ".chk"):
cmd = '%s "%s"' % (options.nsscmd, dest)
log.info("Regenerating .chk file")
log.debug("Running %s", cmd)
check_call(cmd, shell=True)
return True
def remote_signfile(options, urls, filename, fmt, token, dest=None):
filehash = sha1sum(filename)
if dest is None:
dest = filename
parent_dir = os.path.dirname(os.path.abspath(dest))
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
# Check the cache
cached_fn = None
if options.cachedir:
cached_fn = os.path.join(options.cachedir, fmt, filehash)
result = check_cached_fn(options, cached_fn, filehash, filename, dest)
if result:
return True
errors = 0
pendings = 0
max_errors = 5
# It takes the server ~60s to respond to an attempting to get a signed file
# We want to give up after about 5 minutes, so 60*5 = 5 tries.
max_pending_tries = 5
url = None
while True:
if pendings >= max_pending_tries:
log.error("%s: giving up after %i tries", filehash, pendings)
# If we've given up on the current server, try a different one!
url = urls.pop(0)
urls.append(url)
errors += 1
# Pendings needs to be reset to give the next server a fair shake.
pendings = 0
if errors >= max_errors:
log.error("%s: giving up after %i tries", filehash, errors)
return False
# Try to get a previously signed copy of this file
try:
url = urls[0]
log.info("%s: processing %s on %s", filehash, filename, url)
r = getfile(url, filehash, fmt, options.cert)
r.raise_for_status()
responsehash = r.headers['X-SHA1-Digest']
tmpfile = dest + '.tmp'
with open(tmpfile, 'wb') as fd:
for chunk in r.iter_content(1024 ** 2):
fd.write(chunk)
newhash = sha1sum(tmpfile)
if newhash != responsehash:
log.warn(
"%s: hash mismatch; trying to download again", filehash)
os.unlink(tmpfile)
errors += 1
continue
overwrite_file(tmpfile, dest)
log.info("%s: OK", filehash)
# See if we should re-sign NSS
if options.nsscmd and filehash != responsehash and \
os.path.exists(os.path.splitext(filename)[0] + ".chk"):
cmd = '%s "%s"' % (options.nsscmd, dest)
log.info("Regenerating .chk file")
log.debug("Running %s", cmd)
check_call(cmd, shell=True)
# Possibly write to our cache
if options.cachedir:
if not os.path.exists(options.cachedir):
log.debug("Creating %s", options.cachedir)
os.makedirs(options.cachedir)
log.info("Copying %s to cache %s", dest, cached_fn)
safe_copyfile(dest, cached_fn)
break
except requests.HTTPError:
try:
if 'X-Pending' in r.headers:
log.debug("%s: pending; try again in a bit", filehash)
time.sleep(15)
pendings += 1
continue
except Exception:
raise
errors += 1
# That didn't work...so let's upload it
log.info("%s: uploading for signing", filehash)
try:
try:
nonce = open(options.noncefile, 'rb').read()
except IOError:
nonce = ""
r = uploadfile(url, filename, fmt, token, nonce, options.cert)
r.raise_for_status()
nonce = r.headers['X-Nonce']
if six.PY3 and isinstance(nonce, six.text_type):
nonce = nonce.encode('utf-8')
open(options.noncefile, 'wb').write(nonce)
except requests.RequestException as e:
log.exception("%s: error uploading file for signing: %s",
filehash, str(e))
urls.pop(0)
urls.append(url)
time.sleep(1)
continue
except (requests.RequestException, KeyError):
# Try again in a little while
log.exception("%s: connection error; trying again soon", filehash)
# Move the current url to the back
urls.pop(0)
urls.append(url)
time.sleep(1)
errors += 1
continue
return True
|
mozilla-releng/signtool | signtool/signtool.py | is_authenticode_signed | python | def is_authenticode_signed(filename):
with open(filename, 'rb') as fp:
fp.seek(0)
magic = fp.read(2)
if magic != b'MZ':
return False
# First grab the pointer to the coff_header, which is at offset 60
fp.seek(60)
coff_header_offset = struct.unpack('<L', fp.read(4))[0]
# Check the COFF magic
fp.seek(coff_header_offset)
magic = fp.read(4)
if magic != b'PE\x00\x00':
return False
# Get the PE type
fp.seek(coff_header_offset + 0x18)
pe_type = struct.unpack('<h', fp.read(2))[0]
if pe_type == 0x10b:
# PE32 file (32-bit apps)
number_of_data_dirs_offset = coff_header_offset + 0x74
elif pe_type == 0x20b:
# PE32+ files (64-bit apps)
# PE32+ files have slightly larger fields in the header
number_of_data_dirs_offset = coff_header_offset + 0x74 + 16
else:
return False
fp.seek(number_of_data_dirs_offset)
num_data_dirs = struct.unpack('<L', fp.read(4))[0]
if num_data_dirs < 5:
# Probably shouldn't happen, but just in case
return False
cert_table_offset = number_of_data_dirs_offset + 4*8 + 4
fp.seek(cert_table_offset)
addr, size = struct.unpack('<LL', fp.read(8))
if not addr or not size:
return False
# Check that addr is inside the file
fp.seek(addr)
if fp.tell() != addr:
return False
cert = fp.read(size)
if len(cert) != size:
return False
return True | Returns True if the file is signed with authenticode | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/signtool.py#L37-L88 | null | #!/usr/bin/env python
"""signtool.py [options] file [file ...]
If no include patterns are specified, all files will be considered. -i/-x only
have effect when signing entire directories."""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import logging
import os
import sys
from optparse import OptionParser
import random
import struct
from signtool.signing.client import remote_signfile
from signtool.util.paths import findfiles
ALLOWED_FORMATS = (
"dmg",
"gpg",
"macapp",
"mar",
"mar_sha384",
"sha2signcode",
"sha2signcodestub",
"sha2signcode-v2",
"sha2signcodestub-v2",
"widevine",
"widevine_blessed",
)
log = logging.getLogger(__name__)
# is_authenticode_signed {{{1
# parse_cmdln_opts {{{1
def parse_cmdln_opts(parser, cmdln_args):
"""Rather than have this all clutter main(), let's split this out.
Clean arch decision: rather than parsing sys.argv directly, pass
sys.argv[1:] to this function (or any iterable for testing.)
"""
parser.set_defaults(
hosts=[],
cert=None,
log_level=logging.INFO,
output_dir=None,
output_file=None,
formats=[],
includes=[],
excludes=[],
nsscmd=None,
tokenfile=None,
noncefile=None,
cachedir=None,
)
parser.add_option(
"-H", "--host", dest="hosts", action="append", help="format[:format]:hostname[:port]")
parser.add_option("-c", "--server-cert", dest="cert")
parser.add_option("-t", "--token-file", dest="tokenfile",
help="file where token is stored")
parser.add_option("-n", "--nonce-file", dest="noncefile",
help="file where nonce is stored")
parser.add_option("-d", "--output-dir", dest="output_dir",
help="output directory; if not set then files are "
"replaced with signed copies")
parser.add_option("-o", "--output-file", dest="output_file",
help="output file; if not set then files are replaced with signed "
"copies. This can only be used when signing a single file")
parser.add_option("-f", "--formats", dest="formats", action="append",
help="signing formats (one or more of %s)" % ", ".join(ALLOWED_FORMATS))
parser.add_option("-q", "--quiet", dest="log_level", action="store_const",
const=logging.WARN)
parser.add_option(
"-v", "--verbose", dest="log_level", action="store_const",
const=logging.DEBUG)
parser.add_option("-i", "--include", dest="includes", action="append",
help="add to include patterns")
parser.add_option("-x", "--exclude", dest="excludes", action="append",
help="add to exclude patterns")
parser.add_option("--nsscmd", dest="nsscmd",
help="command to re-sign nss libraries, if required")
parser.add_option("--cachedir", dest="cachedir",
help="local cache directory")
# TODO: Concurrency?
# TODO: Different certs per server?
options, args = parser.parse_args(cmdln_args)
if not options.hosts:
parser.error("at least one host is required")
if not options.cert:
parser.error("certificate is required")
if not os.path.exists(options.cert):
parser.error("certificate not found")
if not options.tokenfile:
parser.error("token file is required")
if not options.noncefile:
parser.error("nonce file is required")
# Covert nsscmd to win32 path if required
if sys.platform == 'win32' and options.nsscmd:
nsscmd = options.nsscmd.strip()
if nsscmd.startswith("/"):
drive = nsscmd[1]
options.nsscmd = "%s:%s" % (drive, nsscmd[2:])
# Handle format
formats = []
for fmt in options.formats:
if "," in fmt:
for fmt in fmt.split(","):
if fmt not in ALLOWED_FORMATS:
parser.error("invalid format: %s" % fmt)
formats.append(fmt)
elif fmt not in ALLOWED_FORMATS:
parser.error("invalid format: %s" % fmt)
else:
formats.append(fmt)
# bug 1382882, 1164456
# Widevine and GPG signing must happen last because they will be invalid if
# done prior to any format that modifies the file in-place.
for fmt in ("widevine", "widevine_blessed", "gpg"):
if fmt in formats:
formats.remove(fmt)
formats.append(fmt)
if options.output_file and (len(args) > 1 or os.path.isdir(args[0])):
parser.error(
"-o / --output-file can only be used when signing a single file")
if options.output_dir:
if os.path.exists(options.output_dir):
if not os.path.isdir(options.output_dir):
parser.error(
"output_dir (%s) must be a directory", options.output_dir)
else:
os.makedirs(options.output_dir)
if not options.includes:
# Do everything!
options.includes.append("*")
if not formats:
parser.error("no formats specified")
options.formats = formats
format_urls = defaultdict(list)
for h in options.hosts:
# The last two parts of a host is the actual hostname:port. Any parts
# before that are formats - there could be 0..n formats so this is
# tricky to split.
parts = h.split(":")
h = parts[-2:]
fmts = parts[:-2]
# If no formats are specified, the host is assumed to support all of them.
if not fmts:
fmts = formats
for f in fmts:
format_urls[f].append("https://%s" % ":".join(h))
options.format_urls = format_urls
missing_fmt_hosts = set(formats) - set(format_urls.keys())
if missing_fmt_hosts:
parser.error("no hosts capable of signing formats: %s" % " ".join(missing_fmt_hosts))
return options, args
def sign(options, args):
token = open(options.tokenfile, 'rb').read()
for fmt in options.formats:
urls = options.format_urls[fmt][:]
random.shuffle(urls)
if fmt in ("macapp", ):
fmt = "dmg"
log.debug("doing %s signing", fmt)
log.debug("possible hosts are %s" % urls)
files = []
# We sign all of the files individually.
files = findfiles(args, options.includes, options.excludes)
for f in files:
log.debug("%s", f)
log.debug("checking %s for signature...", f)
if fmt.startswith('sha2signcode') and is_authenticode_signed(f):
log.info("Skipping %s because it looks like it's already signed", f)
continue
if options.output_dir:
dest = os.path.join(options.output_dir, os.path.basename(f))
elif options.output_file:
dest = options.output_file
else:
dest = None
if not remote_signfile(options, urls, f, fmt, token, dest):
log.error("Failed to sign %s with %s", f, fmt)
sys.exit(1)
# main {{{1
def main(name=None):
if name in (None, '__main__'):
parser = OptionParser(__doc__)
options, args = parse_cmdln_opts(parser, sys.argv[1:])
logging.basicConfig(
level=options.log_level, format="%(asctime)s - %(message)s")
log.debug("in %s", os.getcwd())
sign(options, args)
log.info("Done.")
main(name=__name__)
|
mozilla-releng/signtool | signtool/signtool.py | parse_cmdln_opts | python | def parse_cmdln_opts(parser, cmdln_args):
parser.set_defaults(
hosts=[],
cert=None,
log_level=logging.INFO,
output_dir=None,
output_file=None,
formats=[],
includes=[],
excludes=[],
nsscmd=None,
tokenfile=None,
noncefile=None,
cachedir=None,
)
parser.add_option(
"-H", "--host", dest="hosts", action="append", help="format[:format]:hostname[:port]")
parser.add_option("-c", "--server-cert", dest="cert")
parser.add_option("-t", "--token-file", dest="tokenfile",
help="file where token is stored")
parser.add_option("-n", "--nonce-file", dest="noncefile",
help="file where nonce is stored")
parser.add_option("-d", "--output-dir", dest="output_dir",
help="output directory; if not set then files are "
"replaced with signed copies")
parser.add_option("-o", "--output-file", dest="output_file",
help="output file; if not set then files are replaced with signed "
"copies. This can only be used when signing a single file")
parser.add_option("-f", "--formats", dest="formats", action="append",
help="signing formats (one or more of %s)" % ", ".join(ALLOWED_FORMATS))
parser.add_option("-q", "--quiet", dest="log_level", action="store_const",
const=logging.WARN)
parser.add_option(
"-v", "--verbose", dest="log_level", action="store_const",
const=logging.DEBUG)
parser.add_option("-i", "--include", dest="includes", action="append",
help="add to include patterns")
parser.add_option("-x", "--exclude", dest="excludes", action="append",
help="add to exclude patterns")
parser.add_option("--nsscmd", dest="nsscmd",
help="command to re-sign nss libraries, if required")
parser.add_option("--cachedir", dest="cachedir",
help="local cache directory")
# TODO: Concurrency?
# TODO: Different certs per server?
options, args = parser.parse_args(cmdln_args)
if not options.hosts:
parser.error("at least one host is required")
if not options.cert:
parser.error("certificate is required")
if not os.path.exists(options.cert):
parser.error("certificate not found")
if not options.tokenfile:
parser.error("token file is required")
if not options.noncefile:
parser.error("nonce file is required")
# Covert nsscmd to win32 path if required
if sys.platform == 'win32' and options.nsscmd:
nsscmd = options.nsscmd.strip()
if nsscmd.startswith("/"):
drive = nsscmd[1]
options.nsscmd = "%s:%s" % (drive, nsscmd[2:])
# Handle format
formats = []
for fmt in options.formats:
if "," in fmt:
for fmt in fmt.split(","):
if fmt not in ALLOWED_FORMATS:
parser.error("invalid format: %s" % fmt)
formats.append(fmt)
elif fmt not in ALLOWED_FORMATS:
parser.error("invalid format: %s" % fmt)
else:
formats.append(fmt)
# bug 1382882, 1164456
# Widevine and GPG signing must happen last because they will be invalid if
# done prior to any format that modifies the file in-place.
for fmt in ("widevine", "widevine_blessed", "gpg"):
if fmt in formats:
formats.remove(fmt)
formats.append(fmt)
if options.output_file and (len(args) > 1 or os.path.isdir(args[0])):
parser.error(
"-o / --output-file can only be used when signing a single file")
if options.output_dir:
if os.path.exists(options.output_dir):
if not os.path.isdir(options.output_dir):
parser.error(
"output_dir (%s) must be a directory", options.output_dir)
else:
os.makedirs(options.output_dir)
if not options.includes:
# Do everything!
options.includes.append("*")
if not formats:
parser.error("no formats specified")
options.formats = formats
format_urls = defaultdict(list)
for h in options.hosts:
# The last two parts of a host is the actual hostname:port. Any parts
# before that are formats - there could be 0..n formats so this is
# tricky to split.
parts = h.split(":")
h = parts[-2:]
fmts = parts[:-2]
# If no formats are specified, the host is assumed to support all of them.
if not fmts:
fmts = formats
for f in fmts:
format_urls[f].append("https://%s" % ":".join(h))
options.format_urls = format_urls
missing_fmt_hosts = set(formats) - set(format_urls.keys())
if missing_fmt_hosts:
parser.error("no hosts capable of signing formats: %s" % " ".join(missing_fmt_hosts))
return options, args | Rather than have this all clutter main(), let's split this out.
Clean arch decision: rather than parsing sys.argv directly, pass
sys.argv[1:] to this function (or any iterable for testing.) | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/signtool.py#L92-L229 | [
"def error(self, msg, *args):\n self.msg = msg\n raise SystemExit(msg)\n"
] | #!/usr/bin/env python
"""signtool.py [options] file [file ...]
If no include patterns are specified, all files will be considered. -i/-x only
have effect when signing entire directories."""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import logging
import os
import sys
from optparse import OptionParser
import random
import struct
from signtool.signing.client import remote_signfile
from signtool.util.paths import findfiles
ALLOWED_FORMATS = (
"dmg",
"gpg",
"macapp",
"mar",
"mar_sha384",
"sha2signcode",
"sha2signcodestub",
"sha2signcode-v2",
"sha2signcodestub-v2",
"widevine",
"widevine_blessed",
)
log = logging.getLogger(__name__)
# is_authenticode_signed {{{1
def is_authenticode_signed(filename):
"""Returns True if the file is signed with authenticode"""
with open(filename, 'rb') as fp:
fp.seek(0)
magic = fp.read(2)
if magic != b'MZ':
return False
# First grab the pointer to the coff_header, which is at offset 60
fp.seek(60)
coff_header_offset = struct.unpack('<L', fp.read(4))[0]
# Check the COFF magic
fp.seek(coff_header_offset)
magic = fp.read(4)
if magic != b'PE\x00\x00':
return False
# Get the PE type
fp.seek(coff_header_offset + 0x18)
pe_type = struct.unpack('<h', fp.read(2))[0]
if pe_type == 0x10b:
# PE32 file (32-bit apps)
number_of_data_dirs_offset = coff_header_offset + 0x74
elif pe_type == 0x20b:
# PE32+ files (64-bit apps)
# PE32+ files have slightly larger fields in the header
number_of_data_dirs_offset = coff_header_offset + 0x74 + 16
else:
return False
fp.seek(number_of_data_dirs_offset)
num_data_dirs = struct.unpack('<L', fp.read(4))[0]
if num_data_dirs < 5:
# Probably shouldn't happen, but just in case
return False
cert_table_offset = number_of_data_dirs_offset + 4*8 + 4
fp.seek(cert_table_offset)
addr, size = struct.unpack('<LL', fp.read(8))
if not addr or not size:
return False
# Check that addr is inside the file
fp.seek(addr)
if fp.tell() != addr:
return False
cert = fp.read(size)
if len(cert) != size:
return False
return True
# parse_cmdln_opts {{{1
def sign(options, args):
token = open(options.tokenfile, 'rb').read()
for fmt in options.formats:
urls = options.format_urls[fmt][:]
random.shuffle(urls)
if fmt in ("macapp", ):
fmt = "dmg"
log.debug("doing %s signing", fmt)
log.debug("possible hosts are %s" % urls)
files = []
# We sign all of the files individually.
files = findfiles(args, options.includes, options.excludes)
for f in files:
log.debug("%s", f)
log.debug("checking %s for signature...", f)
if fmt.startswith('sha2signcode') and is_authenticode_signed(f):
log.info("Skipping %s because it looks like it's already signed", f)
continue
if options.output_dir:
dest = os.path.join(options.output_dir, os.path.basename(f))
elif options.output_file:
dest = options.output_file
else:
dest = None
if not remote_signfile(options, urls, f, fmt, token, dest):
log.error("Failed to sign %s with %s", f, fmt)
sys.exit(1)
# main {{{1
def main(name=None):
if name in (None, '__main__'):
parser = OptionParser(__doc__)
options, args = parse_cmdln_opts(parser, sys.argv[1:])
logging.basicConfig(
level=options.log_level, format="%(asctime)s - %(message)s")
log.debug("in %s", os.getcwd())
sign(options, args)
log.info("Done.")
main(name=__name__)
|
mozilla-releng/signtool | signtool/util/paths.py | cygpath | python | def cygpath(filename):
if sys.platform == 'cygwin':
proc = Popen(['cygpath', '-am', filename], stdout=PIPE)
return proc.communicate()[0].strip()
else:
return filename | Convert a cygwin path into a windows style path | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/paths.py#L11-L17 | null | import os.path
import sys
import fnmatch
import logging
# TODO: Use util.commands
from subprocess import PIPE, Popen
log = logging.getLogger(__name__)
def convertPath(srcpath, dstdir):
"""Given `srcpath`, return a corresponding path within `dstdir`"""
bits = srcpath.split("/")
bits.pop(0)
# Strip out leading 'unsigned' from paths like unsigned/update/win32/...
if bits[0] == 'unsigned':
bits.pop(0)
return os.path.join(dstdir, *bits)
def findfiles(roots, includes=('*', ), excludes=()):
retval = []
if not isinstance(roots, (list, tuple)):
roots = [roots]
for fn in roots:
if os.path.isdir(fn):
dirname = fn
for root, dirs, files in os.walk(dirname):
for f in files:
fullpath = os.path.join(root, f)
if not any(fnmatch.fnmatch(f, pat) for pat in includes):
log.debug("Skipping %s; doesn't match any include pattern", f)
continue
if any(fnmatch.fnmatch(f, pat) for pat in excludes):
log.debug("Skipping %s; matches an exclude pattern", f)
continue
retval.append(fullpath)
else:
retval.append(fn)
return retval
def finddirs(root):
"""Return a list of all the directories under `root`"""
retval = []
for root, dirs, files in os.walk(root):
for d in dirs:
retval.append(os.path.join(root, d))
return retval
|
mozilla-releng/signtool | signtool/util/paths.py | convertPath | python | def convertPath(srcpath, dstdir):
bits = srcpath.split("/")
bits.pop(0)
# Strip out leading 'unsigned' from paths like unsigned/update/win32/...
if bits[0] == 'unsigned':
bits.pop(0)
return os.path.join(dstdir, *bits) | Given `srcpath`, return a corresponding path within `dstdir` | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/paths.py#L20-L27 | null | import os.path
import sys
import fnmatch
import logging
# TODO: Use util.commands
from subprocess import PIPE, Popen
log = logging.getLogger(__name__)
def cygpath(filename):
"""Convert a cygwin path into a windows style path"""
if sys.platform == 'cygwin':
proc = Popen(['cygpath', '-am', filename], stdout=PIPE)
return proc.communicate()[0].strip()
else:
return filename
def findfiles(roots, includes=('*', ), excludes=()):
retval = []
if not isinstance(roots, (list, tuple)):
roots = [roots]
for fn in roots:
if os.path.isdir(fn):
dirname = fn
for root, dirs, files in os.walk(dirname):
for f in files:
fullpath = os.path.join(root, f)
if not any(fnmatch.fnmatch(f, pat) for pat in includes):
log.debug("Skipping %s; doesn't match any include pattern", f)
continue
if any(fnmatch.fnmatch(f, pat) for pat in excludes):
log.debug("Skipping %s; matches an exclude pattern", f)
continue
retval.append(fullpath)
else:
retval.append(fn)
return retval
def finddirs(root):
"""Return a list of all the directories under `root`"""
retval = []
for root, dirs, files in os.walk(root):
for d in dirs:
retval.append(os.path.join(root, d))
return retval
|
mozilla-releng/signtool | signtool/util/paths.py | finddirs | python | def finddirs(root):
retval = []
for root, dirs, files in os.walk(root):
for d in dirs:
retval.append(os.path.join(root, d))
return retval | Return a list of all the directories under `root` | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/paths.py#L52-L58 | null | import os.path
import sys
import fnmatch
import logging
# TODO: Use util.commands
from subprocess import PIPE, Popen
log = logging.getLogger(__name__)
def cygpath(filename):
"""Convert a cygwin path into a windows style path"""
if sys.platform == 'cygwin':
proc = Popen(['cygpath', '-am', filename], stdout=PIPE)
return proc.communicate()[0].strip()
else:
return filename
def convertPath(srcpath, dstdir):
"""Given `srcpath`, return a corresponding path within `dstdir`"""
bits = srcpath.split("/")
bits.pop(0)
# Strip out leading 'unsigned' from paths like unsigned/update/win32/...
if bits[0] == 'unsigned':
bits.pop(0)
return os.path.join(dstdir, *bits)
def findfiles(roots, includes=('*', ), excludes=()):
retval = []
if not isinstance(roots, (list, tuple)):
roots = [roots]
for fn in roots:
if os.path.isdir(fn):
dirname = fn
for root, dirs, files in os.walk(dirname):
for f in files:
fullpath = os.path.join(root, f)
if not any(fnmatch.fnmatch(f, pat) for pat in includes):
log.debug("Skipping %s; doesn't match any include pattern", f)
continue
if any(fnmatch.fnmatch(f, pat) for pat in excludes):
log.debug("Skipping %s; matches an exclude pattern", f)
continue
retval.append(fullpath)
else:
retval.append(fn)
return retval
|
mozilla-releng/signtool | signtool/util/archives.py | unpackexe | python | def unpackexe(exefile, destdir):
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close() | Unpack the given exefile into destdir, using 7z | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L23-L33 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | packexe | python | def packexe(exefile, srcdir):
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle) | Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L36-L83 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | bunzip2 | python | def bunzip2(filename):
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile) | Uncompress `filename` in place | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L86-L102 | null | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | unpackmar | python | def unpackmar(marfile, destdir):
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close() | Unpack marfile into destdir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L124-L134 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | packmar | python | def packmar(marfile, srcdir):
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close() | Create marfile from the contents of srcdir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L137-L148 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n",
"def findfiles(roots, includes=('*', ), excludes=()):\n retval = []\n if not isinstance(roots, (list, tuple)):\n roots = [roots]\n for fn in roots:\n if os.path.isdir(fn):\n dirname = fn\n for root, dirs, files in os.walk(dirname):\n for f in files:\n fullpath = os.path.join(root, f)\n if not any(fnmatch.fnmatch(f, pat) for pat in includes):\n log.debug(\"Skipping %s; doesn't match any include pattern\", f)\n continue\n if any(fnmatch.fnmatch(f, pat) for pat in excludes):\n log.debug(\"Skipping %s; matches an exclude pattern\", f)\n continue\n retval.append(fullpath)\n else:\n retval.append(fn)\n return retval\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | unpacktar | python | def unpacktar(tarfile, destdir):
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close() | Unpack given tarball into the specified dir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L151-L162 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | tar_dir | python | def tar_dir(tarfile, srcdir):
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir) | Pack a tar file using all the files in the given srcdir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L165-L168 | [
"def packtar(tarfile, files, srcdir):\n \"\"\" Pack the given files into a tar, setting cwd = srcdir\"\"\"\n nullfd = open(os.devnull, \"w\")\n tarfile = cygpath(os.path.abspath(tarfile))\n log.debug(\"pack tar %s from folder %s with files \", tarfile, srcdir)\n log.debug(files)\n try:\n check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,\n stdout=nullfd, preexec_fn=_noumask)\n except Exception:\n log.exception(\"Error packing tar file %s to %s\", tarfile, srcdir)\n raise\n nullfd.close()\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | packtar | python | def packtar(tarfile, files, srcdir):
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close() | Pack the given files into a tar, setting cwd = srcdir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L171-L183 | [
"def cygpath(filename):\n \"\"\"Convert a cygwin path into a windows style path\"\"\"\n if sys.platform == 'cygwin':\n proc = Popen(['cygpath', '-am', filename], stdout=PIPE)\n return proc.communicate()[0].strip()\n else:\n return filename\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | unpackfile | python | def unpackfile(filename, destdir):
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename) | Unpack a mar or exe into destdir | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L186-L196 | [
"def unpackmar(marfile, destdir):\n \"\"\"Unpack marfile into destdir\"\"\"\n marfile = cygpath(os.path.abspath(marfile))\n nullfd = open(os.devnull, \"w\")\n try:\n check_call([MAR, '-x', marfile], cwd=destdir,\n stdout=nullfd, preexec_fn=_noumask)\n except Exception:\n log.exception(\"Error unpacking mar file %s to %s\", marfile, destdir)\n raise\n nullfd.close()\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def packfile(filename, srcdir):
"""Package up srcdir into filename, archived with 7z for exes or mar for
mar files"""
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/archives.py | packfile | python | def packfile(filename, srcdir):
if filename.endswith(".mar"):
return packmar(filename, srcdir)
elif filename.endswith(".exe"):
return packexe(filename, srcdir)
elif filename.endswith(".tar"):
return tar_dir(filename, srcdir)
else:
raise ValueError("Unknown file type: %s" % filename) | Package up srcdir into filename, archived with 7z for exes or mar for
mar files | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/archives.py#L199-L209 | [
"def packmar(marfile, srcdir):\n \"\"\"Create marfile from the contents of srcdir\"\"\"\n nullfd = open(os.devnull, \"w\")\n files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]\n marfile = cygpath(os.path.abspath(marfile))\n try:\n check_call(\n [MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)\n except Exception:\n log.exception(\"Error packing mar file %s from %s\", marfile, srcdir)\n raise\n nullfd.close()\n"
] | import os
# TODO: use util.commands
from subprocess import check_call
import logging
import tempfile
import bz2
import shutil
from signtool.util.paths import cygpath, findfiles
log = logging.getLogger(__name__)
SEVENZIP = os.environ.get('SEVENZIP', '7z')
MAR = os.environ.get('MAR', 'mar')
TAR = os.environ.get('TAR', 'tar')
def _noumask():
# Utility function to set a umask of 000
os.umask(0)
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
nullfd.close()
def packexe(exefile, srcdir):
"""Pack the files in srcdir into exefile using 7z.
Requires that stub files are available in checkouts/stubs"""
exefile = cygpath(os.path.abspath(exefile))
appbundle = exefile + ".app.7z"
# Make sure that appbundle doesn't already exist
# We don't want to risk appending to an existing file
if os.path.exists(appbundle):
raise OSError("%s already exists" % appbundle)
files = os.listdir(srcdir)
SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
'-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2', '-mb0:1', '-mb0s1:2',
'-mb0s2:3', '-m1fb=128', '-m1lc=4']
# First, compress with 7z
stdout = tempfile.TemporaryFile()
try:
check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
except Exception:
stdout.seek(0)
data = stdout.read()
log.error(data)
log.exception("Error packing exe %s from %s", exefile, srcdir)
raise
stdout.close()
# Then prepend our stubs onto the compressed 7z data
o = open(exefile, "wb")
parts = [
'checkouts/stubs/7z/7zSD.sfx.compressed',
'checkouts/stubs/tagfile/app.tag',
appbundle
]
for part in parts:
i = open(part)
while True:
block = i.read(4096)
if not block:
break
o.write(block)
i.close()
o.close()
os.unlink(appbundle)
def bunzip2(filename):
"""Uncompress `filename` in place"""
log.debug("Uncompressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(tmpfile)
f = open(filename, "wb")
while True:
block = b.read(512 * 1024)
if not block:
break
f.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def bzip2(filename):
"""Compress `filename` in place"""
log.debug("Compressing %s", filename)
tmpfile = "%s.tmp" % filename
os.rename(filename, tmpfile)
b = bz2.BZ2File(filename, "w")
f = open(tmpfile, 'rb')
while True:
block = f.read(512 * 1024)
if not block:
break
b.write(block)
f.close()
b.close()
shutil.copystat(tmpfile, filename)
shutil.copymode(tmpfile, filename)
os.unlink(tmpfile)
def unpackmar(marfile, destdir):
"""Unpack marfile into destdir"""
marfile = cygpath(os.path.abspath(marfile))
nullfd = open(os.devnull, "w")
try:
check_call([MAR, '-x', marfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking mar file %s to %s", marfile, destdir)
raise
nullfd.close()
def packmar(marfile, srcdir):
"""Create marfile from the contents of srcdir"""
nullfd = open(os.devnull, "w")
files = [f[len(srcdir) + 1:] for f in findfiles(srcdir)]
marfile = cygpath(os.path.abspath(marfile))
try:
check_call(
[MAR, '-c', marfile] + files, cwd=srcdir, preexec_fn=_noumask)
except Exception:
log.exception("Error packing mar file %s from %s", marfile, srcdir)
raise
nullfd.close()
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close()
def tar_dir(tarfile, srcdir):
""" Pack a tar file using all the files in the given srcdir """
files = os.listdir(srcdir)
packtar(tarfile, files, srcdir)
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close()
def unpackfile(filename, destdir):
"""Unpack a mar or exe into destdir"""
if filename.endswith(".mar"):
return unpackmar(filename, destdir)
elif filename.endswith(".exe"):
return unpackexe(filename, destdir)
elif filename.endswith(".tar") or filename.endswith(".tar.gz") \
or filename.endswith(".tgz"):
return unpacktar(filename, destdir)
else:
raise ValueError("Unknown file type: %s" % filename)
|
mozilla-releng/signtool | signtool/util/file.py | compare | python | def compare(file1, file2):
if isinstance(file1, six.string_types): # pragma: no branch
file1 = open(file1, 'r', True)
if isinstance(file2, six.string_types): # pragma: no branch
file2 = open(file2, 'r', True)
file1_contents = file1.read()
file2_contents = file2.read()
return file1_contents == file2_contents | compares the contents of two files, passed in either as
open file handles or accessible file paths. Does a simple
naive string comparison, so do not use on larger files | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/file.py#L11-L21 | null | """Helper functions to handle file operations"""
import logging
import os
import shutil
import six
import hashlib
import tempfile
log = logging.getLogger(__name__)
def sha1sum(f):
"""Return the SHA-1 hash of the contents of file `f`, in hex format"""
h = hashlib.sha1()
fp = open(f, 'rb')
while True:
block = fp.read(512 * 1024)
if not block:
break
h.update(block)
return h.hexdigest()
def safe_copyfile(src, dest):
"""safely copy src to dest using a temporary intermediate and then renaming
to dest"""
fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb'))
shutil.copystat(src, tmpname)
os.rename(tmpname, dest)
|
mozilla-releng/signtool | signtool/util/file.py | sha1sum | python | def sha1sum(f):
h = hashlib.sha1()
fp = open(f, 'rb')
while True:
block = fp.read(512 * 1024)
if not block:
break
h.update(block)
return h.hexdigest() | Return the SHA-1 hash of the contents of file `f`, in hex format | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/file.py#L24-L33 | null | """Helper functions to handle file operations"""
import logging
import os
import shutil
import six
import hashlib
import tempfile
log = logging.getLogger(__name__)
def compare(file1, file2):
"""compares the contents of two files, passed in either as
open file handles or accessible file paths. Does a simple
naive string comparison, so do not use on larger files"""
if isinstance(file1, six.string_types): # pragma: no branch
file1 = open(file1, 'r', True)
if isinstance(file2, six.string_types): # pragma: no branch
file2 = open(file2, 'r', True)
file1_contents = file1.read()
file2_contents = file2.read()
return file1_contents == file2_contents
def safe_copyfile(src, dest):
"""safely copy src to dest using a temporary intermediate and then renaming
to dest"""
fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb'))
shutil.copystat(src, tmpname)
os.rename(tmpname, dest)
|
mozilla-releng/signtool | signtool/util/file.py | safe_copyfile | python | def safe_copyfile(src, dest):
fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb'))
shutil.copystat(src, tmpname)
os.rename(tmpname, dest) | safely copy src to dest using a temporary intermediate and then renaming
to dest | train | https://github.com/mozilla-releng/signtool/blob/0a778778a181cb9cab424b29fa104b70345f53c2/signtool/util/file.py#L36-L42 | null | """Helper functions to handle file operations"""
import logging
import os
import shutil
import six
import hashlib
import tempfile
log = logging.getLogger(__name__)
def compare(file1, file2):
"""compares the contents of two files, passed in either as
open file handles or accessible file paths. Does a simple
naive string comparison, so do not use on larger files"""
if isinstance(file1, six.string_types): # pragma: no branch
file1 = open(file1, 'r', True)
if isinstance(file2, six.string_types): # pragma: no branch
file2 = open(file2, 'r', True)
file1_contents = file1.read()
file2_contents = file2.read()
return file1_contents == file2_contents
def sha1sum(f):
"""Return the SHA-1 hash of the contents of file `f`, in hex format"""
h = hashlib.sha1()
fp = open(f, 'rb')
while True:
block = fp.read(512 * 1024)
if not block:
break
h.update(block)
return h.hexdigest()
|
lobocv/pyperform | pyperform/comparisonbenchmark.py | ComparisonBenchmark.validate | python | def validate(self):
validation_code = self.setup_src + '\nvalidation_result = ' + self.stmt
validation_scope = {}
exec(validation_code, validation_scope)
# Store the result in the first function in the group.
if len(self.groups[self.group]) == 1:
self.result = validation_scope['validation_result']
logging.info('PyPerform: Validating group "{b.group}" against function "{b.callable.__name__}"'
.format(b=self))
else:
compare_against_benchmark = self.groups[self.group][0]
test = [benchmark.result_validation for benchmark in self.groups[self.group]]
if not all(test):
raise ValueError('All functions within a group must have the same validation flag.')
compare_result = compare_against_benchmark.result
if self.validation_func:
results_are_valid = self.validation_func(compare_result, validation_scope['validation_result'])
else:
results_are_valid = compare_result == validation_scope['validation_result']
if results_are_valid:
logging.info('PyPerform: Validating {}......PASSED!'.format(self.callable.__name__))
else:
error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}'
raise ValidationError(error.format(compare_against_benchmark.callable.__name__, self.callable.__name__,
compare_result, validation_scope['validation_result'])) | Execute the code once to get it's results (to be used in function validation). Compare the result to the
first function in the group. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/comparisonbenchmark.py#L36-L64 | null | class ComparisonBenchmark(Benchmark):
groups = {}
def __init__(self, group, classname=None, setup=None, validation=False, validation_func=None, largs=None, kwargs=None, **kw):
super(ComparisonBenchmark, self).__init__(setup=setup, largs=largs, kwargs=kwargs, **kw)
self.group = group
self.classname = classname
self.result_validation = validation
self.validation_func = validation_func
self.result = None
if group not in self.groups:
self.groups[group] = []
def __call__(self, caller):
if self.enable:
super(ComparisonBenchmark, self).__call__(caller)
self.groups[self.group].append(self)
# Bound functions are tested in ClassBenchmark.__call__
# Just store a reference to the ComparisonBenchmark if the function is bound, otherwise, run the test
if not self.is_class_method:
# Run the test
self.run_timeit(self.stmt, self.setup_src)
if self.result_validation:
self.validate()
return caller
@staticmethod
def summarize(group, fs=None, include_source=True):
"""
Tabulate and write the results of ComparisonBenchmarks to a file or standard out.
:param str group: name of the comparison group.
:param fs: file-like object (Optional)
"""
_line_break = '{0:-<120}\n'.format('')
tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))
log = StringIO.StringIO()
log.write('Call statement:\n\n')
log.write('\t' + tests[0].stmt)
log.write('\n\n\n')
fmt = "{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\n"
log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))
log.write(_line_break)
log.write('\n')
for i, t in enumerate(tests):
func_name = "{}.{}".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__
if i == len(tests)-1:
time_percent = 'Slowest'
else:
time_percent = "{:.1f}".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)
log.write(fmt.format(i+1,
func_name,
convert_time_units(t.time_average_seconds),
time_percent,
t.timeit_repeat,
t.timeit_number))
log.write(_line_break)
if include_source:
log.write('\n\n\nSource Code:\n')
log.write(_line_break)
for test in tests:
log.write(test.log.getvalue())
log.write(_line_break)
if isinstance(fs, str):
with open(fs, 'w') as f:
f.write(log.getvalue())
elif fs is None:
print(log.getvalue())
else:
try:
fs.write(log.getvalue())
except AttributeError as e:
print(e)
|
lobocv/pyperform | pyperform/comparisonbenchmark.py | ComparisonBenchmark.summarize | python | def summarize(group, fs=None, include_source=True):
_line_break = '{0:-<120}\n'.format('')
tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))
log = StringIO.StringIO()
log.write('Call statement:\n\n')
log.write('\t' + tests[0].stmt)
log.write('\n\n\n')
fmt = "{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\n"
log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))
log.write(_line_break)
log.write('\n')
for i, t in enumerate(tests):
func_name = "{}.{}".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__
if i == len(tests)-1:
time_percent = 'Slowest'
else:
time_percent = "{:.1f}".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)
log.write(fmt.format(i+1,
func_name,
convert_time_units(t.time_average_seconds),
time_percent,
t.timeit_repeat,
t.timeit_number))
log.write(_line_break)
if include_source:
log.write('\n\n\nSource Code:\n')
log.write(_line_break)
for test in tests:
log.write(test.log.getvalue())
log.write(_line_break)
if isinstance(fs, str):
with open(fs, 'w') as f:
f.write(log.getvalue())
elif fs is None:
print(log.getvalue())
else:
try:
fs.write(log.getvalue())
except AttributeError as e:
print(e) | Tabulate and write the results of ComparisonBenchmarks to a file or standard out.
:param str group: name of the comparison group.
:param fs: file-like object (Optional) | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/comparisonbenchmark.py#L67-L115 | [
"def convert_time_units(t):\n \"\"\" Convert time in seconds into reasonable time units. \"\"\"\n if t == 0:\n return '0 s'\n order = log10(t)\n if -9 < order < -6:\n time_units = 'ns'\n factor = 1000000000\n elif -6 <= order < -3:\n time_units = 'us'\n factor = 1000000\n elif -3 <= order < -1:\n time_units = 'ms'\n factor = 1000.\n elif -1 <= order:\n time_units = 's'\n factor = 1\n return \"{:.3f} {}\".format(factor * t, time_units)\n"
] | class ComparisonBenchmark(Benchmark):
groups = {}
def __init__(self, group, classname=None, setup=None, validation=False, validation_func=None, largs=None, kwargs=None, **kw):
super(ComparisonBenchmark, self).__init__(setup=setup, largs=largs, kwargs=kwargs, **kw)
self.group = group
self.classname = classname
self.result_validation = validation
self.validation_func = validation_func
self.result = None
if group not in self.groups:
self.groups[group] = []
def __call__(self, caller):
if self.enable:
super(ComparisonBenchmark, self).__call__(caller)
self.groups[self.group].append(self)
# Bound functions are tested in ClassBenchmark.__call__
# Just store a reference to the ComparisonBenchmark if the function is bound, otherwise, run the test
if not self.is_class_method:
# Run the test
self.run_timeit(self.stmt, self.setup_src)
if self.result_validation:
self.validate()
return caller
def validate(self):
"""
Execute the code once to get it's results (to be used in function validation). Compare the result to the
first function in the group.
"""
validation_code = self.setup_src + '\nvalidation_result = ' + self.stmt
validation_scope = {}
exec(validation_code, validation_scope)
# Store the result in the first function in the group.
if len(self.groups[self.group]) == 1:
self.result = validation_scope['validation_result']
logging.info('PyPerform: Validating group "{b.group}" against function "{b.callable.__name__}"'
.format(b=self))
else:
compare_against_benchmark = self.groups[self.group][0]
test = [benchmark.result_validation for benchmark in self.groups[self.group]]
if not all(test):
raise ValueError('All functions within a group must have the same validation flag.')
compare_result = compare_against_benchmark.result
if self.validation_func:
results_are_valid = self.validation_func(compare_result, validation_scope['validation_result'])
else:
results_are_valid = compare_result == validation_scope['validation_result']
if results_are_valid:
logging.info('PyPerform: Validating {}......PASSED!'.format(self.callable.__name__))
else:
error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}'
raise ValidationError(error.format(compare_against_benchmark.callable.__name__, self.callable.__name__,
compare_result, validation_scope['validation_result']))
@staticmethod
|
lobocv/pyperform | pyperform/__init__.py | enable | python | def enable():
Benchmark.enable = True
ComparisonBenchmark.enable = True
BenchmarkedFunction.enable = True
BenchmarkedClass.enable = True | Enable all benchmarking. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/__init__.py#L23-L30 | null | from __future__ import print_function
__version__ = '1.86'
import sys
if sys.version[0] == '3':
import io as StringIO # Python 3.x
else:
import cStringIO as StringIO # Python 2.x
range = xrange
from pyperform.benchmark import Benchmark
from .comparisonbenchmark import ComparisonBenchmark
from .benchmarkedclass import BenchmarkedClass
from .benchmarkedfunction import BenchmarkedFunction
from .thread import Thread
from .timer import timer
from .exceptions import ValidationError
from .customlogger import CustomLogLevel, new_log_level
def disable():
"""
Disable all benchmarking.
"""
Benchmark.enable = False
ComparisonBenchmark.enable = False
BenchmarkedFunction.enable = False
BenchmarkedClass.enable = False
|
lobocv/pyperform | pyperform/__init__.py | disable | python | def disable():
Benchmark.enable = False
ComparisonBenchmark.enable = False
BenchmarkedFunction.enable = False
BenchmarkedClass.enable = False | Disable all benchmarking. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/__init__.py#L33-L40 | null | from __future__ import print_function
__version__ = '1.86'
import sys
if sys.version[0] == '3':
import io as StringIO # Python 3.x
else:
import cStringIO as StringIO # Python 2.x
range = xrange
from pyperform.benchmark import Benchmark
from .comparisonbenchmark import ComparisonBenchmark
from .benchmarkedclass import BenchmarkedClass
from .benchmarkedfunction import BenchmarkedFunction
from .thread import Thread
from .timer import timer
from .exceptions import ValidationError
from .customlogger import CustomLogLevel, new_log_level
def enable():
"""
Enable all benchmarking.
"""
Benchmark.enable = True
ComparisonBenchmark.enable = True
BenchmarkedFunction.enable = True
BenchmarkedClass.enable = True
|
lobocv/pyperform | pyperform/customlogger.py | new_log_level | python | def new_log_level(level, name, logger_name=None):
@CustomLogLevel(level, name, logger_name)
def _default_template(logger, msg, *args, **kwargs):
return msg, args, kwargs | Quick way to create a custom log level that behaves like the default levels in the logging module.
:param level: level number
:param name: level name
:param logger_name: optional logger name | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/customlogger.py#L40-L49 | null | __author__ = 'clobo'
import logging
class CustomLogLevel(object):
def __init__(self, level, name, logger_name=None):
self.level = level
self.name = name
self.logger_name = logger_name
if logger_name is None:
self.logger = logging.getLogger()
else:
self.logger = logging.getLogger(logger_name)
def __call__(self, customlevel):
"""
Wrap the decorated function to take care of the setting up of the custom log level.
"""
# Add the new custom level to the list of known levels
logging.addLevelName(self.level, self.name)
def _wrapper(msg, *args, **kwargs):
# Check if the currently set level allows this log level to print.
if self.logger.isEnabledFor(level):
_msg, _args, _kwargs = customlevel(self.logger, msg, *args, **kwargs)
self.logger.log(level, _msg, *_args, **_kwargs)
# Create function bindings in the logger or if using the root logger, setup the bindings to allow
# calls to logging.mycustomlevel() much like logging.info(), logging.debug() etc.
setattr(self.logger, self.name.lower(), _wrapper)
if self.logger_name is None:
setattr(logging, self.name.lower(), _wrapper)
return customlevel
if __name__ == '__main__':
level = logging.INFO-5
name = 'MYLEVEL'
# logger_name = 'mylogger'
logger_name = None
@CustomLogLevel(level, name, logger_name=logger_name)
def myloglevel(logger, msg, *args, **kwargs):
return 'This is a custom level: %s' % msg, args, kwargs
# create_new_level(level, name, logger_name=logger_name)
logging.basicConfig()
if logger_name:
l = logging.getLogger(logger_name)
logger = l
else:
l = logging.getLogger()
logger = logging
l.setLevel(logging.INFO)
logger.info('this is a test')
logger.mylevel('this is a test')
l.setLevel(level)
logger.info('this is a test')
logger.mylevel('this is a test')
|
lobocv/pyperform | pyperform/thread.py | enable_thread_profiling | python | def enable_thread_profiling(profile_dir, exception_callback=None):
global profiled_thread_enabled, Thread, Process
if os.path.isdir(profile_dir):
_Profiler.profile_dir = profile_dir
else:
raise OSError('%s does not exist' % profile_dir)
_Profiler.exception_callback = exception_callback
Thread = threading.Thread = ProfiledThread
Process = multiprocessing.Process = ProfiledProcess
profiled_thread_enabled = True | Monkey-patch the threading.Thread class with our own ProfiledThread. Any subsequent imports of threading.Thread
will reference ProfiledThread instead. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/thread.py#L22-L35 | null | __author__ = 'calvin'
import cProfile
import logging
from pyperform import StringIO
import os
import pstats
import sys
import threading
import multiprocessing
Thread = threading.Thread # Start off using threading.Thread until changed
Process = multiprocessing.Process
BaseThread = threading.Thread # Store the Thread class from the threading module before monkey-patching
BaseProcess = multiprocessing.Process
profiled_thread_enabled = False
logged_thread_enabled = True
def enable_thread_logging(exception_callback=None):
"""
Monkey-patch the threading.Thread class with our own LoggedThread. Any subsequent imports of threading.Thread
will reference LoggedThread instead.
"""
global logged_thread_enabled, Thread
LoggedThread.exception_callback = exception_callback
Thread = threading.Thread = LoggedThread
logged_thread_enabled = True
class _Profiler(object):
"""
A Thread that contains it's own profiler. When the SSI_App closes, all profiles are combined and printed
to a single .profile.
"""
profile_dir = None
exception_callback = None
_type = '_Profiler'
def run(self):
profiler = cProfile.Profile()
try:
logging.debug('{cls}: Starting {cls}: {name}'.format(cls=self._type, name=self.name))
profiler.runcall(super(_Profiler, self).run)
logging.debug('{cls}: Prepating to exit {cls}: {name}'.format(cls=self._type, name=self.name))
except Exception as e:
logging.error('{cls}: Error encountered in {name}'.format(cls=self._type, name=self.name))
logging.error(e)
if self.exception_callback:
e_type, e_value, last_traceback = sys.exc_info()
self.exception_callback(e_type, e_value, last_traceback)
finally:
if self.profile_dir is None:
logging.warning('{cls}: profile_dir is not specified. '
'Profile \'{name}\' will not be saved.'.format(cls=self._type, name=self.name))
return
self.print_stats(profiler)
def print_stats(self, profiler):
name = (self._type + '-' + self.name) if self._type else self.name
filename = os.path.join(self.profile_dir, name)
logging.debug('Printing stats for {name}'.format(name=name))
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(profiler, stream=s)
# Take out directory names
ps.strip_dirs()
# Sort
ps.sort_stats(sortby)
# Print to the stream
ps.print_stats()
stats_file = filename + '.stats'
profile_file = filename + '.profile'
# Create the stats file
ps.dump_stats(stats_file)
# Create a readable .profile file
with open(profile_file, 'w') as f:
f.write(s.getvalue())
@staticmethod
def combine_profiles(profile_dir, outfile, sortby='cumulative'):
s = StringIO.StringIO()
stat_files = [f for f in os.listdir(profile_dir) if os.path.isfile(os.path.join(profile_dir, f))
and f.endswith('.stats')]
ps = pstats.Stats(os.path.join(profile_dir, stat_files[0]), stream=s)
if len(stat_files) > 1:
for stat in stat_files[1:]:
ps.add(os.path.join(profile_dir, stat))
profile_name = os.path.join(profile_dir, '{}.profile'.format(outfile.replace('.profile', '')))
with open(profile_name, 'w') as f:
ps.strip_dirs()
ps.sort_stats(sortby)
ps.print_stats()
f.write(s.getvalue())
class ProfiledThread(_Profiler, BaseThread):
n_threads = 0
_type = 'Thread'
def __init__(self, *args, **kwargs):
super(ProfiledThread, self).__init__(*args, **kwargs)
ProfiledThread.n_threads += 1
class ProfiledProcess(_Profiler, Process):
n_processes = 0
_type = 'Process'
def __init__(self, *args, **kwargs):
super(ProfiledProcess, self).__init__(*args, **kwargs)
ProfiledProcess.n_processes += 1
class LoggedThread(BaseThread):
exception_callback = None
def run(self):
logging.debug('LoggedThread: Starting LoggedThread {}'.format(self.name))
try:
super(LoggedThread, self).run()
except Exception as e:
logging.error('LoggedThread: Error encountered in Thread {name}'.format(name=self.name))
logging.error(e)
if LoggedThread.exception_callback:
e_type, e_value, last_traceback = sys.exc_info()
LoggedThread.exception_callback(e_type, e_value, last_traceback)
|
lobocv/pyperform | pyperform/thread.py | enable_thread_logging | python | def enable_thread_logging(exception_callback=None):
global logged_thread_enabled, Thread
LoggedThread.exception_callback = exception_callback
Thread = threading.Thread = LoggedThread
logged_thread_enabled = True | Monkey-patch the threading.Thread class with our own LoggedThread. Any subsequent imports of threading.Thread
will reference LoggedThread instead. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/thread.py#L38-L46 | null | __author__ = 'calvin'
import cProfile
import logging
from pyperform import StringIO
import os
import pstats
import sys
import threading
import multiprocessing
Thread = threading.Thread # Start off using threading.Thread until changed
Process = multiprocessing.Process
BaseThread = threading.Thread # Store the Thread class from the threading module before monkey-patching
BaseProcess = multiprocessing.Process
profiled_thread_enabled = False
logged_thread_enabled = True
def enable_thread_profiling(profile_dir, exception_callback=None):
"""
Monkey-patch the threading.Thread class with our own ProfiledThread. Any subsequent imports of threading.Thread
will reference ProfiledThread instead.
"""
global profiled_thread_enabled, Thread, Process
if os.path.isdir(profile_dir):
_Profiler.profile_dir = profile_dir
else:
raise OSError('%s does not exist' % profile_dir)
_Profiler.exception_callback = exception_callback
Thread = threading.Thread = ProfiledThread
Process = multiprocessing.Process = ProfiledProcess
profiled_thread_enabled = True
class _Profiler(object):
"""
A Thread that contains it's own profiler. When the SSI_App closes, all profiles are combined and printed
to a single .profile.
"""
profile_dir = None
exception_callback = None
_type = '_Profiler'
def run(self):
profiler = cProfile.Profile()
try:
logging.debug('{cls}: Starting {cls}: {name}'.format(cls=self._type, name=self.name))
profiler.runcall(super(_Profiler, self).run)
logging.debug('{cls}: Prepating to exit {cls}: {name}'.format(cls=self._type, name=self.name))
except Exception as e:
logging.error('{cls}: Error encountered in {name}'.format(cls=self._type, name=self.name))
logging.error(e)
if self.exception_callback:
e_type, e_value, last_traceback = sys.exc_info()
self.exception_callback(e_type, e_value, last_traceback)
finally:
if self.profile_dir is None:
logging.warning('{cls}: profile_dir is not specified. '
'Profile \'{name}\' will not be saved.'.format(cls=self._type, name=self.name))
return
self.print_stats(profiler)
def print_stats(self, profiler):
name = (self._type + '-' + self.name) if self._type else self.name
filename = os.path.join(self.profile_dir, name)
logging.debug('Printing stats for {name}'.format(name=name))
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(profiler, stream=s)
# Take out directory names
ps.strip_dirs()
# Sort
ps.sort_stats(sortby)
# Print to the stream
ps.print_stats()
stats_file = filename + '.stats'
profile_file = filename + '.profile'
# Create the stats file
ps.dump_stats(stats_file)
# Create a readable .profile file
with open(profile_file, 'w') as f:
f.write(s.getvalue())
@staticmethod
def combine_profiles(profile_dir, outfile, sortby='cumulative'):
s = StringIO.StringIO()
stat_files = [f for f in os.listdir(profile_dir) if os.path.isfile(os.path.join(profile_dir, f))
and f.endswith('.stats')]
ps = pstats.Stats(os.path.join(profile_dir, stat_files[0]), stream=s)
if len(stat_files) > 1:
for stat in stat_files[1:]:
ps.add(os.path.join(profile_dir, stat))
profile_name = os.path.join(profile_dir, '{}.profile'.format(outfile.replace('.profile', '')))
with open(profile_name, 'w') as f:
ps.strip_dirs()
ps.sort_stats(sortby)
ps.print_stats()
f.write(s.getvalue())
class ProfiledThread(_Profiler, BaseThread):
n_threads = 0
_type = 'Thread'
def __init__(self, *args, **kwargs):
super(ProfiledThread, self).__init__(*args, **kwargs)
ProfiledThread.n_threads += 1
class ProfiledProcess(_Profiler, Process):
n_processes = 0
_type = 'Process'
def __init__(self, *args, **kwargs):
super(ProfiledProcess, self).__init__(*args, **kwargs)
ProfiledProcess.n_processes += 1
class LoggedThread(BaseThread):
exception_callback = None
def run(self):
logging.debug('LoggedThread: Starting LoggedThread {}'.format(self.name))
try:
super(LoggedThread, self).run()
except Exception as e:
logging.error('LoggedThread: Error encountered in Thread {name}'.format(name=self.name))
logging.error(e)
if LoggedThread.exception_callback:
e_type, e_value, last_traceback = sys.exc_info()
LoggedThread.exception_callback(e_type, e_value, last_traceback)
|
lobocv/pyperform | pyperform/tools.py | convert_time_units | python | def convert_time_units(t):
if t == 0:
return '0 s'
order = log10(t)
if -9 < order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units) | Convert time in seconds into reasonable time units. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L17-L34 | null | __author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def globalize_indentation(src):
""" Strip the indentation level so the code runs in the global scope. """
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
def walk_tree(start, attr):
"""
Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation.
"""
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild) |
lobocv/pyperform | pyperform/tools.py | globalize_indentation | python | def globalize_indentation(src):
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src | Strip the indentation level so the code runs in the global scope. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L37-L45 | null | __author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
if -9 < order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
def walk_tree(start, attr):
"""
Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation.
"""
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild) |
lobocv/pyperform | pyperform/tools.py | remove_decorators | python | def remove_decorators(src):
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src | Remove decorators from the source code | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L48-L64 | null | __author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
if -9 < order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
def globalize_indentation(src):
""" Strip the indentation level so the code runs in the global scope. """
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
def walk_tree(start, attr):
"""
Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation.
"""
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild) |
lobocv/pyperform | pyperform/tools.py | walk_tree | python | def walk_tree(start, attr):
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild) | Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L137-L147 | null | __author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
if -9 < order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
def globalize_indentation(src):
""" Strip the indentation level so the code runs in the global scope. """
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
|
lobocv/pyperform | pyperform/benchmarkedclass.py | BenchmarkedClass.validate | python | def validate(self, benchmarks):
class_code = self.setup_src
instance_creation = '\ninstance = {}'.format(self.stmt)
for i, benchmark in enumerate(benchmarks):
if not benchmark.result_validation:
break
validation_code = class_code + instance_creation + '\nvalidation_result = ' + benchmark.stmt
validation_scope = {}
exec(validation_code, validation_scope)
# Store the result in the first function in the group.
if i == 0:
compare_against_function = benchmarks[0].callable.__name__
compare_against_result = validation_scope['validation_result']
logging.info('PyPerform: Validating group "{b.group}" against method '
'"{b.classname}.{b.callable.__name__}"'.format(b=benchmarks[0]))
else:
if compare_against_result == validation_scope['validation_result']:
logging.info('PyPerform: Validating {b.classname}.{b.callable.__name__}......PASSED!'
.format(b=benchmark))
else:
error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}'
raise ValidationError(error.format(compare_against_function, benchmark.callable.__name__,
compare_against_result, validation_scope['validation_result'])) | Execute the code once to get it's results (to be used in function validation). Compare the result to the
first function in the group.
:param benchmarks: list of benchmarks to validate. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/benchmarkedclass.py#L36-L64 | null | class BenchmarkedClass(Benchmark):
bound_functions = {}
def __init__(self, setup=None, largs=None, kwargs=None, **kw):
super(BenchmarkedClass, self).__init__(setup, largs=largs, kwargs=kwargs, **kw)
def __call__(self, cls):
if self.enable:
super(BenchmarkedClass, self).__call__(cls)
setup_src = self.setup_src
setup_src += '\ninstance = {}'.format(self.stmt)
groups = set()
for p in self.bound_functions[cls.__name__]:
stmt = p.stmt
p.run_timeit(stmt, setup_src)
p.write_log()
if isinstance(p, BenchmarkedFunction):
print("{} \t {}".format(p.callable.__name__, convert_time_units(p.time_average_seconds)))
if hasattr(p, 'result_validation') and p.result_validation and p.group not in groups:
self.validate(p.groups[p.group])
groups.add(p.group)
return cls
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.