repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
joealcorn/xbox | xbox/vendor/requests/packages/urllib3/connectionpool.py | HTTPSConnectionPool._prepare_conn | python | def _prepare_conn(self, conn):
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn | Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/connectionpool.py#L656-L687 | null | class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html '
'(This warning will only appear once by default.)'),
InsecureRequestWarning)
|
joealcorn/xbox | xbox/vendor/requests/packages/chardet/chardetect.py | description_of | python | def description_of(file, name='stdin'):
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name | Return a string describing the probable encoding of a file. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/chardet/chardetect.py#L21-L33 | null | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
if __name__ == '__main__':
main()
|
chukysoria/pyspotify-connect | spotifyconnect/config.py | Config.load_application_key_file | python | def load_application_key_file(self, filename=b'spotify_appkey.key'):
with open(filename, 'rb') as fh:
self.app_key = fh.read() | Load your libspotify application key file.
If called without arguments, it tries to read ``spotify_appkey.key``
from the current working directory.
This is an alternative to setting :attr:`application_key` yourself. The
file must be a binary key file, not the C code key file that can be
compiled into an application. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/config.py#L103-L114 | null | class Config(object):
"""The session config.
Create an instance and assign to its attributes to configure. Then use the
config object to create a session::
>>> config = spotify.Config()
>>> config.user_agent = 'My awesome Spotify client'
>>> # Etc ...
>>> session = spotify.Session(config=config)
"""
def __init__(self):
self._sp_session_config = ffi.new('SpConfig *')
self.version = VERSION
self.buffer_size = BUFFER_SIZE # 1MB
try:
self.load_application_key_file()
except IOError:
logger.info(
'File spotify_appkey.key not found on default location.')
self.device_id = str(uuid.uuid4())
self.remote_name = REMOTE_NAME
self.brand_name = BRAND_NAME
self.model_name = MODEL_NAME
self.device_type = DeviceType.AudioDongle
@property
def sp_session_config(self):
return self._sp_session_config
@property
def version(self):
"""The API version of the libspotify we're using.
"""
return self._sp_session_config.version
@version.setter
def version(self, value):
self._sp_session_config.version = value
@property
def buffer_size(self):
"""The buffer size on bytes which libspotify-embedded-shared will uses.
"""
return self._sp_session_config.buffer_size
@buffer_size.setter
def buffer_size(self, value):
self._sp_session_config.buffer_size = value
self.sp_session_config.buffer = lib.malloc(value)
@property
def app_key(self):
"""Your libspotify application key.
Must be a bytestring. Alternatively, you can call
:meth:`load_application_key_file`, and pyspotify will correctly read
the file into :attr:`application_key`.
"""
return utils.to_bytes_or_none(
ffi.cast('char *', self._sp_session_config.app_key))
@app_key.setter
def app_key(self, value):
if value is None:
size = 0
else:
size = len(value)
assert size in (0, 321), (
'Invalid application key; expected 321 bytes, got %d bytes' % size)
self._application_key = utils.to_char_or_null(value)
self._sp_session_config.app_key = ffi.cast(
'void *', self._application_key)
self._sp_session_config.app_key_size = size
@property
def device_id(self):
"""Device ID for offline synchronization and logging purposes.
Defaults to :class:`None`.
The Device ID must be unique to the particular device instance, i.e. no
two units must supply the same Device ID. The Device ID must not change
between sessions or power cycles. Good examples is the device's MAC
address or unique serial number.
Setting the device ID to an empty string has the same effect as setting
it to :class:`None`.
"""
return utils.to_unicode_or_none(self._sp_session_config.deviceId)
@device_id.setter
def device_id(self, value):
# NOTE libspotify segfaults if device_id is set to an empty string,
# thus we convert empty strings to NULL.
self._deviceId = utils.to_char_or_null(value or None)
self._sp_session_config.deviceId = self._deviceId
@property
def remote_name(self):
return utils.to_unicode_or_none(self._sp_session_config.remoteName)
@remote_name.setter
def remote_name(self, value):
self._remoteName = utils.to_char_or_null(value or None)
self._sp_session_config.remoteName = self._remoteName
@property
def brand_name(self):
return utils.to_unicode_or_none(self._sp_session_config.brandName)
@brand_name.setter
def brand_name(self, value):
self._brandName = utils.to_char_or_null(value or None)
self._sp_session_config.brandName = self._brandName
@property
def model_name(self):
return utils.to_unicode_or_none(self._sp_session_config.modelName)
@model_name.setter
def model_name(self, value):
self._modelName = utils.to_char_or_null(value or None)
self._sp_session_config.modelName = self._modelName
@property
def device_type(self):
return DeviceType(self._sp_session_config.deviceType)
@device_type.setter
def device_type(self, value):
self._sp_session_config.deviceType = value
@property
def client_id(self):
return utils.to_unicode_or_none(self._sp_session_config.client_id)
@client_id.setter
def client_id(self, value):
self._client_id = utils.to_char_or_null(value or None)
self._sp_session_config.client_id = self._client_id
@property
def client_secret(self):
return utils.to_unicode_or_none(self._sp_session_config.client_secret)
@client_secret.setter
def client_secret(self, value):
self._client_secret = utils.to_char_or_null(value or None)
self._sp_session_config.client_secret = self._client_secret
@property
def userdata(self):
return self._sp_session_config.userdata
@userdata.setter
def userdata(self, value):
self._sp_session_config.userdata = value
@property
def error_callback(self):
return self._sp_session_config.error_callback
@error_callback.setter
def error_callback(self, value):
self._sp_session_config.error_callback = value
|
chukysoria/pyspotify-connect | spotifyconnect/connection.py | Connection.login | python | def login(self, username, password=None, blob=None, zeroconf=None):
username = utils.to_char(username)
if password is not None:
password = utils.to_char(password)
spotifyconnect.Error.maybe_raise(
lib.SpConnectionLoginPassword(
username, password))
elif blob is not None:
blob = utils.to_char(blob)
spotifyconnect.Error.maybe_raise(
lib.SpConnectionLoginBlob(username, blob))
elif zeroconf is not None:
spotifyconnect.Error.maybe_raise(
lib.SpConnectionLoginZeroConf(
username, *zeroconf))
else:
raise AttributeError(
"Must specify a login method (password, blob or zeroconf)") | Authenticate to Spotify's servers.
You can login with one of three combinations:
- ``username`` and ``password``
- ``username`` and ``blob``
- ``username`` and ``zeroconf``
To get the ``blob`` string, you must once log in with ``username`` and
``password``. You'll then get the ``blob`` string passed to the
:attr:`~ConnectionCallbacks.new_credentials` callback. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/connection.py#L99-L130 | [
"def to_char(value):\n \"\"\"Converts bytes, unicode, and C char arrays to C char arrays. \"\"\"\n return ffi.new('char[]', to_bytes(value))\n",
"def maybe_raise(cls, error_type, ignores=None):\n \"\"\"Raise an :exc:`LibError` unless the ``error_type`` is\n :attr:`ErrorType.OK` or in the ``ignores`` list of error types.\n Internal method.\n \"\"\"\n ignores = set(ignores or [])\n ignores.add(ErrorType.Ok)\n if error_type not in ignores:\n raise LibError(error_type)\n"
] | class Connection(utils.EventEmitter):
"""Connection controller.
You'll never need to create an instance of this class yourself. You'll find
it ready to use as the :attr:`~Session.connection` attribute on the
:class:`Session` instance.
"""
@serialized
def __init__(self, session):
super(Connection, self).__init__()
self._connectionStatus = ConnectionState.LoggedOut
spotifyconnect._connection_instance = self
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
spotifyconnect.Error.maybe_raise(
lib.SpRegisterConnectionCallbacks(
_ConnectionCallbacks.get_struct(), session))
spotifyconnect.Error.maybe_raise(
lib.SpRegisterDebugCallbacks(
_DebugCallbacks.get_struct(), session))
@property
@serialized
def connection_state(self):
"""The session's current :class:`ConnectionState`.
The mapping is as follows
- :attr:`~ConnectionState.LoggedIn`: authenticated, online
- :attr:`~ConnectionState.LoggedOut`: not authenticated
- :attr:`~ConnectionState.TemporaryError`: Unknown error
Register listeners for the
:attr:`spotify.SessionEvent.CONNECTION_STATE_UPDATED` event to be
notified when the connection state changes.
"""
return ConnectionState(not lib.SpConnectionIsLoggedIn())
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
@serialized
@serialized
def logout(self):
"""Log out the current user.
"""
spotifyconnect.Error.maybe_raise(lib.SpConnectionLogout())
|
chukysoria/pyspotify-connect | spotifyconnect/sink.py | Sink.on | python | def on(self):
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery) | Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/sink.py#L12-L22 | null | class Sink(object):
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(
self,
audio_format,
frames,
num_frames,
pending,
session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
|
chukysoria/pyspotify-connect | spotifyconnect/sink.py | Sink.off | python | def off(self):
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close() | Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/sink.py#L24-L33 | null | class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def _on_music_delivery(
self,
audio_format,
frames,
num_frames,
pending,
session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
|
chukysoria/pyspotify-connect | spotifyconnect/player.py | Player.enable_shuffle | python | def enable_shuffle(self, value=None):
if value is None:
value = not self.shuffled
spotifyconnect.Error.maybe_raise(lib.SpPlaybackEnableShuffle(value)) | Enable shuffle mode | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/player.py#L109-L114 | [
"def maybe_raise(cls, error_type, ignores=None):\n \"\"\"Raise an :exc:`LibError` unless the ``error_type`` is\n :attr:`ErrorType.OK` or in the ``ignores`` list of error types.\n Internal method.\n \"\"\"\n ignores = set(ignores or [])\n ignores.add(ErrorType.Ok)\n if error_type not in ignores:\n raise LibError(error_type)\n"
] | class Player(utils.EventEmitter):
"""Playback controller.
You'll never need to create an instance of this class yourself. You'll find
it ready to use as the :attr:`~Session.player` attribute on the
:class:`Session` instance.
"""
@serialized
def __init__(self, session):
super(Player, self).__init__()
spotifyconnect._player_instance = self
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
spotifyconnect.Error.maybe_raise(
lib.SpRegisterPlaybackCallbacks(
_PlayerCallbacks.get_struct(), session))
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
@serialized
def play(self):
"""Play the currently loaded track.
This will cause alsa_sink data to be passed to the
:attr:`~SessionCallbacks.music_delivery` callback.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())
@serialized
def pause(self):
"""Pause the currently loaded track.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())
@serialized
def skip_to_next(self):
"""Skips to the next track on the playlist.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToNext())
@serialized
def skip_to_prev(self):
"""Skips to the previous track on the playlist.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToPrev())
@serialized
def seek(self, offset):
"""Seek to the offset in ms in the currently loaded track."""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSeek(offset))
@serialized
@serialized
def enable_repeat(self, value=None):
"""Enable repeat mode
"""
if value is None:
value = not self.repeated
spotifyconnect.Error.maybe_raise(lib.SpPlaybackEnableRepeat(value))
@property
@serialized
def playing(self):
return lib.SpPlaybackIsPlaying()
@property
@serialized
def shuffled(self):
return lib.SpPlaybackIsShuffled()
@property
@serialized
def repeated(self):
return lib.SpPlaybackIsRepeated()
@property
@serialized
def active_device(self):
return lib.SpPlaybackIsActiveDevice()
@property
@serialized
def volume(self):
value = lib.SpPlaybackGetVolume()
corrected_value = value / 655.35
return corrected_value
@volume.setter
@serialized
def volume(self, value):
corrected_value = int(value * 655.35)
spotifyconnect.Error.maybe_raise(
lib.SpPlaybackUpdateVolume(corrected_value))
@property
def current_track(self):
return self.get_track_metadata()
@property
@serialized
def metadata_valid_range(self):
start = ffi.new("int *")
end = ffi.new("int *")
spotifyconnect.Error.maybe_raise(
lib.SpGetMetadataValidRange(start, end))
valid_range = {
'start': start[0],
'end': end[0]
}
return valid_range
@serialized
def get_track_metadata(self, offset=0):
sp_metadata = ffi.new('SpMetadata *')
spotifyconnect.Error.maybe_raise(
lib.SpGetMetadata(sp_metadata, offset))
return spotifyconnect.Metadata(sp_metadata)
@serialized
def set_bitrate(self, bitrate):
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSetBitrate(bitrate))
|
chukysoria/pyspotify-connect | spotifyconnect/player.py | Player.enable_repeat | python | def enable_repeat(self, value=None):
if value is None:
value = not self.repeated
spotifyconnect.Error.maybe_raise(lib.SpPlaybackEnableRepeat(value)) | Enable repeat mode | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/player.py#L117-L122 | [
"def maybe_raise(cls, error_type, ignores=None):\n \"\"\"Raise an :exc:`LibError` unless the ``error_type`` is\n :attr:`ErrorType.OK` or in the ``ignores`` list of error types.\n Internal method.\n \"\"\"\n ignores = set(ignores or [])\n ignores.add(ErrorType.Ok)\n if error_type not in ignores:\n raise LibError(error_type)\n"
] | class Player(utils.EventEmitter):
"""Playback controller.
You'll never need to create an instance of this class yourself. You'll find
it ready to use as the :attr:`~Session.player` attribute on the
:class:`Session` instance.
"""
@serialized
def __init__(self, session):
super(Player, self).__init__()
spotifyconnect._player_instance = self
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
spotifyconnect.Error.maybe_raise(
lib.SpRegisterPlaybackCallbacks(
_PlayerCallbacks.get_struct(), session))
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
@serialized
def play(self):
"""Play the currently loaded track.
This will cause alsa_sink data to be passed to the
:attr:`~SessionCallbacks.music_delivery` callback.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())
@serialized
def pause(self):
"""Pause the currently loaded track.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())
@serialized
def skip_to_next(self):
"""Skips to the next track on the playlist.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToNext())
@serialized
def skip_to_prev(self):
"""Skips to the previous track on the playlist.
"""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToPrev())
@serialized
def seek(self, offset):
"""Seek to the offset in ms in the currently loaded track."""
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSeek(offset))
@serialized
def enable_shuffle(self, value=None):
"""Enable shuffle mode
"""
if value is None:
value = not self.shuffled
spotifyconnect.Error.maybe_raise(lib.SpPlaybackEnableShuffle(value))
@serialized
@property
@serialized
def playing(self):
return lib.SpPlaybackIsPlaying()
@property
@serialized
def shuffled(self):
return lib.SpPlaybackIsShuffled()
@property
@serialized
def repeated(self):
return lib.SpPlaybackIsRepeated()
@property
@serialized
def active_device(self):
return lib.SpPlaybackIsActiveDevice()
@property
@serialized
def volume(self):
value = lib.SpPlaybackGetVolume()
corrected_value = value / 655.35
return corrected_value
@volume.setter
@serialized
def volume(self, value):
corrected_value = int(value * 655.35)
spotifyconnect.Error.maybe_raise(
lib.SpPlaybackUpdateVolume(corrected_value))
@property
def current_track(self):
return self.get_track_metadata()
@property
@serialized
def metadata_valid_range(self):
start = ffi.new("int *")
end = ffi.new("int *")
spotifyconnect.Error.maybe_raise(
lib.SpGetMetadataValidRange(start, end))
valid_range = {
'start': start[0],
'end': end[0]
}
return valid_range
@serialized
def get_track_metadata(self, offset=0):
sp_metadata = ffi.new('SpMetadata *')
spotifyconnect.Error.maybe_raise(
lib.SpGetMetadata(sp_metadata, offset))
return spotifyconnect.Metadata(sp_metadata)
@serialized
def set_bitrate(self, bitrate):
spotifyconnect.Error.maybe_raise(lib.SpPlaybackSetBitrate(bitrate))
|
chukysoria/pyspotify-connect | spotifyconnect/error.py | Error.maybe_raise | python | def maybe_raise(cls, error_type, ignores=None):
ignores = set(ignores or [])
ignores.add(ErrorType.Ok)
if error_type not in ignores:
raise LibError(error_type) | Raise an :exc:`LibError` unless the ``error_type`` is
:attr:`ErrorType.OK` or in the ``ignores`` list of error types.
Internal method. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/error.py#L21-L29 | null | class Error(Exception):
"""A Spotify error.
This is the superclass of all custom exceptions raised by pyspotify.
"""
@classmethod
|
chukysoria/pyspotify-connect | spotifyconnect/audio.py | AudioFormat.frame_size | python | def frame_size(self):
if self.sample_type == SampleType.S16NativeEndian:
# Sample size is 2 bytes
return self.sample_size * self.channels
else:
raise ValueError('Unknown sample type: %d', self.sample_type) | The byte size of a single frame of this format. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/audio.py#L62-L68 | null | class AudioFormat(object):
"""A Spotify alsa_sink format object.
You'll never need to create an instance of this class yourself, but you'll
get :class:`AudioFormat` objects as the ``audio_format`` argument to the
:attr:`~spotifyconnect.PlayerCallbacks.MUSIC_DELIVERY` callback.
"""
def __init__(self, sp_audioformat):
self._sp_audioformat = sp_audioformat
@property
def sample_type(self):
"""The :class:`SampleType`, currently always
:attr:`SampleType.S16NativeEndian`."""
return SampleType(self._sp_audioformat.sample_type)
@property
def sample_rate(self):
"""The sample rate, typically 44100 Hz."""
return self._sp_audioformat.sample_rate
@property
def channels(self):
"""The number of audio channels, typically 2."""
return self._sp_audioformat.channels
@property
@property
def sample_size(self):
"""The byte size of a single frame of this format."""
if self.sample_type == SampleType.S16NativeEndian:
# Sample size is 2 bytes
return 2
else:
raise ValueError('Unknown sample type: %d', self.sample_type)
|
chukysoria/pyspotify-connect | spotifyconnect/__init__.py | _setup_logging | python | def _setup_logging():
import logging
logger = logging.getLogger('spotify-connect')
handler = logging.NullHandler()
logger.addHandler(handler) | Setup logging to log to nowhere by default.
For details, see:
http://docs.python.org/3/howto/logging.html#library-config
Internal function. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/__init__.py#L19-L31 | null | from __future__ import unicode_literals
import threading
__version__ = '0.1.13'
# Global reentrant lock to be held whenever libspotify functions are called or
# libspotify owned data is worked on. This is the heart of pyspotify's thread
# safety.
_lock = threading.RLock()
# Reference to the spotifyconnect.Session instance. Used to enforce that one
# and only one session exists in each process.
_session_instance = None
def serialized(f):
"""Decorator that serializes access to all decorated functions.
The decorator acquires pyspotify's single global lock while calling any
wrapped function. It is used to serialize access to:
- All calls to functions on :attr:`spotify.lib`.
- All code blocks working on pointers returned from functions on
:attr:`spotify.lib`.
- All code blocks working on other internal data structures in pyspotify.
Together this is what makes pyspotify safe to use from multiple threads and
enables convenient features like the :class:`~spotify.EventLoop`.
Internal function.
"""
import functools
@functools.wraps(f)
def wrapper(*args, **kwargs):
with _lock:
return f(*args, **kwargs)
if not hasattr(wrapper, '__wrapped__'):
# Workaround for Python < 3.2
wrapper.__wrapped__ = f
return wrapper
class _SerializedLib(object):
"""CFFI library wrapper to serialize all calls to library functions.
Internal class.
"""
def __init__(self, lib):
for name in dir(lib):
attr = getattr(lib, name)
if name.startswith('Sp') and callable(attr):
attr = serialized(attr)
setattr(self, name, attr)
_setup_logging()
from spotifyconnect._spotifyconnect import ffi, lib # noqa
lib = _SerializedLib(lib)
from spotifyconnect.audio import * # noqa
from spotifyconnect.config import * # noqa
from spotifyconnect.connection import * # noqa
from spotifyconnect.error import * # noqa
from spotifyconnect.eventloop import * # noqa
from spotifyconnect.metadata import * # noqa
from spotifyconnect.player import * # noqa
from spotifyconnect.session import * # noqa
from spotifyconnect.sink import * # noqa
from spotifyconnect.zeroconf import * # noqa
|
chukysoria/pyspotify-connect | spotifyconnect/__init__.py | serialized | python | def serialized(f):
import functools
@functools.wraps(f)
def wrapper(*args, **kwargs):
with _lock:
return f(*args, **kwargs)
if not hasattr(wrapper, '__wrapped__'):
# Workaround for Python < 3.2
wrapper.__wrapped__ = f
return wrapper | Decorator that serializes access to all decorated functions.
The decorator acquires pyspotify's single global lock while calling any
wrapped function. It is used to serialize access to:
- All calls to functions on :attr:`spotify.lib`.
- All code blocks working on pointers returned from functions on
:attr:`spotify.lib`.
- All code blocks working on other internal data structures in pyspotify.
Together this is what makes pyspotify safe to use from multiple threads and
enables convenient features like the :class:`~spotify.EventLoop`.
Internal function. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/__init__.py#L34-L61 | null | from __future__ import unicode_literals
import threading
__version__ = '0.1.13'
# Global reentrant lock to be held whenever libspotify functions are called or
# libspotify owned data is worked on. This is the heart of pyspotify's thread
# safety.
_lock = threading.RLock()
# Reference to the spotifyconnect.Session instance. Used to enforce that one
# and only one session exists in each process.
_session_instance = None
def _setup_logging():
"""Setup logging to log to nowhere by default.
For details, see:
http://docs.python.org/3/howto/logging.html#library-config
Internal function.
"""
import logging
logger = logging.getLogger('spotify-connect')
handler = logging.NullHandler()
logger.addHandler(handler)
class _SerializedLib(object):
"""CFFI library wrapper to serialize all calls to library functions.
Internal class.
"""
def __init__(self, lib):
for name in dir(lib):
attr = getattr(lib, name)
if name.startswith('Sp') and callable(attr):
attr = serialized(attr)
setattr(self, name, attr)
_setup_logging()
from spotifyconnect._spotifyconnect import ffi, lib # noqa
lib = _SerializedLib(lib)
from spotifyconnect.audio import * # noqa
from spotifyconnect.config import * # noqa
from spotifyconnect.connection import * # noqa
from spotifyconnect.error import * # noqa
from spotifyconnect.eventloop import * # noqa
from spotifyconnect.metadata import * # noqa
from spotifyconnect.player import * # noqa
from spotifyconnect.session import * # noqa
from spotifyconnect.sink import * # noqa
from spotifyconnect.zeroconf import * # noqa
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | make_enum | python | def make_enum(lib_prefix, enum_prefix=''):
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper | Class decorator for automatically adding enum values.
The values are read directly from the :attr:`spotify.lib` CFFI wrapper
around libspotify. All values starting with ``lib_prefix`` are added. The
``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can
be specified to add a prefix to all the names. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L143-L158 | null | from __future__ import unicode_literals
import collections
import sys
from spotifyconnect import ffi, lib, serialized
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no branch
string_types = (basestring,) # noqa
text_type = unicode # noqa
binary_type = str
else:
string_types = (str,)
text_type = str
binary_type = bytes
class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
class _Listener(collections.namedtuple(
'Listener', ['callback', 'user_args'])):
"""An listener of events from an :class:`EventEmitter`"""
class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
def add(cls, name, value):
"""Add a name-value pair to the enumeration."""
attr = cls(value)
attr._name = name
setattr(cls, name, attr)
def to_bytes(value):
"""Converts bytes, unicode, and C char arrays to bytes.
Unicode strings are encoded to UTF-8.
"""
if isinstance(value, text_type):
return value.encode('utf-8')
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_bytes_or_none(value):
"""Converts C char arrays to bytes and C NULL values to None."""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value)
else:
raise ValueError('Value must be char[] or NULL')
def to_char(value):
"""Converts bytes, unicode, and C char arrays to C char arrays. """
return ffi.new('char[]', to_bytes(value))
def to_char_or_null(value):
"""Converts bytes, unicode, and C char arrays to C char arrays, and
:class:`None` to C NULL values.
"""
if value is None:
return ffi.NULL
else:
return to_char(value)
def to_unicode(value):
"""Converts bytes, unicode, and C char arrays to unicode strings.
Bytes and C char arrays are decoded from UTF-8.
"""
if isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
elif isinstance(value, binary_type):
return value.decode('utf-8')
elif isinstance(value, text_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8.
"""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL')
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | to_bytes | python | def to_bytes(value):
if isinstance(value, text_type):
return value.encode('utf-8')
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]') | Converts bytes, unicode, and C char arrays to bytes.
Unicode strings are encoded to UTF-8. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L161-L173 | null | from __future__ import unicode_literals
import collections
import sys
from spotifyconnect import ffi, lib, serialized
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no branch
string_types = (basestring,) # noqa
text_type = unicode # noqa
binary_type = str
else:
string_types = (str,)
text_type = str
binary_type = bytes
class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
class _Listener(collections.namedtuple(
'Listener', ['callback', 'user_args'])):
"""An listener of events from an :class:`EventEmitter`"""
class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
def add(cls, name, value):
"""Add a name-value pair to the enumeration."""
attr = cls(value)
attr._name = name
setattr(cls, name, attr)
def make_enum(lib_prefix, enum_prefix=''):
"""Class decorator for automatically adding enum values.
The values are read directly from the :attr:`spotify.lib` CFFI wrapper
around libspotify. All values starting with ``lib_prefix`` are added. The
``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can
be specified to add a prefix to all the names.
"""
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper
def to_bytes_or_none(value):
"""Converts C char arrays to bytes and C NULL values to None."""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value)
else:
raise ValueError('Value must be char[] or NULL')
def to_char(value):
"""Converts bytes, unicode, and C char arrays to C char arrays. """
return ffi.new('char[]', to_bytes(value))
def to_char_or_null(value):
"""Converts bytes, unicode, and C char arrays to C char arrays, and
:class:`None` to C NULL values.
"""
if value is None:
return ffi.NULL
else:
return to_char(value)
def to_unicode(value):
"""Converts bytes, unicode, and C char arrays to unicode strings.
Bytes and C char arrays are decoded from UTF-8.
"""
if isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
elif isinstance(value, binary_type):
return value.decode('utf-8')
elif isinstance(value, text_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8.
"""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL')
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | to_bytes_or_none | python | def to_bytes_or_none(value):
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value)
else:
raise ValueError('Value must be char[] or NULL') | Converts C char arrays to bytes and C NULL values to None. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L176-L183 | null | from __future__ import unicode_literals
import collections
import sys
from spotifyconnect import ffi, lib, serialized
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no branch
string_types = (basestring,) # noqa
text_type = unicode # noqa
binary_type = str
else:
string_types = (str,)
text_type = str
binary_type = bytes
class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
class _Listener(collections.namedtuple(
'Listener', ['callback', 'user_args'])):
"""An listener of events from an :class:`EventEmitter`"""
class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
def add(cls, name, value):
"""Add a name-value pair to the enumeration."""
attr = cls(value)
attr._name = name
setattr(cls, name, attr)
def make_enum(lib_prefix, enum_prefix=''):
"""Class decorator for automatically adding enum values.
The values are read directly from the :attr:`spotify.lib` CFFI wrapper
around libspotify. All values starting with ``lib_prefix`` are added. The
``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can
be specified to add a prefix to all the names.
"""
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper
def to_bytes(value):
"""Converts bytes, unicode, and C char arrays to bytes.
Unicode strings are encoded to UTF-8.
"""
if isinstance(value, text_type):
return value.encode('utf-8')
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_char(value):
"""Converts bytes, unicode, and C char arrays to C char arrays. """
return ffi.new('char[]', to_bytes(value))
def to_char_or_null(value):
"""Converts bytes, unicode, and C char arrays to C char arrays, and
:class:`None` to C NULL values.
"""
if value is None:
return ffi.NULL
else:
return to_char(value)
def to_unicode(value):
"""Converts bytes, unicode, and C char arrays to unicode strings.
Bytes and C char arrays are decoded from UTF-8.
"""
if isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
elif isinstance(value, binary_type):
return value.decode('utf-8')
elif isinstance(value, text_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8.
"""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL')
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | to_unicode | python | def to_unicode(value):
if isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
elif isinstance(value, binary_type):
return value.decode('utf-8')
elif isinstance(value, text_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]') | Converts bytes, unicode, and C char arrays to unicode strings.
Bytes and C char arrays are decoded from UTF-8. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L201-L213 | null | from __future__ import unicode_literals
import collections
import sys
from spotifyconnect import ffi, lib, serialized
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no branch
string_types = (basestring,) # noqa
text_type = unicode # noqa
binary_type = str
else:
string_types = (str,)
text_type = str
binary_type = bytes
class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
class _Listener(collections.namedtuple(
'Listener', ['callback', 'user_args'])):
"""An listener of events from an :class:`EventEmitter`"""
class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
def add(cls, name, value):
"""Add a name-value pair to the enumeration."""
attr = cls(value)
attr._name = name
setattr(cls, name, attr)
def make_enum(lib_prefix, enum_prefix=''):
"""Class decorator for automatically adding enum values.
The values are read directly from the :attr:`spotify.lib` CFFI wrapper
around libspotify. All values starting with ``lib_prefix`` are added. The
``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can
be specified to add a prefix to all the names.
"""
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper
def to_bytes(value):
"""Converts bytes, unicode, and C char arrays to bytes.
Unicode strings are encoded to UTF-8.
"""
if isinstance(value, text_type):
return value.encode('utf-8')
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]')
def to_bytes_or_none(value):
"""Converts C char arrays to bytes and C NULL values to None."""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value)
else:
raise ValueError('Value must be char[] or NULL')
def to_char(value):
"""Converts bytes, unicode, and C char arrays to C char arrays. """
return ffi.new('char[]', to_bytes(value))
def to_char_or_null(value):
"""Converts bytes, unicode, and C char arrays to C char arrays, and
:class:`None` to C NULL values.
"""
if value is None:
return ffi.NULL
else:
return to_char(value)
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8.
"""
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL')
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | to_unicode_or_none | python | def to_unicode_or_none(value):
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL') | Converts C char arrays to unicode and C NULL values to None.
C char arrays are decoded from UTF-8. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L216-L226 | null | from __future__ import unicode_literals
import collections
import sys
from spotifyconnect import ffi, lib, serialized
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no branch
string_types = (basestring,) # noqa
text_type = unicode # noqa
binary_type = str
else:
string_types = (str,)
text_type = str
binary_type = bytes
class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
class _Listener(collections.namedtuple(
'Listener', ['callback', 'user_args'])):
"""An listener of events from an :class:`EventEmitter`"""
class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
def add(cls, name, value):
"""Add a name-value pair to the enumeration."""
attr = cls(value)
attr._name = name
setattr(cls, name, attr)
def make_enum(lib_prefix, enum_prefix=''):
"""Class decorator for automatically adding enum values.
The values are read directly from the :attr:`spotify.lib` CFFI wrapper
around libspotify. All values starting with ``lib_prefix`` are added. The
``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can
be specified to add a prefix to all the names.
"""
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper
def to_bytes(value):
    """Convert text, bytes, or a C char array to bytes.

    Text is encoded as UTF-8; bytes pass through unchanged; C char
    arrays are copied out via ``ffi.string``.

    Raises :exc:`ValueError` for any other type.
    """
    if isinstance(value, text_type):
        return value.encode('utf-8')
    if isinstance(value, ffi.CData):
        return ffi.string(value)
    if isinstance(value, binary_type):
        return value
    raise ValueError('Value must be text, bytes, or char[]')
def to_bytes_or_none(value):
    """Convert a C char array to bytes; map C NULL to :class:`None`.

    Raises :exc:`ValueError` if ``value`` is neither.
    """
    if value == ffi.NULL:
        return None
    if isinstance(value, ffi.CData):
        return ffi.string(value)
    raise ValueError('Value must be char[] or NULL')
def to_char(value):
    """Allocate a new C char array holding ``value``.

    ``value`` may be text, bytes, or an existing C char array; it is
    normalized through :func:`to_bytes` first.
    """
    encoded = to_bytes(value)
    return ffi.new('char[]', encoded)
def to_char_or_null(value):
    """Like :func:`to_char`, but map :class:`None` to a C NULL value."""
    return ffi.NULL if value is None else to_char(value)
def to_unicode(value):
    """Convert bytes, a C char array, or text to a unicode string.

    Bytes and C char arrays are decoded as UTF-8; text passes through
    unchanged.

    Raises :exc:`ValueError` for any other type.
    """
    if isinstance(value, ffi.CData):
        return ffi.string(value).decode('utf-8')
    if isinstance(value, binary_type):
        return value.decode('utf-8')
    if isinstance(value, text_type):
        return value
    raise ValueError('Value must be text, bytes, or char[]')
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | EventEmitter.on | python | def on(self, event, listener, *user_args):
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args)) | Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L29-L40 | null | class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | EventEmitter.off | python | def off(self, event=None, listener=None):
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener] | Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L43-L62 | null | class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | EventEmitter.emit | python | def emit(self, event, *event_args):
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback) | Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on` | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L64-L75 | null | class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | EventEmitter.num_listeners | python | def num_listeners(self, event=None):
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values()) | Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L77-L86 | null | class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def call(self, event, *event_args):
"""Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success.
"""
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args)
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | EventEmitter.call | python | def call(self, event, *event_args):
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args) | Call the single registered listener for ``event``.
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L88-L105 | [
"def num_listeners(self, event=None):\n \"\"\"Return the number of listeners for ``event``.\n\n Return the total number of listeners for all events on this object if\n ``event`` is :class:`None`.\n \"\"\"\n if event is not None:\n return len(self._listeners[event])\n else:\n return sum(len(l) for l in self._listeners.values())\n"
] | class EventEmitter(object):
"""Mixin for adding event emitter functionality to a class."""
def __init__(self):
self._listeners = collections.defaultdict(list)
@serialized
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called the next time the ``event`` is emitted.
"""
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args))
@serialized
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``.
If ``listener`` is :class:`None`, all listeners for the given ``event``
will be removed.
If ``event`` is :class:`None`, all listeners for all events on this
object will be removed.
"""
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener]
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback)
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``.
Return the total number of listeners for all events on this object if
``event`` is :class:`None`.
"""
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values())
|
chukysoria/pyspotify-connect | spotifyconnect/utils.py | IntEnum.add | python | def add(cls, name, value):
attr = cls(value)
attr._name = name
setattr(cls, name, attr) | Add a name-value pair to the enumeration. | train | https://github.com/chukysoria/pyspotify-connect/blob/bd157fa4fb2b51b3641f198a35384678c1a4fa11/spotifyconnect/utils.py#L136-L140 | null | class IntEnum(int):
"""An enum type for values mapping to integers.
Tries to stay as close as possible to the enum type specified in
:pep:`435` and introduced in Python 3.4.
"""
def __new__(cls, value):
if not hasattr(cls, '_values'):
cls._values = {}
if value not in cls._values:
cls._values[value] = int.__new__(cls, value)
return cls._values[value]
def __repr__(self):
if hasattr(self, '_name'):
return '<%s.%s: %d>' % (self.__class__.__name__, self._name, self)
else:
return '<Unknown %s: %d>' % (self.__class__.__name__, self)
@classmethod
|
wharris/dougrain | dougrain/builder.py | Builder.add_curie | python | def add_curie(self, name, href):
self.draft.set_curie(self, name, href)
return self | Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L74-L84 | [
"def set_curie(self, doc, name, href):\n doc.add_link(self.curies_rel, href, name=name, templated=True)\n",
"def set_curie(self, doc, name, href):\n # CURIE links should always be in an array, even if there is only\n # one.\n doc.o.setdefault(LINKS_KEY, {}).setdefault(self.curies_rel, [])\n doc.o[LINKS_KEY][self.curies_rel].append(\n {'href': href, 'name': name, 'templated': True})\n"
] | class Builder(object):
"""Simplify creation of HAL documents.
``Builder`` provides a lightweight chainable API for creating HAL
documents.
Unlike ``dougrain.Document``, ``Builder`` provides no facilities for
interrogating or mutating existing documents. ``Builder`` also makes fewer
sanity checks than ``dougrain.Document``, which makes it considerably
faster, but more likely to produce invalid HAL documents.
"""
def __init__(self, href, draft=drafts.LATEST, **kwargs):
"""``Builder(href, draft=drafts.LATEST, **kwargs)``
Make a builder for a document. The document starts with a ``self``
link to ``href``.
The version of the spec may be specified in the optional ``draft``
argument, which defaults to the latest draft. For example, to build a
document conforming to the older Draft 4 of the spec:
``
dougrain.Document("/test", draft=dougrain.drafts.DRAFT_4)
``
Additional properties for the self link may be supplied in other
keyword arguments. For example:
``
dougrain.Document("/test", profile="/profiles/")
``
"""
self.o = {'_links': {'self': dict(href=href, **kwargs)}}
self.draft = draft.draft
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method is used when embedding a ``Builder`` in a
``dougrain.Document``.
"""
return self.o['_links']['self']['href']
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the document.
This method is used when adding a link to a ``Builder`` from a
``dougrain.Document``.
"""
return link.Link(self.o['_links']['self'], None)
def set_property(self, name, value):
"""Set a property on the document.
If there is no property with the name in ``name``, a new property is
created with the name from ``name`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
This method returns self, allowing it to be chained with additional
method calls.
WARNING: ``name`` should not be one of the reserved property
names (``'_links'`` or ``'_embedded'``). If ``name`` is ``'_links'`` or
``'_embedded'``, this method may silently corrupt the JSON object
representation and cause undefined behaviour later.
"""
self.o[name] = value
return self
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self
def embed(self, rel, target, wrap=False):
"""Embeds a document inside this document.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: a ``Builder`` instance or a ``dougrain.Document``
instance that will be embedded in this document.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Unlike ``dougrain.Document.embed``, this method does not detect
equivalence between relationship types with different representations.
WARNING: ``target`` should not be identical to ``self`` or any document
that embeds ``self``.
"""
new_embed = target.as_object()
self._add_rel('_embedded', rel, new_embed, wrap)
if self.draft.automatic_link:
self.add_link(rel, target, wrap)
return self
def _add_rel(self, key, rel, thing, wrap):
"""Adds ``thing`` to links or embedded resources.
Calling code should not use this method directly and should use
``embed`` or ``add_link`` instead.
"""
self.o.setdefault(key, {})
if wrap:
self.o[key].setdefault(rel, [])
if rel not in self.o[key]:
self.o[key][rel] = thing
return
existing = self.o[key].get(rel)
if isinstance(existing, list):
existing.append(thing)
return
self.o[key][rel] = [existing, thing]
|
wharris/dougrain | dougrain/builder.py | Builder.add_link | python | def add_link(self, rel, target, wrap=False, **kwargs):
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self | Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L106-L151 | [
"def url(self):\n \"\"\"Returns the URL for the resource based on the ``self`` link.\n\n This method is used when embedding a ``Builder`` in a\n ``dougrain.Document``.\n \"\"\"\n return self.o['_links']['self']['href']\n",
"def _add_rel(self, key, rel, thing, wrap):\n \"\"\"Adds ``thing`` to links or embedded resources.\n\n Calling code should not use this method directly and should use\n ``embed`` or ``add_link`` instead.\n\n \"\"\"\n self.o.setdefault(key, {})\n\n if wrap:\n self.o[key].setdefault(rel, [])\n\n if rel not in self.o[key]:\n self.o[key][rel] = thing\n return\n\n existing = self.o[key].get(rel)\n if isinstance(existing, list):\n existing.append(thing)\n return\n\n self.o[key][rel] = [existing, thing]\n"
] | class Builder(object):
"""Simplify creation of HAL documents.
``Builder`` provides a lightweight chainable API for creating HAL
documents.
Unlike ``dougrain.Document``, ``Builder`` provides no facilities for
interrogating or mutating existing documents. ``Builder`` also makes fewer
sanity checks than ``dougrain.Document``, which makes it considerably
faster, but more likely to produce invalid HAL documents.
"""
def __init__(self, href, draft=drafts.LATEST, **kwargs):
"""``Builder(href, draft=drafts.LATEST, **kwargs)``
Make a builder for a document. The document starts with a ``self``
link to ``href``.
The version of the spec may be specified in the optional ``draft``
argument, which defaults to the latest draft. For example, to build a
document conforming to the older Draft 4 of the spec:
``
dougrain.Document("/test", draft=dougrain.drafts.DRAFT_4)
``
Additional properties for the self link may be supplied in other
keyword arguments. For example:
``
dougrain.Document("/test", profile="/profiles/")
``
"""
self.o = {'_links': {'self': dict(href=href, **kwargs)}}
self.draft = draft.draft
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method is used when embedding a ``Builder`` in a
``dougrain.Document``.
"""
return self.o['_links']['self']['href']
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the document.
This method is used when adding a link to a ``Builder`` from a
``dougrain.Document``.
"""
return link.Link(self.o['_links']['self'], None)
def add_curie(self, name, href):
"""Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls.
"""
self.draft.set_curie(self, name, href)
return self
def set_property(self, name, value):
"""Set a property on the document.
If there is no property with the name in ``name``, a new property is
created with the name from ``name`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
This method returns self, allowing it to be chained with additional
method calls.
WARNING: ``name`` should not be one of the reserved property
names (``'_links'`` or ``'_embedded'``). If ``name`` is ``'_links'`` or
``'_embedded'``, this method may silently corrupt the JSON object
representation and cause undefined behaviour later.
"""
self.o[name] = value
return self
def embed(self, rel, target, wrap=False):
"""Embeds a document inside this document.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: a ``Builder`` instance or a ``dougrain.Document``
instance that will be embedded in this document.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Unlike ``dougrain.Document.embed``, this method does not detect
equivalence between relationship types with different representations.
WARNING: ``target`` should not be identical to ``self`` or any document
that embeds ``self``.
"""
new_embed = target.as_object()
self._add_rel('_embedded', rel, new_embed, wrap)
if self.draft.automatic_link:
self.add_link(rel, target, wrap)
return self
def _add_rel(self, key, rel, thing, wrap):
"""Adds ``thing`` to links or embedded resources.
Calling code should not use this method directly and should use
``embed`` or ``add_link`` instead.
"""
self.o.setdefault(key, {})
if wrap:
self.o[key].setdefault(rel, [])
if rel not in self.o[key]:
self.o[key][rel] = thing
return
existing = self.o[key].get(rel)
if isinstance(existing, list):
existing.append(thing)
return
self.o[key][rel] = [existing, thing]
|
wharris/dougrain | dougrain/builder.py | Builder.embed | python | def embed(self, rel, target, wrap=False):
new_embed = target.as_object()
self._add_rel('_embedded', rel, new_embed, wrap)
if self.draft.automatic_link:
self.add_link(rel, target, wrap)
return self | Embeds a document inside this document.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: a ``Builder`` instance or a ``dougrain.Document``
instance that will be embedded in this document.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Unlike ``dougrain.Document.embed``, this method does not detect
equivalence between relationship types with different representations.
WARNING: ``target`` should not be identical to ``self`` or any document
that embeds ``self``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L153-L189 | [
"def as_object(self):\n \"\"\"Returns a dictionary representing the HAL JSON document.\"\"\"\n return self.o\n",
"def add_link(self, rel, target, wrap=False, **kwargs):\n \"\"\"Adds a link to the document.\n\n This method adds a link to the given ``target`` to the document with\n the given ``rel``. If one or more links are already present for that\n link relationship type, the new link will be added to the existing\n links for that link relationship type.\n\n Unlike ``dougrain.Document.add_link``, this method does not detect\n equivalence between relationship types with different representations.\n\n If ``target`` is a string, a link is added with ``target`` as its\n ``href`` property and other properties from the keyword arguments.\n\n If ``target`` is a ``dougrain.Document`` object, a link is added with\n ``target``'s URL as its ``href`` property and other property from the\n keyword arguments.\n\n If ``target`` is a ``Builder`` object, a link is added with\n ``target``'s URL as its ``href`` property and other property from the\n keyword arguments.\n\n This method returns self, allowing it to be chained with additional\n method calls.\n\n Arguments:\n\n - ``rel``: a string specifying the link relationship type of the link.\n It should be a well-known link relation name from the IANA registry\n (http://www.iana.org/assignments/link-relations/link-relations.xml),\n a full URI, or a CURIE.\n - ``target``: the destination of the link.\n - ``wrap``: Defaults to False, but if True, specifies that the link\n object should be initally wrapped in a JSON array even if it is the\n first link for the given ``rel``.\n\n \"\"\"\n if isinstance(target, bytes):\n target = target.decode('utf-8')\n if isinstance(target, str) or isinstance(target, unicode):\n new_link = dict(href=target, **kwargs)\n else:\n new_link = dict(href=target.url(), **kwargs)\n\n self._add_rel('_links', rel, new_link, wrap)\n return self\n",
"def _add_rel(self, key, rel, thing, wrap):\n \"\"\"Adds ``thing`` to links or embedded resources.\n\n Calling code should not use this method directly and should use\n ``embed`` or ``add_link`` instead.\n\n \"\"\"\n self.o.setdefault(key, {})\n\n if wrap:\n self.o[key].setdefault(rel, [])\n\n if rel not in self.o[key]:\n self.o[key][rel] = thing\n return\n\n existing = self.o[key].get(rel)\n if isinstance(existing, list):\n existing.append(thing)\n return\n\n self.o[key][rel] = [existing, thing]\n"
] | class Builder(object):
"""Simplify creation of HAL documents.
``Builder`` provides a lightweight chainable API for creating HAL
documents.
Unlike ``dougrain.Document``, ``Builder`` provides no facilities for
interrogating or mutating existing documents. ``Builder`` also makes fewer
sanity checks than ``dougrain.Document``, which makes it considerably
faster, but more likely to produce invalid HAL documents.
"""
def __init__(self, href, draft=drafts.LATEST, **kwargs):
"""``Builder(href, draft=drafts.LATEST, **kwargs)``
Make a builder for a document. The document starts with a ``self``
link to ``href``.
The version of the spec may be specified in the optional ``draft``
argument, which defaults to the latest draft. For example, to build a
document conforming to the older Draft 4 of the spec:
``
dougrain.Document("/test", draft=dougrain.drafts.DRAFT_4)
``
Additional properties for the self link may be supplied in other
keyword arguments. For example:
``
dougrain.Document("/test", profile="/profiles/")
``
"""
self.o = {'_links': {'self': dict(href=href, **kwargs)}}
self.draft = draft.draft
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method is used when embedding a ``Builder`` in a
``dougrain.Document``.
"""
return self.o['_links']['self']['href']
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the document.
This method is used when adding a link to a ``Builder`` from a
``dougrain.Document``.
"""
return link.Link(self.o['_links']['self'], None)
def add_curie(self, name, href):
"""Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls.
"""
self.draft.set_curie(self, name, href)
return self
def set_property(self, name, value):
"""Set a property on the document.
If there is no property with the name in ``name``, a new property is
created with the name from ``name`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
This method returns self, allowing it to be chained with additional
method calls.
WARNING: ``name`` should not be one of the reserved property
names (``'_links'`` or ``'_embedded'``). If ``name`` is ``'_links'`` or
``'_embedded'``, this method may silently corrupt the JSON object
representation and cause undefined behaviour later.
"""
self.o[name] = value
return self
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self
def _add_rel(self, key, rel, thing, wrap):
"""Adds ``thing`` to links or embedded resources.
Calling code should not use this method directly and should use
``embed`` or ``add_link`` instead.
"""
self.o.setdefault(key, {})
if wrap:
self.o[key].setdefault(rel, [])
if rel not in self.o[key]:
self.o[key][rel] = thing
return
existing = self.o[key].get(rel)
if isinstance(existing, list):
existing.append(thing)
return
self.o[key][rel] = [existing, thing]
|
wharris/dougrain | dougrain/builder.py | Builder._add_rel | python | def _add_rel(self, key, rel, thing, wrap):
self.o.setdefault(key, {})
if wrap:
self.o[key].setdefault(rel, [])
if rel not in self.o[key]:
self.o[key][rel] = thing
return
existing = self.o[key].get(rel)
if isinstance(existing, list):
existing.append(thing)
return
self.o[key][rel] = [existing, thing] | Adds ``thing`` to links or embedded resources.
Calling code should not use this method directly and should use
``embed`` or ``add_link`` instead. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L191-L212 | null | class Builder(object):
"""Simplify creation of HAL documents.
``Builder`` provides a lightweight chainable API for creating HAL
documents.
Unlike ``dougrain.Document``, ``Builder`` provides no facilities for
interrogating or mutating existing documents. ``Builder`` also makes fewer
sanity checks than ``dougrain.Document``, which makes it considerably
faster, but more likely to produce invalid HAL documents.
"""
def __init__(self, href, draft=drafts.LATEST, **kwargs):
"""``Builder(href, draft=drafts.LATEST, **kwargs)``
Make a builder for a document. The document starts with a ``self``
link to ``href``.
The version of the spec may be specified in the optional ``draft``
argument, which defaults to the latest draft. For example, to build a
document conforming to the older Draft 4 of the spec:
``
dougrain.Document("/test", draft=dougrain.drafts.DRAFT_4)
``
Additional properties for the self link may be supplied in other
keyword arguments. For example:
``
dougrain.Document("/test", profile="/profiles/")
``
"""
self.o = {'_links': {'self': dict(href=href, **kwargs)}}
self.draft = draft.draft
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method is used when embedding a ``Builder`` in a
``dougrain.Document``.
"""
return self.o['_links']['self']['href']
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the document.
This method is used when adding a link to a ``Builder`` from a
``dougrain.Document``.
"""
return link.Link(self.o['_links']['self'], None)
def add_curie(self, name, href):
"""Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls.
"""
self.draft.set_curie(self, name, href)
return self
def set_property(self, name, value):
"""Set a property on the document.
If there is no property with the name in ``name``, a new property is
created with the name from ``name`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
This method returns self, allowing it to be chained with additional
method calls.
WARNING: ``name`` should not be one of the reserved property
names (``'_links'`` or ``'_embedded'``). If ``name`` is ``'_links'`` or
``'_embedded'``, this method may silently corrupt the JSON object
representation and cause undefined behaviour later.
"""
self.o[name] = value
return self
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
``target``'s URL as its ``href`` property and other property from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self
def embed(self, rel, target, wrap=False):
"""Embeds a document inside this document.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: a ``Builder`` instance or a ``dougrain.Document``
instance that will be embedded in this document.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Unlike ``dougrain.Document.embed``, this method does not detect
equivalence between relationship types with different representations.
WARNING: ``target`` should not be identical to ``self`` or any document
that embeds ``self``.
"""
new_embed = target.as_object()
self._add_rel('_embedded', rel, new_embed, wrap)
if self.draft.automatic_link:
self.add_link(rel, target, wrap)
return self
|
wharris/dougrain | dougrain/document.py | mutator | python | def mutator(*cache_names):
def deco(fn):
@wraps(fn)
def _fn(self, *args, **kwargs):
try:
return fn(self, *args, **kwargs)
finally:
for cache_name in cache_names:
setattr(self, cache_name, None)
return _fn
return deco | Decorator for ``Document`` methods that change the document.
This decorator ensures that the object's caches are kept in sync
when changes are made. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L217-L235 | null | # Copyright (c) 2013 Will Harris
# See the file license.txt for copying permission.
"""
Manipulating HAL documents.
"""
from __future__ import absolute_import
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
import itertools
from collections import Mapping
from functools import wraps
import dougrain.link
link = dougrain.link
import dougrain.curie as curie
from .drafts import AUTO
from .drafts import LINKS_KEY
from .drafts import EMBEDDED_KEY
class CanonicalRels(Mapping, object):
"""Smart querying of link relationship types and link relationships.
A ``CanonicalRels`` instance is a read-only dictionary-like object that
provides smart retrieval and de-duplication by link relationship type. It
is used to make access to links and embedded resources more convenient.
In addition to well-know link relationship types (eg. ``"self"``), keys can
be custom link relationship types represented as URIs (eg.
``"http://example.com/rels/comments"``), or as URI references (eg.
``"/rels/comments"``), or as CURIEs (eg. ``"rel:comments"``).
``CanonicalRels`` treats custom link relationship types as equivalent if
they expand to the same URI, called the canonical key here. Given a
suitable base URI and set of CURIE templates,
``http://example.com/rels/comments``, ``/rels/comments``, and
``rel:comments`` are all equivalent.
``CanonicalRels`` de-duplicates items with equivalent keys. De-duplication
is achieved by appending the new values to the existing values for the
canonical key. So, ``{"/rels/spam":eggs,"rel:spam":ham}`` becomes
``{"http://example.com/rels/spam":[eggs,ham]}``.
Values can be retrieved using any key that is equivalent to the item's
canonical key.
"""
def __init__(self, rels, curies, base_uri, item_filter=lambda _: True):
"""Create a ``CanonicalRels`` instance.
Arguments:
- ``rels``: the relationships to be queried. ``rels`` should be
a sequence of ``(key, value)`` tuples or an object
with an ``items`` method that returns such a
sequence (such as a dictionary). For each tuple in
the sequence, ``key`` is a string that identifies
the link relationship type and ``value`` is the
target of the relationship or a sequence of targets
of the relationship.
- ``curies``: a ``CurieCollection`` used to expand CURIE keys.
- ``base_uri``: URL used as the basis when expanding keys that are
relative URI references.
- ``item_filter``: optional filter on target relationships.
``item_filter`` should be a callable that accepts a
target relationship and returns False if the target
relationship should be excluded. ``item_filter``
will be called once with each target relationship.
"""
if hasattr(rels, 'items'):
items = rels.items()
else:
items = rels
self.curies = curies
self.base_uri = base_uri
self.rels = {}
for key, value in items:
canonical_key = self.canonical_key(key)
if not canonical_key in self.rels:
self.rels[canonical_key] = (key, value)
continue
original_key, current_value = self.rels[canonical_key]
new_value = [item for item in current_value if item_filter(item)]
new_value.extend(item for item in value if item_filter(item))
self.rels[canonical_key] = original_key, new_value
self.rels = self.rels
def canonical_key(self, key):
"""Returns the canonical key for the given ``key``."""
if key.startswith('/'):
return urlparse.urljoin(self.base_uri, key)
else:
return self.curies.expand(key)
def original_key(self, key):
"""Returns the first key seen for the given ``key``."""
return self.rels[self.canonical_key(key)][0]
def __getitem__(self, key):
"""Returns the link relationship that match the given ``key``.
``self[key]`` will return any link relationship who's key is equivalent
to ``key``. Keys are equivalent if their canonical keys are equal.
If there is more than one link relationship that matches ``key``, a
list of matching link relationships is returned.
If there is one link relationship that matches ``key``, that link
relationship is returned.
If there are no link relationships that match ``key``, a ``KeyError``
is thrown.
"""
return self.rels[self.canonical_key(key)][1]
def __iter__(self):
return iter(self.rels)
def __len__(self):
return len(self.rels)
def __contains__(self, key):
"""Returns ``True`` if there are any link relationships for for
``self[key].``
"""
return self.canonical_key(key) in self.rels
def keys(self):
"""Returns a list of keys that map to every item.
Each key returned is an original key. That is, the first key
encountered for the canonical key.
"""
return [original_key for original_key, _ in self.rels.values()]
class Relationships(Mapping, object):
"""Merged view of relationships from a HAL document.
Relationships, that is links and embedded resources, are presented as a
dictionary-like object mapping the full URI of the link relationship type
to a list of relationships.
If there are both embedded resources and links for the same link relation
type, the embedded resources will appear before the links. Otherwise,
relationships are presented in the order they appear in their respective
collection.
Relationships are de-duplicated by their URL, as defined by their ``self``
link in the case of embedded resources and by their ``href`` in the case of
links. Only the first relationship with that URL will be included.
"""
def __init__(self, links, embedded, curies, base_uri):
"""Initialize a ``Relationships`` object.
Parameters:
- ``links``: a dictionary mapping a link relationship type to a
``Link`` instance or a ``list`` of ``Link``
instances.
- ``embedded``: a dictionary mapping a link relationship type to a
``Document`` instance or a ``list`` of ``Document``
instances.
- ``curies``: a ``CurieCollection`` instance used to expand
link relationship type into full link relationship type
URLs.
"""
rels = itertools.chain(embedded.items(), links.items())
existing_urls = set()
def item_filter(item):
url = item.url()
if url is not None and url in existing_urls:
return False
existing_urls.add(item.url())
return True
self.canonical_rels = CanonicalRels(rels,
curies,
base_uri,
item_filter)
def __getitem__(self, key):
value = self.canonical_rels.__getitem__(key)
if not isinstance(value, list):
value = [value]
return value
def __iter__(self):
return iter(self.rels)
def __len__(self):
return len(self.rels)
def keys(self):
return self.canonical_rels.keys()
class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initially wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | CanonicalRels.canonical_key | python | def canonical_key(self, key):
if key.startswith('/'):
return urlparse.urljoin(self.base_uri, key)
else:
return self.curies.expand(key) | Returns the canonical key for the given ``key``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L99-L104 | null | class CanonicalRels(Mapping, object):
"""Smart querying of link relationship types and link relationships.
A ``CanonicalRels`` instance is a read-only dictionary-like object that
provides smart retrieval and de-duplication by link relationship type. It
is used to make access to links and embedded resources more convenient.
In addition to well-know link relationship types (eg. ``"self"``), keys can
be custom link relationship types represented as URIs (eg.
``"http://example.com/rels/comments"``), or as URI references (eg.
``"/rels/comments"``), or as CURIEs (eg. ``"rel:comments"``).
``CanonicalRels`` treats custom link relationship types as equivalent if
they expand to the same URI, called the canonical key here. Given a
suitable base URI and set of CURIE templates,
``http://example.com/rels/comments``, ``/rels/comments``, and
``rel:comments`` are all equivalent.
``CanonicalRels`` de-duplicates items with equivalent keys. De-duplication
is achieved by appending the new values to the existing values for the
canonical key. So, ``{"/rels/spam":eggs,"rel:spam":ham}`` becomes
``{"http://example.com/rels/spam":[eggs,ham]}``.
Values can be retrieved using any key that is equivalent to the item's
canonical key.
"""
def __init__(self, rels, curies, base_uri, item_filter=lambda _: True):
"""Create a ``CanonicalRels`` instance.
Arguments:
- ``rels``: the relationships to be queried. ``rels`` should be
a sequence of ``(key, value)`` tuples or an object
with an ``items`` method that returns such a
sequence (such as a dictionary). For each tuple in
the sequence, ``key`` is a string that identifies
the link relationship type and ``value`` is the
target of the relationship or a sequence of targets
of the relationship.
- ``curies``: a ``CurieCollection`` used to expand CURIE keys.
- ``base_uri``: URL used as the basis when expanding keys that are
relative URI references.
- ``item_filter``: optional filter on target relationships.
``item_filter`` should be a callable that accepts a
target relationship and returns False if the target
relationship should be excluded. ``item_filter``
will be called once with each target relationship.
"""
if hasattr(rels, 'items'):
items = rels.items()
else:
items = rels
self.curies = curies
self.base_uri = base_uri
self.rels = {}
for key, value in items:
canonical_key = self.canonical_key(key)
if not canonical_key in self.rels:
self.rels[canonical_key] = (key, value)
continue
original_key, current_value = self.rels[canonical_key]
new_value = [item for item in current_value if item_filter(item)]
new_value.extend(item for item in value if item_filter(item))
self.rels[canonical_key] = original_key, new_value
self.rels = self.rels
def original_key(self, key):
"""Returns the first key seen for the given ``key``."""
return self.rels[self.canonical_key(key)][0]
def __getitem__(self, key):
"""Returns the link relationship that match the given ``key``.
``self[key]`` will return any link relationship who's key is equivalent
to ``key``. Keys are equivalent if their canonical keys are equal.
If there is more than one link relationship that matches ``key``, a
list of matching link relationships is returned.
If there is one link relationship that matches ``key``, that link
relationship is returned.
If there are no link relationships that match ``key``, a ``KeyError``
is thrown.
"""
return self.rels[self.canonical_key(key)][1]
def __iter__(self):
return iter(self.rels)
def __len__(self):
return len(self.rels)
def __contains__(self, key):
"""Returns ``True`` if there are any link relationships for for
``self[key].``
"""
return self.canonical_key(key) in self.rels
def keys(self):
"""Returns a list of keys that map to every item.
Each key returned is an original key. That is, the first key
encountered for the canonical key.
"""
return [original_key for original_key, _ in self.rels.values()]
|
wharris/dougrain | dougrain/document.py | Document.url | python | def url(self):
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url() | Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L366-L383 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modifying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserialized JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initially wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.set_property | python | def set_property(self, key, value):
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value | Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L406-L423 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.delete_property | python | def delete_property(self, key):
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key] | Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L426-L438 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.link | python | def link(self, href, **kwargs):
return link.Link(dict(href=href, **kwargs), self.base_uri) | Retuns a new link relative to this resource. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L440-L442 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.add_link | python | def add_link(self, rel, target, wrap=False, **kwargs):
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link] | Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L445-L502 | [
"def link(self, href, **kwargs):\n \"\"\"Retuns a new link relative to this resource.\"\"\"\n return link.Link(dict(href=href, **kwargs), self.base_uri)\n"
] | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.delete_link | python | def delete_link(self, rel=None, href=lambda _: True):
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY] | Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L505-L560 | [
"def delete_link(self, rel=None, href=lambda _: True):\n",
"href_filter = lambda x: x == href\n"
] | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.from_object | python | def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft) | Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L563-L586 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.empty | python | def empty(cls, base_uri=None, draft=AUTO):
return cls.from_object({}, base_uri=base_uri, draft=draft) | Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L589-L600 | [
"def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):\n \"\"\"Returns a new ``Document`` based on a JSON object or array.\n\n Arguments:\n\n - ``o``: a dictionary holding the deserializated JSON for the new\n ``Document``, or a ``list`` of such documents.\n - ``base_uri``: optional URL used as the basis when expanding\n relative URLs in the document.\n - ``parent_curies``: optional ``CurieCollection`` instance holding the\n CURIEs of the parent document in which the new\n document is to be embedded. Calling code should\n not normally provide this argument.\n - ``draft``: a ``Draft`` instance that selects the version of the spec\n to which the document should conform. Defaults to\n ``drafts.AUTO``.\n\n \"\"\"\n\n if isinstance(o, list):\n return [cls.from_object(x, base_uri, parent_curies, draft)\n for x in o]\n\n return cls(o, base_uri, parent_curies, draft)\n"
] | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
    """Removes embedded resources from this document.

    Calling code should use this method instead of modifying ``embedded``
    directly.

    Selection works as follows: with no arguments, every embedded
    resource is removed; with ``rel``, only resources embedded under that
    link relationship type; with ``href``, only resources whose ``self``
    link matches. ``href`` may be a string compared for equality or a
    callable predicate applied to each resource's ``self`` URL.

    Arguments:

    - ``rel``: optional link relationship type of the resources to drop.
    - ``href``: optional string or predicate selecting resources by the
      URL of their ``self`` link.
    """
    if EMBEDDED_KEY not in self.o:
        return

    embedded = self.o[EMBEDDED_KEY]

    if rel is None:
        # No rel given: recurse over a snapshot of every rel.
        for each_rel in list(embedded.keys()):
            self.delete_embedded(each_rel, href)
        return

    if rel not in embedded:
        return

    matches = href if callable(href) else (lambda candidate: candidate == href)

    current = embedded[rel]
    if isinstance(current, dict):
        # NOTE(review): a single (non-list) embedded resource is removed
        # without consulting ``href`` — asymmetric with the list branch
        # below; confirm this is intentional upstream behavior.
        del embedded[rel]
    else:
        survivors = [item for item in list(current)
                     if not matches(Document(item, self.base_uri).url())]
        if not survivors:
            del embedded[rel]
        elif len(survivors) == 1:
            # A single remaining resource collapses back to a bare object.
            embedded[rel] = survivors[0]
        else:
            embedded[rel] = survivors

    # Drop the _embedded container entirely once it is empty.
    if not embedded:
        del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
    """Sets a CURIE.

    A CURIE link with the given ``name`` and ``href`` is added to the
    document. The mechanics are delegated to the active draft, since the
    link relation used for CURIEs differs between spec drafts.
    """
    self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
    """Removes a CURIE.

    The CURIE link with the given ``name`` is removed from the document.
    If no CURIE with that name exists, this method is a no-op.
    """
    curies = self.o[LINKS_KEY][self.draft.curies_rel]
    if isinstance(curies, dict):
        # A single CURIE is stored as a bare object. Previously a
        # non-matching name fell through to the loop below and raised
        # TypeError (iterating the dict yields string keys); a
        # non-matching name is now a no-op, consistent with the list case.
        if curies['name'] == name:
            del self.o[LINKS_KEY][self.draft.curies_rel]
        return
    for i, curie in enumerate(curies):
        if curie['name'] == name:
            del curies[i]
            break
    # (Removed a dead trailing ``continue`` that was the last statement
    # of the loop body.)
def __iter__(self):
    # A Document iterates as a one-element sequence containing itself,
    # so single documents and lists of documents can be handled uniformly.
    yield self
def __eq__(self, other):
    """Two documents are equal when their JSON objects are equal.

    Any non-``Document`` operand compares unequal.
    """
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is acceptable.
    return isinstance(other, Document) and self.as_object() == other.as_object()
def __repr__(self):
    # Identify the document by the URL of its ``self`` link (may be None).
    return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.embed | python | def embed(self, rel, other, wrap=False):
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap) | Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L603-L662 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.delete_embedded | python | def delete_embedded(self, rel=None, href=lambda _: True):
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY] | Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L665-L731 | [
"def url(self):\n \"\"\"Returns the URL for the resource based on the ``self`` link.\n\n This method returns the ``href`` of the document's ``self`` link if it\n has one, or ``None`` if the document lacks a ``self`` link, or the\n ``href`` of the document's first ``self`` link if it has more than one.\n\n \"\"\"\n if not 'self' in self.links:\n return None\n\n self_link = self.links['self']\n\n if isinstance(self_link, list):\n for link in self_link:\n return link.url()\n\n return self_link.url()\n",
"def delete_embedded(self, rel=None, href=lambda _: True):\n",
"url_filter = lambda x: x == href\n"
] | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def drop_curie(self, name):
"""Removes a CURIE.
The CURIE link with the given name is removed from the document.
"""
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/document.py | Document.drop_curie | python | def drop_curie(self, name):
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue | Removes a CURIE.
The CURIE link with the given name is removed from the document. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L745-L761 | null | class Document(object):
"""Represents the document for a HAL resource.
Constructors:
- ``Document.empty(base_uri=None)``:
returns an empty ``Document``.
- ``Document.from_object(o, base_uri=None, parent_curies=None)``:
returns a new ``Document`` based on a JSON object.
Public Instance Attributes:
- ``properties``: ``dict`` containing the properties of the HAL document,
excluding ``_links`` and ``_embedded``. ``properties``
should be treated as read-only.
- ``links``: ``dict`` containing the document's links, excluding
``curies``. Each link relationship type is mapped to a
``Link`` instance or a list of ``Link`` instances. ``links``
should be treated as read-only.
- ``embedded``: dictionary containing the document's embedded resources.
Each link relationship type is mapped to a ``Document``
instance.
- ``rels``: a ``Relationships`` instance holding a merged view of the
relationships from the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec to
which the document should conform. Defaults to
``drafts.AUTO``.
"""
def __init__(self, o, base_uri, parent_curies=None, draft=AUTO):
self.prepare_cache()
self.o = o
self.base_uri = base_uri
self.parent_curies = parent_curies
self.draft = draft.detect(o)
RESERVED_ATTRIBUTE_NAMES = (LINKS_KEY, EMBEDDED_KEY)
def properties_cache(self):
properties = dict(self.o)
for name in self.RESERVED_ATTRIBUTE_NAMES:
properties[name] = None
del properties[name]
return properties
def links_cache(self):
links = {}
links_json = self.o.get(LINKS_KEY, {})
for key, value in links_json.items():
if key == self.draft.curies_rel:
continue
links[key] = link.Link.from_object(value, self.base_uri)
return CanonicalRels(links, self.curies, self.base_uri)
def curies_cache(self):
result = curie.CurieCollection()
if self.parent_curies is not None:
result.update(self.parent_curies)
links_json = self.o.get('_links', {})
curies_json = links_json.get(self.draft.curies_rel)
if not curies_json:
return result
curies = link.Link.from_object(curies_json, self.base_uri)
if not isinstance(curies, list):
curies = [curies]
for curie_link in curies:
result[curie_link.name] = curie_link
return result
def embedded_cache(self):
embedded = {}
for key, value in self.o.get(EMBEDDED_KEY, {}).items():
embedded[key] = self.from_object(value,
self.base_uri,
self.curies)
return CanonicalRels(embedded, self.curies, self.base_uri)
def rels_cache(self):
return Relationships(self.links, self.embedded, self.curies,
self.base_uri)
def prepare_cache(self):
self._properties_cache = None
self._curies_cache = None
self._links_cache = None
self._embedded_cache = None
self._rels_cache = None
@property
def properties(self):
if self._properties_cache is None:
self._properties_cache = self.properties_cache()
return self._properties_cache
@property
def curies(self):
if self._curies_cache is None:
self._curies_cache = self.curies_cache()
return self._curies_cache
@property
def links(self):
if self._links_cache is None:
self._links_cache = self.links_cache()
return self._links_cache
@property
def embedded(self):
if self._embedded_cache is None:
self._embedded_cache = self.embedded_cache()
return self._embedded_cache
@property
def rels(self):
if self._rels_cache is None:
self._rels_cache = self.rels_cache()
return self._rels_cache
def url(self):
"""Returns the URL for the resource based on the ``self`` link.
This method returns the ``href`` of the document's ``self`` link if it
has one, or ``None`` if the document lacks a ``self`` link, or the
``href`` of the document's first ``self`` link if it has more than one.
"""
if not 'self' in self.links:
return None
self_link = self.links['self']
if isinstance(self_link, list):
for link in self_link:
return link.url()
return self_link.url()
def expand_curie(self, link):
"""Returns the expansion of a CURIE value.
Arguments:
- ``link``: a string holding a curie value to expand.
This method attempts to expand ``link`` using the document's ``curies``
collection (see ``curie.CurieCollection.expand``).
"""
return self.curies.expand(link)
def as_object(self):
"""Returns a dictionary representing the HAL JSON document."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the resource."""
return self.links['self']
@mutator('_properties_cache')
def set_property(self, key, value):
"""Set a property on the document.
Calling code should use this method to add and modify properties
on the document instead of modifying ``properties`` directly.
If ``key`` is ``"_links"`` or ``"_embedded"`` this method will silently
fail.
If there is no property with the name in ``key``, a new property is
created with the name from ``key`` and the value from ``value``. If
the document already has a property with that name, it's value
is replaced with the value in ``value``.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
return
self.o[key] = value
@mutator('_properties_cache')
def delete_property(self, key):
"""Remove a property from the document.
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown.
"""
if key in self.RESERVED_ATTRIBUTE_NAMES:
raise KeyError(key)
del self.o[key]
def link(self, href, **kwargs):
"""Retuns a new link relative to this resource."""
return link.Link(dict(href=href, **kwargs), self.base_uri)
@mutator('_links_cache')
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
Calling code should use this method to add links instead of
modifying ``links`` directly.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``Link`` object, it is added to the document and the
keyword arguments are ignored.
If ``target`` is a ``Document`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
If ``target`` is a ``Builder`` object, ``target``'s ``self`` link is
added to this document and the keyword arguments are ignored.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
object should be initally wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if hasattr(target, 'as_link'):
link = target.as_link()
else:
link = self.link(target, **kwargs)
links = self.o.setdefault(LINKS_KEY, {})
new_link = link.as_object()
collected_links = CanonicalRels(links, self.curies, self.base_uri)
if rel not in collected_links:
if wrap:
links[rel] = [new_link]
else:
links[rel] = new_link
return
original_rel = collected_links.original_key(rel)
current_links = links[original_rel]
if isinstance(current_links, list):
current_links.append(new_link)
else:
links[original_rel] = [current_links, new_link]
@mutator('_links_cache')
def delete_link(self, rel=None, href=lambda _: True):
"""Deletes links from the document.
Calling code should use this method to remove links instead of
modyfying ``links`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
links that will be deleted. If neither of the optional arguments are
given, this method deletes every link in the document. If ``rel`` is
given, only links for the matching link relationship type are deleted.
If ``href`` is given, only links with a matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only links with matching
``href`` for the matching link relationship type are delted.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the links to be deleted.
- ``href``: optionally, a string specifying the ``href`` of the links
to be deleted, or a callable that returns true when its
single argument is in the set of ``href``s to be deleted.
"""
if not LINKS_KEY in self.o:
return
links = self.o[LINKS_KEY]
if rel is None:
for rel in list(links.keys()):
self.delete_link(rel, href)
return
if callable(href):
href_filter = href
else:
href_filter = lambda x: x == href
links_for_rel = links.setdefault(rel, [])
if isinstance(links_for_rel, dict):
links_for_rel = [links_for_rel]
new_links_for_rel = []
for link in links_for_rel:
if not href_filter(link['href']):
new_links_for_rel.append(link)
if new_links_for_rel:
if len(new_links_for_rel) == 1:
new_links_for_rel = new_links_for_rel[0]
links[rel] = new_links_for_rel
else:
del links[rel]
if not self.o[LINKS_KEY]:
del self.o[LINKS_KEY]
@classmethod
def from_object(cls, o, base_uri=None, parent_curies=None, draft=AUTO):
"""Returns a new ``Document`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Document``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``parent_curies``: optional ``CurieCollection`` instance holding the
CURIEs of the parent document in which the new
document is to be embedded. Calling code should
not normally provide this argument.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
if isinstance(o, list):
return [cls.from_object(x, base_uri, parent_curies, draft)
for x in o]
return cls(o, base_uri, parent_curies, draft)
@classmethod
def empty(cls, base_uri=None, draft=AUTO):
"""Returns an empty ``Document``.
Arguments:
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the document.
- ``draft``: a ``Draft`` instance that selects the version of the spec
to which the document should conform. Defaults to
``drafts.AUTO``.
"""
return cls.from_object({}, base_uri=base_uri, draft=draft)
@mutator('_embedded_cache')
def embed(self, rel, other, wrap=False):
"""Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
"""
if other == self:
return
embedded = self.o.setdefault(EMBEDDED_KEY, {})
collected_embedded = CanonicalRels(embedded,
self.curies,
self.base_uri)
if rel not in collected_embedded:
if wrap:
embedded[rel] = [other.as_object()]
else:
embedded[rel] = other.as_object()
else:
original_rel = collected_embedded.original_key(rel)
current_embedded = embedded[original_rel]
if isinstance(current_embedded, list):
current_embedded.append(other.as_object())
else:
embedded[original_rel] = [current_embedded, other.as_object()]
if not self.draft.automatic_link:
return
url = other.url()
if not url:
return
if url in (link.url() for link in self.links.get(rel, [])):
return
self.add_link(rel, other, wrap=wrap)
@mutator('_embedded_cache')
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
@mutator('_curies_cache')
def set_curie(self, name, href):
"""Sets a CURIE.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
"""
self.draft.set_curie(self, name, href)
@mutator('_curies_cache')
def __iter__(self):
yield self
def __eq__(self, other):
if not isinstance(other, Document):
return False
return self.as_object() == other.as_object()
def __repr__(self):
return "<Document %r>" % self.url()
|
wharris/dougrain | dougrain/drafts.py | DraftIdentifier.detect | python | def detect(self, obj):
links = obj.get(LINKS_KEY, {})
for detector in [LATEST, DRAFT_3]:
if detector.draft.curies_rel in links:
return detector.detect(obj)
return LATEST.detect(obj) | Identifies the HAL draft level of a given JSON object. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/drafts.py#L87-L95 | [
"def detect(self, obj):\n \"\"\"Identify the HAL draft level of obj as this instance's draft.\"\"\"\n return self.draft\n"
] | class DraftIdentifier(object):
"""Identifies HAL draft level of a JSON document.
When created with an existing JSON object, the document guesses the
draft version based on the presence of a link with a relation type of
'curie' or 'curies'.
"""
def __repr__(self):
return "%s()" % self.__class__.__name__
|
wharris/dougrain | dougrain/link.py | extract_variables | python | def extract_variables(href):
patterns = [re.sub(r'\*|:\d+', '', pattern)
for pattern in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href)]
variables = []
for pattern in patterns:
for part in pattern.split(","):
if not part in variables:
variables.append(part)
return variables | Return a list of variable names used in a URI template. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/link.py#L13-L24 | null | # Copyright (c) 2013 Will Harris
# See the file license.txt for copying permission.
import re
import uritemplate
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
class Link(object):
"""Representation of a HAL link from a ``Document``.
Constructors:
- ``Link.from_object(o, base_uri=None)``:
returns a new ``Link`` based on a JSON object.
Public Instance Attributes:
- ``href``: ``str`` containing the href of the link.
- ``name``: ``str`` containing the name of the link. Absent if the link
has no name.
- ``title``: ``str`` containing the title of the link. Absent if the link
has no title.
- ``type``: ``str`` containing the type of the link. Absent if the link
does not specify a type.
- ``profile``: ``str`` containing the profile URL reference of the link.
Absent if the link does not specify a profile.
- ``hreflang``: ``str`` indicating the language of the target. Absent if
the link does not specify a language.
- ``deprecation``: ``str`` indicating that the link is deprecated. The
value of the string should be a URL that provides
further infomation about the deprecation. Absent if the
link is not marked as deprecated.
- ``variables``: ``list`` of names of template variables that may be
expanded for templated links. Empty if there are no
template variables.
"""
def __init__(self, json_object, base_uri):
self.o = json_object
self.href = json_object['href']
if 'name' in json_object:
self.name = json_object['name']
if 'title' in json_object:
self.title = json_object['title']
if 'type' in json_object:
self.type = json_object['type']
if 'profile' in json_object:
self.profile = json_object['profile']
if 'hreflang' in json_object:
self.hreflang = json_object['hreflang']
if 'deprecation' in json_object:
self.deprecation = json_object['deprecation']
self.is_templated = self.o.get('templated', False) is True
if self.is_templated:
self.variables = extract_variables(self.href)
else:
self.variables = []
if base_uri is None:
self.template = self.href
else:
self.template = urlparse.urljoin(base_uri, self.href)
def url(self, **kwargs):
"""Returns a URL for the link with optional template expansion.
If the link is marked as templated, the href will be expanded according
to RFC6570, using template variables provided in the keyword arguments.
If the href is a valid URI Template, but the link is not marked as
templated, the href will not be expanded even if template variables are
provided.
"""
if self.is_templated:
return uritemplate.expand(self.template, kwargs)
else:
return self.template
def as_object(self):
"""Returns a dictionary representing the HAL JSON link."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the same resource as this link.
This method is trivial, but is provided for symmetry with ``Document``.
"""
return self
@classmethod
def from_object(cls, o, base_uri):
"""Returns a new ``Link`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Link``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the link.
"""
if isinstance(o, list):
if len(o) == 1:
return cls.from_object(o[0], base_uri)
return [cls.from_object(x, base_uri) for x in o]
return cls(o, base_uri)
def __iter__(self):
yield self
def __repr__(self):
if hasattr(self, 'name'):
return "<Link %s=%r>" % (self.name, self.template)
else:
return "<Link %r>" % self.template
def __eq__(self, other):
return (isinstance(other, Link) and
self.as_object() == other.as_object())
|
wharris/dougrain | dougrain/link.py | Link.url | python | def url(self, **kwargs):
if self.is_templated:
return uritemplate.expand(self.template, kwargs)
else:
return self.template | Returns a URL for the link with optional template expansion.
If the link is marked as templated, the href will be expanded according
to RFC6570, using template variables provided in the keyword arguments.
If the href is a valid URI Template, but the link is not marked as
templated, the href will not be expanded even if template variables are
provided. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/link.py#L91-L104 | null | class Link(object):
"""Representation of a HAL link from a ``Document``.
Constructors:
- ``Link.from_object(o, base_uri=None)``:
returns a new ``Link`` based on a JSON object.
Public Instance Attributes:
- ``href``: ``str`` containing the href of the link.
- ``name``: ``str`` containing the name of the link. Absent if the link
has no name.
- ``title``: ``str`` containing the title of the link. Absent if the link
has no title.
- ``type``: ``str`` containing the type of the link. Absent if the link
does not specify a type.
- ``profile``: ``str`` containing the profile URL reference of the link.
Absent if the link does not specify a profile.
- ``hreflang``: ``str`` indicating the language of the target. Absent if
the link does not specify a language.
- ``deprecation``: ``str`` indicating that the link is deprecated. The
value of the string should be a URL that provides
further infomation about the deprecation. Absent if the
link is not marked as deprecated.
- ``variables``: ``list`` of names of template variables that may be
expanded for templated links. Empty if there are no
template variables.
"""
def __init__(self, json_object, base_uri):
self.o = json_object
self.href = json_object['href']
if 'name' in json_object:
self.name = json_object['name']
if 'title' in json_object:
self.title = json_object['title']
if 'type' in json_object:
self.type = json_object['type']
if 'profile' in json_object:
self.profile = json_object['profile']
if 'hreflang' in json_object:
self.hreflang = json_object['hreflang']
if 'deprecation' in json_object:
self.deprecation = json_object['deprecation']
self.is_templated = self.o.get('templated', False) is True
if self.is_templated:
self.variables = extract_variables(self.href)
else:
self.variables = []
if base_uri is None:
self.template = self.href
else:
self.template = urlparse.urljoin(base_uri, self.href)
def as_object(self):
"""Returns a dictionary representing the HAL JSON link."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the same resource as this link.
This method is trivial, but is provided for symmetry with ``Document``.
"""
return self
@classmethod
def from_object(cls, o, base_uri):
"""Returns a new ``Link`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Link``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the link.
"""
if isinstance(o, list):
if len(o) == 1:
return cls.from_object(o[0], base_uri)
return [cls.from_object(x, base_uri) for x in o]
return cls(o, base_uri)
def __iter__(self):
yield self
def __repr__(self):
if hasattr(self, 'name'):
return "<Link %s=%r>" % (self.name, self.template)
else:
return "<Link %r>" % self.template
def __eq__(self, other):
return (isinstance(other, Link) and
self.as_object() == other.as_object())
|
wharris/dougrain | dougrain/link.py | Link.from_object | python | def from_object(cls, o, base_uri):
if isinstance(o, list):
if len(o) == 1:
return cls.from_object(o[0], base_uri)
return [cls.from_object(x, base_uri) for x in o]
return cls(o, base_uri) | Returns a new ``Link`` based on a JSON object or array.
Arguments:
- ``o``: a dictionary holding the deserializated JSON for the new
``Link``, or a ``list`` of such documents.
- ``base_uri``: optional URL used as the basis when expanding
relative URLs in the link. | train | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/link.py#L119-L136 | [
"def from_object(cls, o, base_uri):\n \"\"\"Returns a new ``Link`` based on a JSON object or array.\n\n Arguments:\n\n - ``o``: a dictionary holding the deserializated JSON for the new\n ``Link``, or a ``list`` of such documents.\n - ``base_uri``: optional URL used as the basis when expanding\n relative URLs in the link.\n\n \"\"\"\n if isinstance(o, list):\n if len(o) == 1:\n return cls.from_object(o[0], base_uri)\n\n return [cls.from_object(x, base_uri) for x in o]\n\n return cls(o, base_uri)\n"
] | class Link(object):
"""Representation of a HAL link from a ``Document``.
Constructors:
- ``Link.from_object(o, base_uri=None)``:
returns a new ``Link`` based on a JSON object.
Public Instance Attributes:
- ``href``: ``str`` containing the href of the link.
- ``name``: ``str`` containing the name of the link. Absent if the link
has no name.
- ``title``: ``str`` containing the title of the link. Absent if the link
has no title.
- ``type``: ``str`` containing the type of the link. Absent if the link
does not specify a type.
- ``profile``: ``str`` containing the profile URL reference of the link.
Absent if the link does not specify a profile.
- ``hreflang``: ``str`` indicating the language of the target. Absent if
the link does not specify a language.
- ``deprecation``: ``str`` indicating that the link is deprecated. The
value of the string should be a URL that provides
further infomation about the deprecation. Absent if the
link is not marked as deprecated.
- ``variables``: ``list`` of names of template variables that may be
expanded for templated links. Empty if there are no
template variables.
"""
def __init__(self, json_object, base_uri):
self.o = json_object
self.href = json_object['href']
if 'name' in json_object:
self.name = json_object['name']
if 'title' in json_object:
self.title = json_object['title']
if 'type' in json_object:
self.type = json_object['type']
if 'profile' in json_object:
self.profile = json_object['profile']
if 'hreflang' in json_object:
self.hreflang = json_object['hreflang']
if 'deprecation' in json_object:
self.deprecation = json_object['deprecation']
self.is_templated = self.o.get('templated', False) is True
if self.is_templated:
self.variables = extract_variables(self.href)
else:
self.variables = []
if base_uri is None:
self.template = self.href
else:
self.template = urlparse.urljoin(base_uri, self.href)
def url(self, **kwargs):
"""Returns a URL for the link with optional template expansion.
If the link is marked as templated, the href will be expanded according
to RFC6570, using template variables provided in the keyword arguments.
If the href is a valid URI Template, but the link is not marked as
templated, the href will not be expanded even if template variables are
provided.
"""
if self.is_templated:
return uritemplate.expand(self.template, kwargs)
else:
return self.template
def as_object(self):
"""Returns a dictionary representing the HAL JSON link."""
return self.o
def as_link(self):
"""Returns a ``Link`` to the same resource as this link.
This method is trivial, but is provided for symmetry with ``Document``.
"""
return self
@classmethod
def __iter__(self):
yield self
def __repr__(self):
if hasattr(self, 'name'):
return "<Link %s=%r>" % (self.name, self.template)
else:
return "<Link %r>" % self.template
def __eq__(self, other):
return (isinstance(other, Link) and
self.as_object() == other.as_object())
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | quote_as2name | python | def quote_as2name(unquoted_name):
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name | Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L22-L32 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | mime_to_bytes | python | def mime_to_bytes(msg, header_len):
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue() | Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L35-L45 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | canonicalize | python | def canonicalize(message):
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body | Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L48-L67 | [
"def mime_to_bytes(msg, header_len):\n \"\"\"\n Function to convert and email Message to flat string format\n :param msg: email.Message to be converted to string\n :param header_len: the msx length of the header per line\n :return: the byte string representation of the email message\n \"\"\"\n fp = BytesIO()\n g = BytesGenerator(fp, maxheaderlen=header_len)\n g.flatten(msg)\n return fp.getvalue()\n"
] | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | extract_first_part | python | def extract_first_part(message, boundary):
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message | Function to extract the first part of a multipart message | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L92-L99 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | pem_to_der | python | def pem_to_der(cert, return_multiple=True):
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop() | Converts a given certificate or list to PEM format | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L102-L119 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | split_pem | python | def split_pem(pem_bytes):
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n' | Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L122-L145 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
""" Verify a given certificate against a trust store"""
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
|
abhishek-ram/pyas2-lib | pyas2lib/utils.py | verify_certificate_chain | python | def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
# Load the certificate
certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
# Create a certificate store and add your trusted certs
try:
store = crypto.X509Store()
if ignore_self_signed:
store.add_cert(certificate)
# Assuming the certificates are in PEM format in a trusted_certs list
for _cert in trusted_certs:
store.add_cert(
crypto.load_certificate(crypto.FILETYPE_ASN1, _cert))
# Create a certificate context using the store and the certificate
store_ctx = crypto.X509StoreContext(store, certificate)
# Verify the certificate, returns None if certificate is not valid
store_ctx.verify_certificate()
return True
except crypto.X509StoreContextError as e:
raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1]) | Verify a given certificate against a trust store | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/utils.py#L148-L175 | null | from __future__ import absolute_import, unicode_literals
from .compat import BytesIO, BytesGenerator, is_py2, _ver
from .exceptions import AS2Exception
from OpenSSL import crypto
from asn1crypto import pem
import email
import re
import sys
import random
def unquote_as2name(quoted_name):
"""
Function converts as2 name from quoted to unquoted format
:param quoted_name: the as2 name in quoted format
:return: the as2 name in unquoted format
"""
return email.utils.unquote(quoted_name)
def quote_as2name(unquoted_name):
"""
Function converts as2 name from unquoted to quoted format
:param unquoted_name: the as2 name in unquoted format
:return: the as2 name in unquoted format
"""
if re.search(r'[\\" ]', unquoted_name, re.M):
return '"' + email.utils.quote(unquoted_name) + '"'
else:
return unquoted_name
def mime_to_bytes(msg, header_len):
"""
Function to convert and email Message to flat string format
:param msg: email.Message to be converted to string
:param header_len: the msx length of the header per line
:return: the byte string representation of the email message
"""
fp = BytesIO()
g = BytesGenerator(fp, maxheaderlen=header_len)
g.flatten(msg)
return fp.getvalue()
def canonicalize(message):
"""
Function to convert an email Message to standard format string
:param message: email.Message to be converted to standard string
:return: the standard representation of the email message in bytes
"""
if message.is_multipart() \
or message.get('Content-Transfer-Encoding') != 'binary':
return mime_to_bytes(message, 0).replace(
b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n')
else:
message_header = ''
message_body = message.get_payload(decode=True)
for k, v in message.items():
message_header += '{}: {}\r\n'.format(k, v)
message_header += '\r\n'
return message_header.encode('utf-8') + message_body
def make_mime_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
width = len(repr(sys.maxsize - 1))
fmt = '%%0%dd' % width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
def extract_first_part(message, boundary):
""" Function to extract the first part of a multipart message"""
first_message = message.split(boundary)[1].lstrip()
if first_message.endswith(b'\r\n'):
first_message = first_message[:-2]
else:
first_message = first_message[:-1]
return first_message
def pem_to_der(cert, return_multiple=True):
""" Converts a given certificate or list to PEM format"""
# initialize the certificate array
cert_list = []
# If certificate is in DER then un-armour it
if pem.detect(cert):
for _, _, der_bytes in pem.unarmor(cert, multiple=True):
cert_list.append(der_bytes)
else:
cert_list.append(cert)
# return multiple if return_multiple is set else first element
if return_multiple:
return cert_list
else:
return cert_list.pop()
def split_pem(pem_bytes):
"""
Split a give PEM file with multiple certificates
:param pem_bytes: The pem data in bytes with multiple certs
:return: yields a list of certificates contained in the pem file
"""
started, pem_data = False, b''
for line in pem_bytes.splitlines(False):
if line == b'' and not started:
continue
if line[0:5] in (b'-----', b'---- '):
if not started:
started = True
else:
pem_data = pem_data + line + b'\r\n'
yield pem_data
started = False
pem_data = b''
if started:
pem_data = pem_data + line + b'\r\n'
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | compress_message | python | def compress_message(data_to_compress):
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump() | Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L26-L48 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
    """Encrypt data and return the generated ASN.1.

    :param data_to_encrypt: A byte string of the data to be encrypted
    :param enc_alg: The algorithm to be used for encrypting the data, as a
        ``cipher_keylength_mode`` string, e.g. ``tripledes_192_cbc``.
    :param encryption_cert: The certificate to be used for encrypting the data
    :return: A CMS ASN.1 byte string of the encrypted data.
    :raises AS2Exception: if the requested cipher is not supported.
    """
    # Only the first two components are needed; the mode suffix is unused.
    cipher, key_length = enc_alg.split('_')[:2]

    # Generate the symmetric encryption key and encrypt the message
    if cipher == 'tripledes':
        key = util.rand_bytes(int(key_length) // 8)
        iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
            key, data_to_encrypt, None)
        enc_alg_asn1 = algos.EncryptionAlgorithm({
            'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
            'parameters': cms.OctetString(iv)
        })
    else:
        # Fix: previously fell through with key=None and crashed later
        # inside rsa_pkcs1v15_encrypt with an opaque error; fail fast with
        # a clear message instead (mirrors decrypt_message).
        raise AS2Exception('Unsupported Encryption Algorithm')

    # Encrypt the session key with the receiver's public key and build the
    # CMS EnvelopedData structure around the encrypted payload.
    encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
    return cms.ContentInfo({
        'content_type': cms.ContentType('enveloped_data'),
        'content': cms.EnvelopedData({
            'version': cms.CMSVersion('v0'),
            'recipient_infos': [
                cms.KeyTransRecipientInfo({
                    'version': cms.CMSVersion('v0'),
                    'rid': cms.RecipientIdentifier({
                        'issuer_and_serial_number': cms.IssuerAndSerialNumber({
                            'issuer': encryption_cert.asn1[
                                'tbs_certificate']['issuer'],
                            'serial_number': encryption_cert.asn1[
                                'tbs_certificate']['serial_number']
                        })
                    }),
                    'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
                        'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
                    }),
                    'encrypted_key': cms.OctetString(encrypted_key)
                })
            ],
            'encrypted_content_info': cms.EncryptedContentInfo({
                'content_type': cms.ContentType('data'),
                'content_encryption_algorithm': enc_alg_asn1,
                'encrypted_content': encrypted_content
            })
        })
    }).dump()
def decrypt_message(encrypted_data, decryption_key):
    """Parse an ASN.1 encrypted message and extract/decrypt the original
    message.

    :param encrypted_data: A CMS ASN.1 byte string containing the encrypted
        data.
    :param decryption_key: The key to be used for decrypting the data.
    :return: A two-element tuple of the cipher name used and a byte string
        containing the decrypted original message.
    :raises DecryptionError: if the payload is not enveloped data, uses an
        unsupported key encryption algorithm, or cannot be decrypted.
    """
    cms_content = cms.ContentInfo.load(encrypted_data)
    if cms_content['content_type'].native != 'enveloped_data':
        # Fix: previously returned (None, None) silently, which would
        # break callers downstream; fail loudly instead.
        raise DecryptionError('Encrypted data not found in ASN.1 ')

    recipient_info = cms_content['content']['recipient_infos'][0].parse()
    key_enc_alg = recipient_info[
        'key_encryption_algorithm']['algorithm'].native
    encrypted_key = recipient_info['encrypted_key'].native

    if key_enc_alg != 'rsa':
        # Fix: previously left ``key`` unbound and crashed below with a
        # NameError; raise a clear error instead.
        raise DecryptionError(
            'Unsupported key encryption algorithm: {}'.format(key_enc_alg))
    try:
        key = asymmetric.rsa_pkcs1v15_decrypt(
            decryption_key[0], encrypted_key)
    except Exception:
        raise DecryptionError('Failed to decrypt the payload: '
                              'Could not extract decryption key.')

    alg = cms_content['content']['encrypted_content_info'][
        'content_encryption_algorithm']
    encapsulated_data = cms_content['content'][
        'encrypted_content_info']['encrypted_content'].native
    try:
        if alg.encryption_cipher == 'tripledes':
            cipher = 'tripledes_192_cbc'
            decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
                key, encapsulated_data, alg.encryption_iv)
        else:
            raise AS2Exception('Unsupported Encryption Algorithm')
    except Exception as e:
        raise DecryptionError(
            'Failed to decrypt the payload: {}'.format(e))
    return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
                 use_signed_attributes=True):
    """Sign the data and return the generated ASN.1.

    :param data_to_sign: A byte string of the data to be signed.
    :param digest_alg:
        The digest algorithm to be used for generating the signature.
    :param sign_key: The key to be used for generating the signature.
    :param use_signed_attributes: Optional attribute to indicate whether the
        CMS signature attributes should be included in the signature or not.
    :return: A CMS ASN.1 byte string of the signed data.
    """
    if use_signed_attributes:
        # Digest the payload: it becomes the messageDigest signed
        # attribute, and the signature then covers the attribute set.
        digest_func = hashlib.new(digest_alg)
        digest_func.update(data_to_sign)
        message_digest = digest_func.digest()

        class SmimeCapability(core.Sequence):
            # Ad-hoc sequence for a single SMIMECapability entry
            # (algorithm OID plus optional parameters).
            _fields = [
                ('0', core.Any, {'optional': True}),
                ('1', core.Any, {'optional': True}),
                ('2', core.Any, {'optional': True}),
                ('3', core.Any, {'optional': True}),
                ('4', core.Any, {'optional': True})
            ]

        class SmimeCapabilities(core.Sequence):
            _fields = [
                ('0', SmimeCapability),
                ('1', SmimeCapability, {'optional': True}),
                ('2', SmimeCapability, {'optional': True}),
                ('3', SmimeCapability, {'optional': True}),
                ('4', SmimeCapability, {'optional': True}),
                ('5', SmimeCapability, {'optional': True}),
            ]

        # Advertised S/MIME capabilities: 3DES-CBC, RC2 (128) and RC4 (128),
        # matching the module-level ENCRYPTION_ALGORITHMS.
        smime_cap = OrderedDict([
            ('0', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
            ('1', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
                ('1', core.Integer(128))])),
            ('2', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
                ('1', core.Integer(128))])),
        ])

        signed_attributes = cms.CMSAttributes([
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('content_type'),
                'values': cms.SetOfContentType([
                    cms.ContentType('data')
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('signing_time'),
                'values': cms.SetOfTime([
                    cms.Time({
                        # Fix: UTCTime values are interpreted as UTC, so
                        # datetime.now() (naive local time) would embed a
                        # skewed signing time on non-UTC hosts.
                        'utc_time': core.UTCTime(datetime.utcnow())
                    })
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('message_digest'),
                'values': cms.SetOfOctetString([
                    core.OctetString(message_digest)
                ])
            }),
            cms.CMSAttribute({
                # OID 1.2.840.113549.1.9.15 = smimeCapabilities
                'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
                'values': cms.SetOfAny([
                    core.Any(SmimeCapabilities(smime_cap))
                ])
            }),
        ])
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], signed_attributes.dump(), digest_alg)
    else:
        # Without signed attributes the signature covers the raw payload.
        signed_attributes = None
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], data_to_sign, digest_alg)

    return cms.ContentInfo({
        'content_type': cms.ContentType('signed_data'),
        'content': cms.SignedData({
            'version': cms.CMSVersion('v1'),
            'digest_algorithms': cms.DigestAlgorithms([
                algos.DigestAlgorithm({
                    'algorithm': algos.DigestAlgorithmId(digest_alg)
                })
            ]),
            # Detached signature: the encapsulated content is left empty.
            'encap_content_info': cms.ContentInfo({
                'content_type': cms.ContentType('data')
            }),
            'certificates': cms.CertificateSet([
                cms.CertificateChoices({
                    'certificate': sign_key[1].asn1
                })
            ]),
            'signer_infos': cms.SignerInfos([
                cms.SignerInfo({
                    'version': cms.CMSVersion('v1'),
                    'sid': cms.SignerIdentifier({
                        'issuer_and_serial_number': cms.IssuerAndSerialNumber({
                            'issuer': sign_key[1].asn1[
                                'tbs_certificate']['issuer'],
                            'serial_number': sign_key[1].asn1[
                                'tbs_certificate']['serial_number']
                        })
                    }),
                    'digest_algorithm': algos.DigestAlgorithm({
                        'algorithm': algos.DigestAlgorithmId(digest_alg)
                    }),
                    'signed_attrs': signed_attributes,
                    'signature_algorithm': algos.SignedDigestAlgorithm({
                        'algorithm':
                            algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
                    }),
                    'signature': core.OctetString(signature)
                })
            ])
        })
    }).dump()
def verify_message(data_to_verify, signature, verify_cert):
    """Verify a CMS signature against the supplied data.

    :param data_to_verify:
        A byte string of the data to be verified against the signature.
    :param signature: A CMS ASN.1 byte string containing the signature.
    :param verify_cert: The certificate to be used for verifying the signature.
    :return: The digest algorithm that was used in the signature, or ``None``
        if the ASN.1 did not contain signed data.
    :raises AS2Exception: on an unsupported digest algorithm.
    :raises IntegrityError: if the signature does not match the data.
    """
    cms_content = cms.ContentInfo.load(signature)
    digest_alg = None
    if cms_content['content_type'].native == 'signed_data':
        for signer in cms_content['content']['signer_infos']:
            signed_attributes = signer['signed_attrs'].copy()
            digest_alg = signer['digest_algorithm']['algorithm'].native
            if digest_alg not in DIGEST_ALGORITHMS:
                # Fix: was a bare ``Exception``; use the package exception
                # type for consistency with the rest of the module.
                raise AS2Exception('Unsupported Digest Algorithm')
            sig_alg = signer['signature_algorithm']['algorithm'].native
            sig = signer['signature'].native
            signed_data = data_to_verify

            if signed_attributes:
                # With signed attributes present, the messageDigest
                # attribute must equal the payload digest, and the
                # signature covers the (untagged) attribute set instead
                # of the raw payload.
                attr_dict = {}
                for attr in signed_attributes.native:
                    attr_dict[attr['type']] = attr['values']
                message_digest = byte_cls()
                for d in attr_dict['message_digest']:
                    message_digest += d
                digest_func = hashlib.new(digest_alg)
                digest_func.update(data_to_verify)
                calc_message_digest = digest_func.digest()
                if message_digest != calc_message_digest:
                    raise IntegrityError('Failed to verify message signature: '
                                         'Message Digest does not match.')
                signed_data = signed_attributes.untag().dump()

            try:
                if sig_alg == 'rsassa_pkcs1v15':
                    asymmetric.rsa_pkcs1v15_verify(
                        verify_cert, sig, signed_data, digest_alg)
                elif sig_alg == 'rsassa_pss':
                    asymmetric.rsa_pss_verify(
                        verify_cert, sig, signed_data, digest_alg)
                else:
                    raise AS2Exception('Unsupported Signature Algorithm')
            except Exception as e:
                raise IntegrityError(
                    'Failed to verify message signature: {}'.format(e))

    return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | decompress_message | python | def decompress_message(compressed_data):
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e)) | Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L51-L69 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump()
def decrypt_message(encrypted_data, decryption_key):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message.
"""
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
"""Function signs the data and returns the generated ASN.1
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate whether the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data.
"""
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
('0', core.Any, {'optional': True}),
('1', core.Any, {'optional': True}),
('2', core.Any, {'optional': True}),
('3', core.Any, {'optional': True}),
('4', core.Any, {'optional': True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
('0', SmimeCapability),
('1', SmimeCapability, {'optional': True}),
('2', SmimeCapability, {'optional': True}),
('3', SmimeCapability, {'optional': True}),
('4', SmimeCapability, {'optional': True}),
('5', SmimeCapability, {'optional': True}),
]
smime_cap = OrderedDict([
('0', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
('1', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
('1', core.Integer(128))])),
('2', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
('1', core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': cms.SetOfContentType([
cms.ContentType('data')
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_time'),
'values': cms.SetOfTime([
cms.Time({
'utc_time': core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
'values': cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData({
'version': cms.CMSVersion('v1'),
'digest_algorithms': cms.DigestAlgorithms([
algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
})
]),
'encap_content_info': cms.ContentInfo({
'content_type': cms.ContentType('data')
}),
'certificates': cms.CertificateSet([
cms.CertificateChoices({
'certificate': sign_key[1].asn1
})
]),
'signer_infos': cms.SignerInfos([
cms.SignerInfo({
'version': cms.CMSVersion('v1'),
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': sign_key[1].asn1[
'tbs_certificate']['issuer'],
'serial_number': sign_key[1].asn1[
'tbs_certificate']['serial_number']
})
}),
'digest_algorithm': algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
}),
'signed_attrs': signed_attributes,
'signature_algorithm': algos.SignedDigestAlgorithm({
'algorithm':
algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
}),
'signature': core.OctetString(signature)
})
])
})
}).dump()
def verify_message(data_to_verify, signature, verify_cert):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param data_to_verify:
A byte string of the data to be verified against the signature.
:param signature: A CMS ASN.1 byte string containing the signature.
:param verify_cert: The certificate to be used for verifying the signature.
:return: The digest algorithm that was used in the signature.
"""
cms_content = cms.ContentInfo.load(signature)
digest_alg = None
if cms_content['content_type'].native == 'signed_data':
for signer in cms_content['content']['signer_infos']:
signed_attributes = signer['signed_attrs'].copy()
digest_alg = signer['digest_algorithm']['algorithm'].native
if digest_alg not in DIGEST_ALGORITHMS:
raise Exception('Unsupported Digest Algorithm')
sig_alg = signer['signature_algorithm']['algorithm'].native
sig = signer['signature'].native
signed_data = data_to_verify
if signed_attributes:
attr_dict = {}
for attr in signed_attributes.native:
attr_dict[attr['type']] = attr['values']
message_digest = byte_cls()
for d in attr_dict['message_digest']:
message_digest += d
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_verify)
calc_message_digest = digest_func.digest()
if message_digest != calc_message_digest:
raise IntegrityError('Failed to verify message signature: '
'Message Digest does not match.')
signed_data = signed_attributes.untag().dump()
try:
if sig_alg == 'rsassa_pkcs1v15':
asymmetric.rsa_pkcs1v15_verify(
verify_cert, sig, signed_data, digest_alg)
elif sig_alg == 'rsassa_pss':
asymmetric.rsa_pss_verify(
verify_cert, sig, signed_data, digest_alg)
else:
raise AS2Exception('Unsupported Signature Algorithm')
except Exception as e:
raise IntegrityError(
'Failed to verify message signature: {}'.format(e))
return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | encrypt_message | python | def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump() | Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L72-L128 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump()
def decrypt_message(encrypted_data, decryption_key):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message.
"""
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
"""Function signs the data and returns the generated ASN.1
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate whether the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data.
"""
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
('0', core.Any, {'optional': True}),
('1', core.Any, {'optional': True}),
('2', core.Any, {'optional': True}),
('3', core.Any, {'optional': True}),
('4', core.Any, {'optional': True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
('0', SmimeCapability),
('1', SmimeCapability, {'optional': True}),
('2', SmimeCapability, {'optional': True}),
('3', SmimeCapability, {'optional': True}),
('4', SmimeCapability, {'optional': True}),
('5', SmimeCapability, {'optional': True}),
]
smime_cap = OrderedDict([
('0', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
('1', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
('1', core.Integer(128))])),
('2', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
('1', core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': cms.SetOfContentType([
cms.ContentType('data')
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_time'),
'values': cms.SetOfTime([
cms.Time({
'utc_time': core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
'values': cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData({
'version': cms.CMSVersion('v1'),
'digest_algorithms': cms.DigestAlgorithms([
algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
})
]),
'encap_content_info': cms.ContentInfo({
'content_type': cms.ContentType('data')
}),
'certificates': cms.CertificateSet([
cms.CertificateChoices({
'certificate': sign_key[1].asn1
})
]),
'signer_infos': cms.SignerInfos([
cms.SignerInfo({
'version': cms.CMSVersion('v1'),
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': sign_key[1].asn1[
'tbs_certificate']['issuer'],
'serial_number': sign_key[1].asn1[
'tbs_certificate']['serial_number']
})
}),
'digest_algorithm': algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
}),
'signed_attrs': signed_attributes,
'signature_algorithm': algos.SignedDigestAlgorithm({
'algorithm':
algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
}),
'signature': core.OctetString(signature)
})
])
})
}).dump()
def verify_message(data_to_verify, signature, verify_cert):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param data_to_verify:
A byte string of the data to be verified against the signature.
:param signature: A CMS ASN.1 byte string containing the signature.
:param verify_cert: The certificate to be used for verifying the signature.
:return: The digest algorithm that was used in the signature.
"""
cms_content = cms.ContentInfo.load(signature)
digest_alg = None
if cms_content['content_type'].native == 'signed_data':
for signer in cms_content['content']['signer_infos']:
signed_attributes = signer['signed_attrs'].copy()
digest_alg = signer['digest_algorithm']['algorithm'].native
if digest_alg not in DIGEST_ALGORITHMS:
raise Exception('Unsupported Digest Algorithm')
sig_alg = signer['signature_algorithm']['algorithm'].native
sig = signer['signature'].native
signed_data = data_to_verify
if signed_attributes:
attr_dict = {}
for attr in signed_attributes.native:
attr_dict[attr['type']] = attr['values']
message_digest = byte_cls()
for d in attr_dict['message_digest']:
message_digest += d
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_verify)
calc_message_digest = digest_func.digest()
if message_digest != calc_message_digest:
raise IntegrityError('Failed to verify message signature: '
'Message Digest does not match.')
signed_data = signed_attributes.untag().dump()
try:
if sig_alg == 'rsassa_pkcs1v15':
asymmetric.rsa_pkcs1v15_verify(
verify_cert, sig, signed_data, digest_alg)
elif sig_alg == 'rsassa_pss':
asymmetric.rsa_pss_verify(
verify_cert, sig, signed_data, digest_alg)
else:
raise AS2Exception('Unsupported Signature Algorithm')
except Exception as e:
raise IntegrityError(
'Failed to verify message signature: {}'.format(e))
return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | decrypt_message | python | def decrypt_message(encrypted_data, decryption_key):
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content | Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L131-L176 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump()
def decrypt_message(encrypted_data, decryption_key):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message.
"""
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
"""Function signs the data and returns the generated ASN.1
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate weather the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data.
"""
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
('0', core.Any, {'optional': True}),
('1', core.Any, {'optional': True}),
('2', core.Any, {'optional': True}),
('3', core.Any, {'optional': True}),
('4', core.Any, {'optional': True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
('0', SmimeCapability),
('1', SmimeCapability, {'optional': True}),
('2', SmimeCapability, {'optional': True}),
('3', SmimeCapability, {'optional': True}),
('4', SmimeCapability, {'optional': True}),
('5', SmimeCapability, {'optional': True}),
]
smime_cap = OrderedDict([
('0', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
('1', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
('1', core.Integer(128))])),
('2', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
('1', core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': cms.SetOfContentType([
cms.ContentType('data')
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_time'),
'values': cms.SetOfTime([
cms.Time({
'utc_time': core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
'values': cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData({
'version': cms.CMSVersion('v1'),
'digest_algorithms': cms.DigestAlgorithms([
algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
})
]),
'encap_content_info': cms.ContentInfo({
'content_type': cms.ContentType('data')
}),
'certificates': cms.CertificateSet([
cms.CertificateChoices({
'certificate': sign_key[1].asn1
})
]),
'signer_infos': cms.SignerInfos([
cms.SignerInfo({
'version': cms.CMSVersion('v1'),
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': sign_key[1].asn1[
'tbs_certificate']['issuer'],
'serial_number': sign_key[1].asn1[
'tbs_certificate']['serial_number']
})
}),
'digest_algorithm': algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
}),
'signed_attrs': signed_attributes,
'signature_algorithm': algos.SignedDigestAlgorithm({
'algorithm':
algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
}),
'signature': core.OctetString(signature)
})
])
})
}).dump()
def verify_message(data_to_verify, signature, verify_cert):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param data_to_verify:
A byte string of the data to be verified against the signature.
:param signature: A CMS ASN.1 byte string containing the signature.
:param verify_cert: The certificate to be used for verifying the signature.
:return: The digest algorithm that was used in the signature.
"""
cms_content = cms.ContentInfo.load(signature)
digest_alg = None
if cms_content['content_type'].native == 'signed_data':
for signer in cms_content['content']['signer_infos']:
signed_attributes = signer['signed_attrs'].copy()
digest_alg = signer['digest_algorithm']['algorithm'].native
if digest_alg not in DIGEST_ALGORITHMS:
raise Exception('Unsupported Digest Algorithm')
sig_alg = signer['signature_algorithm']['algorithm'].native
sig = signer['signature'].native
signed_data = data_to_verify
if signed_attributes:
attr_dict = {}
for attr in signed_attributes.native:
attr_dict[attr['type']] = attr['values']
message_digest = byte_cls()
for d in attr_dict['message_digest']:
message_digest += d
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_verify)
calc_message_digest = digest_func.digest()
if message_digest != calc_message_digest:
raise IntegrityError('Failed to verify message signature: '
'Message Digest does not match.')
signed_data = signed_attributes.untag().dump()
try:
if sig_alg == 'rsassa_pkcs1v15':
asymmetric.rsa_pkcs1v15_verify(
verify_cert, sig, signed_data, digest_alg)
elif sig_alg == 'rsassa_pss':
asymmetric.rsa_pss_verify(
verify_cert, sig, signed_data, digest_alg)
else:
raise AS2Exception('Unsupported Signature Algorithm')
except Exception as e:
raise IntegrityError(
'Failed to verify message signature: {}'.format(e))
return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | sign_message | python | def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
('0', core.Any, {'optional': True}),
('1', core.Any, {'optional': True}),
('2', core.Any, {'optional': True}),
('3', core.Any, {'optional': True}),
('4', core.Any, {'optional': True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
('0', SmimeCapability),
('1', SmimeCapability, {'optional': True}),
('2', SmimeCapability, {'optional': True}),
('3', SmimeCapability, {'optional': True}),
('4', SmimeCapability, {'optional': True}),
('5', SmimeCapability, {'optional': True}),
]
smime_cap = OrderedDict([
('0', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
('1', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
('1', core.Integer(128))])),
('2', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
('1', core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': cms.SetOfContentType([
cms.ContentType('data')
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_time'),
'values': cms.SetOfTime([
cms.Time({
'utc_time': core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
'values': cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData({
'version': cms.CMSVersion('v1'),
'digest_algorithms': cms.DigestAlgorithms([
algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
})
]),
'encap_content_info': cms.ContentInfo({
'content_type': cms.ContentType('data')
}),
'certificates': cms.CertificateSet([
cms.CertificateChoices({
'certificate': sign_key[1].asn1
})
]),
'signer_infos': cms.SignerInfos([
cms.SignerInfo({
'version': cms.CMSVersion('v1'),
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': sign_key[1].asn1[
'tbs_certificate']['issuer'],
'serial_number': sign_key[1].asn1[
'tbs_certificate']['serial_number']
})
}),
'digest_algorithm': algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
}),
'signed_attrs': signed_attributes,
'signature_algorithm': algos.SignedDigestAlgorithm({
'algorithm':
algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
}),
'signature': core.OctetString(signature)
})
])
})
}).dump() | Function signs the data and returns the generated ASN.1
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate weather the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L179-L306 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump()
def decrypt_message(encrypted_data, decryption_key):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message.
"""
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
"""Function signs the data and returns the generated ASN.1
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate weather the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data.
"""
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
('0', core.Any, {'optional': True}),
('1', core.Any, {'optional': True}),
('2', core.Any, {'optional': True}),
('3', core.Any, {'optional': True}),
('4', core.Any, {'optional': True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
('0', SmimeCapability),
('1', SmimeCapability, {'optional': True}),
('2', SmimeCapability, {'optional': True}),
('3', SmimeCapability, {'optional': True}),
('4', SmimeCapability, {'optional': True}),
('5', SmimeCapability, {'optional': True}),
]
smime_cap = OrderedDict([
('0', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
('1', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
('1', core.Integer(128))])),
('2', OrderedDict([
('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
('1', core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': cms.SetOfContentType([
cms.ContentType('data')
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_time'),
'values': cms.SetOfTime([
cms.Time({
'utc_time': core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
'values': cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData({
'version': cms.CMSVersion('v1'),
'digest_algorithms': cms.DigestAlgorithms([
algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
})
]),
'encap_content_info': cms.ContentInfo({
'content_type': cms.ContentType('data')
}),
'certificates': cms.CertificateSet([
cms.CertificateChoices({
'certificate': sign_key[1].asn1
})
]),
'signer_infos': cms.SignerInfos([
cms.SignerInfo({
'version': cms.CMSVersion('v1'),
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': sign_key[1].asn1[
'tbs_certificate']['issuer'],
'serial_number': sign_key[1].asn1[
'tbs_certificate']['serial_number']
})
}),
'digest_algorithm': algos.DigestAlgorithm({
'algorithm': algos.DigestAlgorithmId(digest_alg)
}),
'signed_attrs': signed_attributes,
'signature_algorithm': algos.SignedDigestAlgorithm({
'algorithm':
algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
}),
'signature': core.OctetString(signature)
})
])
})
}).dump()
def verify_message(data_to_verify, signature, verify_cert):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param data_to_verify:
A byte string of the data to be verified against the signature.
:param signature: A CMS ASN.1 byte string containing the signature.
:param verify_cert: The certificate to be used for verifying the signature.
:return: The digest algorithm that was used in the signature.
"""
cms_content = cms.ContentInfo.load(signature)
digest_alg = None
if cms_content['content_type'].native == 'signed_data':
for signer in cms_content['content']['signer_infos']:
signed_attributes = signer['signed_attrs'].copy()
digest_alg = signer['digest_algorithm']['algorithm'].native
if digest_alg not in DIGEST_ALGORITHMS:
raise Exception('Unsupported Digest Algorithm')
sig_alg = signer['signature_algorithm']['algorithm'].native
sig = signer['signature'].native
signed_data = data_to_verify
if signed_attributes:
attr_dict = {}
for attr in signed_attributes.native:
attr_dict[attr['type']] = attr['values']
message_digest = byte_cls()
for d in attr_dict['message_digest']:
message_digest += d
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_verify)
calc_message_digest = digest_func.digest()
if message_digest != calc_message_digest:
raise IntegrityError('Failed to verify message signature: '
'Message Digest does not match.')
signed_data = signed_attributes.untag().dump()
try:
if sig_alg == 'rsassa_pkcs1v15':
asymmetric.rsa_pkcs1v15_verify(
verify_cert, sig, signed_data, digest_alg)
elif sig_alg == 'rsassa_pss':
asymmetric.rsa_pss_verify(
verify_cert, sig, signed_data, digest_alg)
else:
raise AS2Exception('Unsupported Signature Algorithm')
except Exception as e:
raise IntegrityError(
'Failed to verify message signature: {}'.format(e))
return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/cms.py | verify_message | python | def verify_message(data_to_verify, signature, verify_cert):
cms_content = cms.ContentInfo.load(signature)
digest_alg = None
if cms_content['content_type'].native == 'signed_data':
for signer in cms_content['content']['signer_infos']:
signed_attributes = signer['signed_attrs'].copy()
digest_alg = signer['digest_algorithm']['algorithm'].native
if digest_alg not in DIGEST_ALGORITHMS:
raise Exception('Unsupported Digest Algorithm')
sig_alg = signer['signature_algorithm']['algorithm'].native
sig = signer['signature'].native
signed_data = data_to_verify
if signed_attributes:
attr_dict = {}
for attr in signed_attributes.native:
attr_dict[attr['type']] = attr['values']
message_digest = byte_cls()
for d in attr_dict['message_digest']:
message_digest += d
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_verify)
calc_message_digest = digest_func.digest()
if message_digest != calc_message_digest:
raise IntegrityError('Failed to verify message signature: '
'Message Digest does not match.')
signed_data = signed_attributes.untag().dump()
try:
if sig_alg == 'rsassa_pkcs1v15':
asymmetric.rsa_pkcs1v15_verify(
verify_cert, sig, signed_data, digest_alg)
elif sig_alg == 'rsassa_pss':
asymmetric.rsa_pss_verify(
verify_cert, sig, signed_data, digest_alg)
else:
raise AS2Exception('Unsupported Signature Algorithm')
except Exception as e:
raise IntegrityError(
'Failed to verify message signature: {}'.format(e))
return digest_alg | Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param data_to_verify:
A byte string of the data to be verified against the signature.
:param signature: A CMS ASN.1 byte string containing the signature.
:param verify_cert: The certificate to be used for verifying the signature.
:return: The digest algorithm that was used in the signature. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L309-L371 | null | from __future__ import absolute_import, unicode_literals
from asn1crypto import cms, core, algos
from oscrypto import asymmetric, symmetric, util
from datetime import datetime
from collections import OrderedDict
from .compat import byte_cls
from .exceptions import *
import hashlib
import zlib
DIGEST_ALGORITHMS = (
'md5',
'sha1',
'sha224',
'sha256',
'sha384',
'sha512'
)
ENCRYPTION_ALGORITHMS = (
'tripledes_192_cbc',
'rc2_128_cbc',
'rc4_128_cbc'
)
def compress_message(data_to_compress):
"""Function compresses data and returns the generated ASN.1
:param data_to_compress: A byte string of the data to be compressed
:return: A CMS ASN.1 byte string of the compressed data.
"""
compressed_content = cms.ParsableOctetString(
zlib.compress(data_to_compress))
return cms.ContentInfo({
'content_type': cms.ContentType('compressed_data'),
'content': cms.CompressedData({
'version': cms.CMSVersion('v0'),
'compression_algorithm':
cms.CompressionAlgorithm({
'algorithm': cms.CompressionAlgorithmId('zlib')
}),
'encap_content_info': cms.EncapsulatedContentInfo({
'content_type': cms.ContentType('data'),
'content': compressed_content
})
})
}).dump()
def decompress_message(compressed_data):
"""Function parses an ASN.1 compressed message and extracts/decompresses
the original message.
:param compressed_data: A CMS ASN.1 byte string containing the compressed
data.
:return: A byte string containing the decompressed original message.
"""
try:
cms_content = cms.ContentInfo.load(compressed_data)
if cms_content['content_type'].native == 'compressed_data':
return cms_content['content'].decompressed
else:
raise DecompressionError('Compressed data not found in ASN.1 ')
except Exception as e:
raise DecompressionError(
'Decompression failed with cause: {}'.format(e))
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
"""Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte string of the encrypted data.
"""
enc_alg_list = enc_alg.split('_')
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
# Generate the symmetric encryption key and encrypt the message
if cipher == 'tripledes':
key = util.rand_bytes(int(key_length)//8)
iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
key, data_to_encrypt, None)
enc_alg_asn1 = algos.EncryptionAlgorithm({
'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
'parameters': cms.OctetString(iv)
})
# Encrypt the key and build the ASN.1 message
encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
return cms.ContentInfo({
'content_type': cms.ContentType('enveloped_data'),
'content': cms.EnvelopedData({
'version': cms.CMSVersion('v0'),
'recipient_infos': [
cms.KeyTransRecipientInfo({
'version': cms.CMSVersion('v0'),
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': encryption_cert.asn1[
'tbs_certificate']['issuer'],
'serial_number': encryption_cert.asn1[
'tbs_certificate']['serial_number']
})
}),
'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
}),
'encrypted_key': cms.OctetString(encrypted_key)
})
],
'encrypted_content_info': cms.EncryptedContentInfo({
'content_type': cms.ContentType('data'),
'content_encryption_algorithm': enc_alg_asn1,
'encrypted_content': encrypted_content
})
})
}).dump()
def decrypt_message(encrypted_data, decryption_key):
"""Function parses an ASN.1 encrypted message and extracts/decrypts
the original message.
:param encrypted_data: A CMS ASN.1 byte string containing the encrypted
data.
:param decryption_key: The key to be used for decrypting the data.
:return: A byte string containing the decrypted original message.
"""
cms_content = cms.ContentInfo.load(encrypted_data)
cipher, decrypted_content = None, None
if cms_content['content_type'].native == 'enveloped_data':
recipient_info = cms_content['content']['recipient_infos'][0].parse()
key_enc_alg = recipient_info[
'key_encryption_algorithm']['algorithm'].native
encrypted_key = recipient_info['encrypted_key'].native
if key_enc_alg == 'rsa':
try:
key = asymmetric.rsa_pkcs1v15_decrypt(
decryption_key[0], encrypted_key)
except Exception as e:
raise DecryptionError('Failed to decrypt the payload: '
'Could not extract decryption key.')
alg = cms_content['content']['encrypted_content_info'][
'content_encryption_algorithm']
encapsulated_data = cms_content['content'][
'encrypted_content_info']['encrypted_content'].native
try:
if alg.encryption_cipher == 'tripledes':
cipher = 'tripledes_192_cbc'
decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(
key, encapsulated_data, alg.encryption_iv)
else:
raise AS2Exception('Unsupported Encryption Algorithm')
except Exception as e:
raise DecryptionError(
'Failed to decrypt the payload: {}'.format(e))
return cipher, decrypted_content
def sign_message(data_to_sign, digest_alg, sign_key,
                 use_signed_attributes=True):
    """Function signs the data and returns the generated ASN.1

    :param data_to_sign: A byte string of the data to be signed.

    :param digest_alg:
        The digest algorithm to be used for generating the signature.

    :param sign_key: The key to be used for generating the signature.

    :param use_signed_attributes: Optional attribute to indicate whether the
        CMS signature attributes should be included in the signature or not.

    :return: A CMS ASN.1 byte string of the signed data.
    """
    # Local import so this fix is self-contained: the module level imports
    # only the ``datetime`` class from the ``datetime`` module.
    from datetime import timezone

    if use_signed_attributes:
        # When signed attributes are used, the signature is computed over the
        # DER-encoded attribute set (which embeds the content digest) rather
        # than over the raw content itself.
        digest_func = hashlib.new(digest_alg)
        digest_func.update(data_to_sign)
        message_digest = digest_func.digest()

        class SmimeCapability(core.Sequence):
            _fields = [
                ('0', core.Any, {'optional': True}),
                ('1', core.Any, {'optional': True}),
                ('2', core.Any, {'optional': True}),
                ('3', core.Any, {'optional': True}),
                ('4', core.Any, {'optional': True})
            ]

        class SmimeCapabilities(core.Sequence):
            _fields = [
                ('0', SmimeCapability),
                ('1', SmimeCapability, {'optional': True}),
                ('2', SmimeCapability, {'optional': True}),
                ('3', SmimeCapability, {'optional': True}),
                ('4', SmimeCapability, {'optional': True}),
                ('5', SmimeCapability, {'optional': True}),
            ]

        # Advertised S/MIME capabilities (standard PKCS#9 cipher OIDs):
        # 1.2.840.113549.3.7 = des-EDE3-CBC,
        # 1.2.840.113549.3.2 = RC2-CBC (128 bit),
        # 1.2.840.113549.3.4 = RC4 (128 bit).
        smime_cap = OrderedDict([
            ('0', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
            ('1', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
                ('1', core.Integer(128))])),
            ('2', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
                ('1', core.Integer(128))])),
        ])

        signed_attributes = cms.CMSAttributes([
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('content_type'),
                'values': cms.SetOfContentType([
                    cms.ContentType('data')
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('signing_time'),
                'values': cms.SetOfTime([
                    cms.Time({
                        # Use an aware UTC timestamp: UTCTime semantically
                        # carries UTC, and naive local time is rejected by
                        # recent asn1crypto versions.
                        'utc_time': core.UTCTime(
                            datetime.now(tz=timezone.utc))
                    })
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('message_digest'),
                'values': cms.SetOfOctetString([
                    core.OctetString(message_digest)
                ])
            }),
            cms.CMSAttribute({
                # 1.2.840.113549.1.9.15 = smimeCapabilities (PKCS#9)
                'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
                'values': cms.SetOfAny([
                    core.Any(SmimeCapabilities(smime_cap))
                ])
            }),
        ])
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], signed_attributes.dump(), digest_alg)
    else:
        # No signed attributes: the signature covers the raw content.
        signed_attributes = None
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], data_to_sign, digest_alg)

    # ``sign_key`` is a (private_key, certificate) pair; the certificate is
    # embedded in the SignedData so the receiver can identify the signer.
    return cms.ContentInfo({
        'content_type': cms.ContentType('signed_data'),
        'content': cms.SignedData({
            'version': cms.CMSVersion('v1'),
            'digest_algorithms': cms.DigestAlgorithms([
                algos.DigestAlgorithm({
                    'algorithm': algos.DigestAlgorithmId(digest_alg)
                })
            ]),
            'encap_content_info': cms.ContentInfo({
                'content_type': cms.ContentType('data')
            }),
            'certificates': cms.CertificateSet([
                cms.CertificateChoices({
                    'certificate': sign_key[1].asn1
                })
            ]),
            'signer_infos': cms.SignerInfos([
                cms.SignerInfo({
                    'version': cms.CMSVersion('v1'),
                    'sid': cms.SignerIdentifier({
                        'issuer_and_serial_number': cms.IssuerAndSerialNumber({
                            'issuer': sign_key[1].asn1[
                                'tbs_certificate']['issuer'],
                            'serial_number': sign_key[1].asn1[
                                'tbs_certificate']['serial_number']
                        })
                    }),
                    'digest_algorithm': algos.DigestAlgorithm({
                        'algorithm': algos.DigestAlgorithmId(digest_alg)
                    }),
                    'signed_attrs': signed_attributes,
                    'signature_algorithm': algos.SignedDigestAlgorithm({
                        'algorithm':
                            algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
                    }),
                    'signature': core.OctetString(signature)
                })
            ])
        })
    }).dump()
def verify_message(data_to_verify, signature, verify_cert):
    """Function verifies a CMS detached signature against the given data.

    :param data_to_verify:
        A byte string of the data to be verified against the signature.

    :param signature: A CMS ASN.1 byte string containing the signature.

    :param verify_cert: The certificate to be used for verifying the signature.

    :return: The digest algorithm that was used in the signature.

    :raises AS2Exception: if the digest or signature algorithm is unsupported.
    :raises IntegrityError: if the digest or signature verification fails.
    """
    cms_content = cms.ContentInfo.load(signature)
    digest_alg = None
    if cms_content['content_type'].native == 'signed_data':
        for signer in cms_content['content']['signer_infos']:
            signed_attributes = signer['signed_attrs'].copy()
            digest_alg = signer['digest_algorithm']['algorithm'].native
            if digest_alg not in DIGEST_ALGORITHMS:
                # Use the library's own exception type, consistent with the
                # 'Unsupported Signature Algorithm' branch below.
                raise AS2Exception('Unsupported Digest Algorithm')
            sig_alg = signer['signature_algorithm']['algorithm'].native
            sig = signer['signature'].native
            signed_data = data_to_verify

            if signed_attributes:
                # With signed attributes present, the signature covers the
                # attribute set, not the raw content. First confirm the
                # message-digest attribute matches the digest of the actual
                # content.
                attr_dict = {}
                for attr in signed_attributes.native:
                    attr_dict[attr['type']] = attr['values']
                message_digest = byte_cls()
                for d in attr_dict['message_digest']:
                    message_digest += d
                digest_func = hashlib.new(digest_alg)
                digest_func.update(data_to_verify)
                calc_message_digest = digest_func.digest()
                if message_digest != calc_message_digest:
                    raise IntegrityError('Failed to verify message signature: '
                                         'Message Digest does not match.')
                # Verify against the DER encoding of the attribute set;
                # untag() strips the implicit context tag first.
                signed_data = signed_attributes.untag().dump()

            try:
                if sig_alg == 'rsassa_pkcs1v15':
                    asymmetric.rsa_pkcs1v15_verify(
                        verify_cert, sig, signed_data, digest_alg)
                elif sig_alg == 'rsassa_pss':
                    asymmetric.rsa_pss_verify(
                        verify_cert, sig, signed_data, digest_alg)
                else:
                    raise AS2Exception('Unsupported Signature Algorithm')
            except Exception as e:
                # Any verification failure (including the unsupported-
                # algorithm case above) surfaces as an IntegrityError.
                raise IntegrityError(
                    'Failed to verify message signature: {}'.format(e))

    return digest_alg
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Organization.load_key | python | def load_key(key_str, key_pass):
try:
# First try to parse as a p12 file
key, cert, _ = asymmetric.load_pkcs12(key_str, key_pass)
except ValueError as e:
# If it fails due to invalid password raise error here
if e.args[0] == 'Password provided is invalid':
raise AS2Exception('Password not valid for Private Key.')
# if not try to parse as a pem file
key, cert = None, None
for kc in split_pem(key_str):
try:
cert = asymmetric.load_certificate(kc)
except (ValueError, TypeError):
try:
key = asymmetric.load_private_key(kc, key_pass)
except OSError:
raise AS2Exception(
'Invalid Private Key or password is not correct.')
if not key or not cert:
raise AS2Exception(
'Invalid Private key file or Public key not included.')
return key, cert | Function to load password protected key file in p12 or pem format | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L77-L104 | null | class Organization(object):
"""Class represents an AS2 organization and defines the certificates and
settings to be used when sending and receiving messages. """
def __init__(self, as2_name, sign_key=None, sign_key_pass=None,
decrypt_key=None, decrypt_key_pass=None, mdn_url=None,
mdn_confirm_text=MDN_CONFIRM_TEXT):
"""
:param as2_name: The unique AS2 name for this organization
:param sign_key: A byte string of the pkcs12 encoded key pair
used for signing outbound messages and MDNs.
:param sign_key_pass: The password for decrypting the `sign_key`
:param decrypt_key: A byte string of the pkcs12 encoded key pair
used for decrypting inbound messages.
:param decrypt_key_pass: The password for decrypting the `decrypt_key`
:param mdn_url: The URL where the receiver is expected to post
asynchronous MDNs.
"""
self.sign_key = self.load_key(
sign_key, sign_key_pass) if sign_key else None
self.decrypt_key = self.load_key(
decrypt_key, decrypt_key_pass) if decrypt_key else None
self.as2_name = as2_name
self.mdn_url = mdn_url
self.mdn_confirm_text = mdn_confirm_text
@staticmethod
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Message.content | python | def content(self):
if not self.payload:
return ''
if self.payload.is_multipart():
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
content = self.payload.get_payload()
if isinstance(content, str_cls):
content = content.encode('utf-8')
return content | Function returns the body of the as2 payload as a bytes object | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L263-L280 | [
"def mime_to_bytes(msg, header_len):\n \"\"\"\n Function to convert and email Message to flat string format\n :param msg: email.Message to be converted to string\n :param header_len: the msx length of the header per line\n :return: the byte string representation of the email message\n \"\"\"\n fp = BytesIO()\n g = BytesGenerator(fp, maxheaderlen=header_len)\n g.flatten(msg)\n return fp.getvalue()\n"
] | class Message(object):
"""Class for handling AS2 messages. Includes functions for both
parsing and building messages.
"""
def __init__(self, sender=None, receiver=None):
"""
:param sender:
An object of type <pyas2lib.as2.Organization>, representing the
sender of the message.
:param receiver:
An object of type <pyas2lib.as2.Partner>, representing the
receiver of the message .
"""
self.sender = sender
self.receiver = receiver
self.compressed = False
self.signed = False
self.digest_alg = None
self.encrypted = False
self.enc_alg = None
self.message_id = None
self.payload = None
self.mic = None
@property
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def build(self, data, filename=None, subject='AS2 Message',
content_type='application/edi-consent', additional_headers=None):
"""Function builds the AS2 message. Compresses, signs and encrypts
the payload if applicable.
:param data: A byte string of the data to be transmitted.
:param filename:
Optional filename to be included in the Content-disposition header.
:param subject:
The subject for the AS2 message, used by some AS2 servers for
additional routing of messages. (default "AS2 Message")
:param content_type:
The content type for the AS2 message, to be used in the MIME
header. (default "application/edi-consent")
:param additional_headers:
Any additional headers to be included as part of the AS2 message.
"""
# Validations
assert type(data) is byte_cls, \
'Parameter data must be of type {}'.format(byte_cls)
additional_headers = additional_headers if additional_headers else {}
assert type(additional_headers) is dict
if self.receiver.sign and not self.sender.sign_key:
raise ImproperlyConfigured(
'Signing of messages is enabled but sign key is not set '
'for the sender.')
if self.receiver.encrypt and not self.receiver.encrypt_cert:
raise ImproperlyConfigured(
'Encryption of messages is enabled but encrypt key is not set '
'for the receiver.')
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
# Set up the message headers
as2_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(self.sender.as2_name),
'AS2-To': quote_as2name(self.receiver.as2_name),
'Subject': subject,
'Date': email_utils.formatdate(localtime=True),
# 'recipient-address': message.partner.target_url,
}
as2_headers.update(additional_headers)
# Read the input and convert to bytes if value is unicode/str
# using utf-8 encoding and finally Canonicalize the payload
self.payload = email_message.Message()
self.payload.set_payload(data)
self.payload.set_type(content_type)
encoders.encode_7or8bit(self.payload)
if filename:
self.payload.add_header(
'Content-Disposition', 'attachment', filename=filename)
del self.payload['MIME-Version']
if self.receiver.compress:
self.compressed = True
compressed_message = email_message.Message()
compressed_message.set_type('application/pkcs7-mime')
compressed_message.set_param('name', 'smime.p7z')
compressed_message.set_param('smime-type', 'compressed-data')
compressed_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7z')
compressed_message.set_payload(
compress_message(canonicalize(self.payload)))
encoders.encode_base64(compressed_message)
self.payload = compressed_message
# logger.debug(b'Compressed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.sign:
self.signed, self.digest_alg = True, self.receiver.digest_alg
signed_message = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_message['MIME-Version']
signed_message.attach(self.payload)
# Calculate the MIC Hash of the message to be verified
mic_content = canonicalize(self.payload)
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
mic_content, self.digest_alg, self.sender.sign_key))
encoders.encode_base64(signature)
signed_message.set_param('micalg', self.digest_alg)
signed_message.attach(signature)
self.payload = signed_message
# logger.debug(b'Signed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.encrypt:
self.encrypted, self.enc_alg = True, self.receiver.enc_alg
encrypted_message = email_message.Message()
encrypted_message.set_type('application/pkcs7-mime')
encrypted_message.set_param('name', 'smime.p7m')
encrypted_message.set_param('smime-type', 'enveloped-data')
encrypted_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7m')
encrypt_cert = self.receiver.load_encrypt_cert()
encrypted_message.set_payload(encrypt_message(
canonicalize(self.payload),
self.enc_alg,
encrypt_cert
))
encoders.encode_base64(encrypted_message)
self.payload = encrypted_message
# logger.debug(b'Encrypted message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.mdn_mode:
as2_headers['disposition-notification-to'] = 'no-reply@pyas2.com'
if self.receiver.mdn_digest_alg:
as2_headers['disposition-notification-options'] = \
'signed-receipt-protocol=required, pkcs7-signature; ' \
'signed-receipt-micalg=optional, {}'.format(
self.receiver.mdn_digest_alg)
if self.receiver.mdn_mode == 'ASYNC':
if not self.sender.mdn_url:
raise ImproperlyConfigured(
'MDN URL must be set in the organization when MDN mode '
'is set to ASYNC')
as2_headers['receipt-delivery-option'] = self.sender.mdn_url
# Update the headers of the final payload and set its boundary
for k, v in as2_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary())
@staticmethod
def decompress_data(payload):
if payload.get_content_type() == 'application/pkcs7-mime' \
and payload.get_param('smime-type') == 'compressed-data':
compressed_data = payload.get_payload(decode=True)
decompressed_data = decompress_message(compressed_data)
return True, parse_mime(decompressed_data)
return False, payload
def parse(self, raw_content, find_org_cb, find_partner_cb,
find_message_cb=None):
"""Function parses the RAW AS2 message; decrypts, verifies and
decompresses it and extracts the payload.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_org_cb:
A callback the returns an Organization object if exists. The
as2-to header value is passed as an argument to it.
:param find_partner_cb:
A callback the returns an Partner object if exists. The
as2-from header value is passed as an argument to it.
:param find_message_cb:
An optional callback the returns an Message object if exists in
order to check for duplicates. The message id and partner id is
passed as arguments to it.
:return:
A three element tuple containing (status, (exception, traceback)
, mdn). The status is a string indicating the status of the
transaction. The exception is populated with any exception raised
during processing and the mdn is an MDN object or None in case
the partner did not request it.
"""
# Parse the raw MIME message and extract its content and headers
status, detailed_status, exception, mdn = \
'processed', None, (None, None), None
self.payload = parse_mime(raw_content)
as2_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
as2_headers[k] = v
try:
# Get the organization and partner for this transmission
org_id = unquote_as2name(as2_headers['as2-to'])
self.receiver = find_org_cb(org_id)
if not self.receiver:
raise PartnerNotFound(
'Unknown AS2 organization with id {}'.format(org_id))
partner_id = unquote_as2name(as2_headers['as2-from'])
self.sender = find_partner_cb(partner_id)
if not self.sender:
raise PartnerNotFound(
'Unknown AS2 partner with id {}'.format(partner_id))
if find_message_cb and \
find_message_cb(self.message_id, partner_id):
raise DuplicateDocument(
'Duplicate message received, message with this ID '
'already processed.')
if self.sender.encrypt and \
self.payload.get_content_type() != 'application/pkcs7-mime':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be encrypted'
' but encrypted message not found.'.format(partner_id))
if self.payload.get_content_type() == 'application/pkcs7-mime' \
and self.payload.get_param('smime-type') == 'enveloped-data':
encrypted_data = self.payload.get_payload(decode=True)
# logger.debug(
# 'Decrypting the payload :\n%s' % self.payload.as_string())
self.encrypted = True
self.enc_alg, decrypted_content = decrypt_message(
encrypted_data,
self.receiver.decrypt_key
)
raw_content = decrypted_content
self.payload = parse_mime(decrypted_content)
if self.payload.get_content_type() == 'text/plain':
self.payload = email_message.Message()
self.payload.set_payload(decrypted_content)
self.payload.set_type('application/edi-consent')
# Check for compressed data here
self.compressed, self.payload = self.decompress_data(self.payload)
if self.sender.sign and \
self.payload.get_content_type() != 'multipart/signed':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be signed '
'but signed message not found.'.format(partner_id))
if self.payload.get_content_type() == 'multipart/signed':
# logger.debug(b'Verifying the signed payload:\n{0:s}'.format(
# self.payload.as_string()))
self.signed = True
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == "application/pkcs7-signature":
signature = part.get_payload(decode=True)
else:
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = canonicalize(self.payload)
verify_cert = self.sender.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = raw_content.split(message_boundary)[
1].replace(b'\n', b'\r\n')
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
# Calculate the MIC Hash of the message to be verified
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Check for compressed data here
if not self.compressed:
self.compressed, self.payload = self.decompress_data(self.payload)
except Exception as e:
status = getattr(e, 'disposition_type', 'processed/Error')
detailed_status = getattr(
e, 'disposition_modifier', 'unexpected-processing-error')
print(traceback.format_exc())
exception = (e, traceback.format_exc())
finally:
# Update the payload headers with the original headers
for k, v in as2_headers.items():
if self.payload.get(k) and k.lower() != 'content-disposition':
del self.payload[k]
self.payload.add_header(k, v)
if as2_headers.get('disposition-notification-to'):
mdn_mode = SYNCHRONOUS_MDN
mdn_url = as2_headers.get('receipt-delivery-option')
if mdn_url:
mdn_mode = ASYNCHRONOUS_MDN
digest_alg = as2_headers.get('disposition-notification-options')
if digest_alg:
digest_alg = digest_alg.split(';')[-1].split(',')[
-1].strip()
mdn = Mdn(
mdn_mode=mdn_mode, mdn_url=mdn_url, digest_alg=digest_alg)
mdn.build(message=self,
status=status,
detailed_status=detailed_status)
return status, exception, mdn
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Message.build | python | def build(self, data, filename=None, subject='AS2 Message',
content_type='application/edi-consent', additional_headers=None):
# Validations
assert type(data) is byte_cls, \
'Parameter data must be of type {}'.format(byte_cls)
additional_headers = additional_headers if additional_headers else {}
assert type(additional_headers) is dict
if self.receiver.sign and not self.sender.sign_key:
raise ImproperlyConfigured(
'Signing of messages is enabled but sign key is not set '
'for the sender.')
if self.receiver.encrypt and not self.receiver.encrypt_cert:
raise ImproperlyConfigured(
'Encryption of messages is enabled but encrypt key is not set '
'for the receiver.')
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
# Set up the message headers
as2_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(self.sender.as2_name),
'AS2-To': quote_as2name(self.receiver.as2_name),
'Subject': subject,
'Date': email_utils.formatdate(localtime=True),
# 'recipient-address': message.partner.target_url,
}
as2_headers.update(additional_headers)
# Read the input and convert to bytes if value is unicode/str
# using utf-8 encoding and finally Canonicalize the payload
self.payload = email_message.Message()
self.payload.set_payload(data)
self.payload.set_type(content_type)
encoders.encode_7or8bit(self.payload)
if filename:
self.payload.add_header(
'Content-Disposition', 'attachment', filename=filename)
del self.payload['MIME-Version']
if self.receiver.compress:
self.compressed = True
compressed_message = email_message.Message()
compressed_message.set_type('application/pkcs7-mime')
compressed_message.set_param('name', 'smime.p7z')
compressed_message.set_param('smime-type', 'compressed-data')
compressed_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7z')
compressed_message.set_payload(
compress_message(canonicalize(self.payload)))
encoders.encode_base64(compressed_message)
self.payload = compressed_message
# logger.debug(b'Compressed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.sign:
self.signed, self.digest_alg = True, self.receiver.digest_alg
signed_message = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_message['MIME-Version']
signed_message.attach(self.payload)
# Calculate the MIC Hash of the message to be verified
mic_content = canonicalize(self.payload)
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
mic_content, self.digest_alg, self.sender.sign_key))
encoders.encode_base64(signature)
signed_message.set_param('micalg', self.digest_alg)
signed_message.attach(signature)
self.payload = signed_message
# logger.debug(b'Signed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.encrypt:
self.encrypted, self.enc_alg = True, self.receiver.enc_alg
encrypted_message = email_message.Message()
encrypted_message.set_type('application/pkcs7-mime')
encrypted_message.set_param('name', 'smime.p7m')
encrypted_message.set_param('smime-type', 'enveloped-data')
encrypted_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7m')
encrypt_cert = self.receiver.load_encrypt_cert()
encrypted_message.set_payload(encrypt_message(
canonicalize(self.payload),
self.enc_alg,
encrypt_cert
))
encoders.encode_base64(encrypted_message)
self.payload = encrypted_message
# logger.debug(b'Encrypted message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.mdn_mode:
as2_headers['disposition-notification-to'] = 'no-reply@pyas2.com'
if self.receiver.mdn_digest_alg:
as2_headers['disposition-notification-options'] = \
'signed-receipt-protocol=required, pkcs7-signature; ' \
'signed-receipt-micalg=optional, {}'.format(
self.receiver.mdn_digest_alg)
if self.receiver.mdn_mode == 'ASYNC':
if not self.sender.mdn_url:
raise ImproperlyConfigured(
'MDN URL must be set in the organization when MDN mode '
'is set to ASYNC')
as2_headers['receipt-delivery-option'] = self.sender.mdn_url
# Update the headers of the final payload and set its boundary
for k, v in as2_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary()) | Function builds the AS2 message. Compresses, signs and encrypts
the payload if applicable.
:param data: A byte string of the data to be transmitted.
:param filename:
Optional filename to be included in the Content-disposition header.
:param subject:
The subject for the AS2 message, used by some AS2 servers for
additional routing of messages. (default "AS2 Message")
:param content_type:
The content type for the AS2 message, to be used in the MIME
header. (default "application/edi-consent")
:param additional_headers:
Any additional headers to be included as part of the AS2 message. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L297-L453 | [
"def canonicalize(message):\n \"\"\"\n Function to convert an email Message to standard format string\n\n :param message: email.Message to be converted to standard string\n :return: the standard representation of the email message in bytes\n \"\"\"\n\n if message.is_multipart() \\\n or message.get('Content-Transfer-Encoding') != 'binary':\n\n return mime_to_bytes(message, 0).replace(\n b'\\r\\n', b'\\n').replace(b'\\r', b'\\n').replace(b'\\n', b'\\r\\n')\n else:\n message_header = ''\n message_body = message.get_payload(decode=True)\n for k, v in message.items():\n message_header += '{}: {}\\r\\n'.format(k, v)\n message_header += '\\r\\n'\n return message_header.encode('utf-8') + message_body\n",
"def compress_message(data_to_compress):\n \"\"\"Function compresses data and returns the generated ASN.1\n\n :param data_to_compress: A byte string of the data to be compressed\n\n :return: A CMS ASN.1 byte string of the compressed data. \n \"\"\"\n compressed_content = cms.ParsableOctetString(\n zlib.compress(data_to_compress))\n return cms.ContentInfo({\n 'content_type': cms.ContentType('compressed_data'),\n 'content': cms.CompressedData({\n 'version': cms.CMSVersion('v0'),\n 'compression_algorithm':\n cms.CompressionAlgorithm({\n 'algorithm': cms.CompressionAlgorithmId('zlib')\n }),\n 'encap_content_info': cms.EncapsulatedContentInfo({\n 'content_type': cms.ContentType('data'),\n 'content': compressed_content\n })\n })\n }).dump()\n",
"def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):\n \"\"\"Function encrypts data and returns the generated ASN.1\n\n :param data_to_encrypt: A byte string of the data to be encrypted\n\n :param enc_alg: The algorithm to be used for encrypting the data\n\n :param encryption_cert: The certificate to be used for encrypting the data\n\n :return: A CMS ASN.1 byte string of the encrypted data. \n \"\"\"\n\n enc_alg_list = enc_alg.split('_')\n cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]\n enc_alg_asn1, key, encrypted_content = None, None, None\n\n # Generate the symmetric encryption key and encrypt the message\n if cipher == 'tripledes':\n key = util.rand_bytes(int(key_length)//8)\n iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(\n key, data_to_encrypt, None)\n enc_alg_asn1 = algos.EncryptionAlgorithm({\n 'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),\n 'parameters': cms.OctetString(iv)\n })\n\n # Encrypt the key and build the ASN.1 message\n encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)\n\n return cms.ContentInfo({\n 'content_type': cms.ContentType('enveloped_data'),\n 'content': cms.EnvelopedData({\n 'version': cms.CMSVersion('v0'),\n 'recipient_infos': [\n cms.KeyTransRecipientInfo({\n 'version': cms.CMSVersion('v0'),\n 'rid': cms.RecipientIdentifier({\n 'issuer_and_serial_number': cms.IssuerAndSerialNumber({\n 'issuer': encryption_cert.asn1[\n 'tbs_certificate']['issuer'],\n 'serial_number': encryption_cert.asn1[\n 'tbs_certificate']['serial_number']\n })\n }),\n 'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({\n 'algorithm': cms.KeyEncryptionAlgorithmId('rsa')\n }),\n 'encrypted_key': cms.OctetString(encrypted_key)\n })\n ],\n 'encrypted_content_info': cms.EncryptedContentInfo({\n 'content_type': cms.ContentType('data'),\n 'content_encryption_algorithm': enc_alg_asn1,\n 'encrypted_content': encrypted_content\n })\n })\n }).dump()\n",
"def sign_message(data_to_sign, digest_alg, sign_key,\n use_signed_attributes=True):\n \"\"\"Function signs the data and returns the generated ASN.1\n\n :param data_to_sign: A byte string of the data to be signed.\n\n :param digest_alg: \n The digest algorithm to be used for generating the signature.\n\n :param sign_key: The key to be used for generating the signature.\n\n :param use_signed_attributes: Optional attribute to indicate weather the \n CMS signature attributes should be included in the signature or not.\n\n :return: A CMS ASN.1 byte string of the signed data. \n \"\"\"\n\n if use_signed_attributes:\n digest_func = hashlib.new(digest_alg)\n digest_func.update(data_to_sign)\n message_digest = digest_func.digest()\n\n class SmimeCapability(core.Sequence):\n _fields = [\n ('0', core.Any, {'optional': True}),\n ('1', core.Any, {'optional': True}),\n ('2', core.Any, {'optional': True}),\n ('3', core.Any, {'optional': True}),\n ('4', core.Any, {'optional': True})\n ]\n\n class SmimeCapabilities(core.Sequence):\n _fields = [\n ('0', SmimeCapability),\n ('1', SmimeCapability, {'optional': True}),\n ('2', SmimeCapability, {'optional': True}),\n ('3', SmimeCapability, {'optional': True}),\n ('4', SmimeCapability, {'optional': True}),\n ('5', SmimeCapability, {'optional': True}),\n ]\n\n smime_cap = OrderedDict([\n ('0', OrderedDict([\n ('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),\n ('1', OrderedDict([\n ('0', core.ObjectIdentifier('1.2.840.113549.3.2')),\n ('1', core.Integer(128))])),\n ('2', OrderedDict([\n ('0', core.ObjectIdentifier('1.2.840.113549.3.4')),\n ('1', core.Integer(128))])),\n ])\n\n signed_attributes = cms.CMSAttributes([\n cms.CMSAttribute({\n 'type': cms.CMSAttributeType('content_type'),\n 'values': cms.SetOfContentType([\n cms.ContentType('data')\n ])\n }),\n cms.CMSAttribute({\n 'type': cms.CMSAttributeType('signing_time'),\n 'values': cms.SetOfTime([\n cms.Time({\n 'utc_time': core.UTCTime(datetime.now())\n })\n ])\n }),\n 
cms.CMSAttribute({\n 'type': cms.CMSAttributeType('message_digest'),\n 'values': cms.SetOfOctetString([\n core.OctetString(message_digest)\n ])\n }),\n cms.CMSAttribute({\n 'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),\n 'values': cms.SetOfAny([\n core.Any(SmimeCapabilities(smime_cap))\n ])\n }),\n ])\n signature = asymmetric.rsa_pkcs1v15_sign(\n sign_key[0], signed_attributes.dump(), digest_alg)\n else:\n signed_attributes = None\n signature = asymmetric.rsa_pkcs1v15_sign(\n sign_key[0], data_to_sign, digest_alg)\n\n return cms.ContentInfo({\n 'content_type': cms.ContentType('signed_data'),\n 'content': cms.SignedData({\n 'version': cms.CMSVersion('v1'),\n 'digest_algorithms': cms.DigestAlgorithms([\n algos.DigestAlgorithm({\n 'algorithm': algos.DigestAlgorithmId(digest_alg)\n })\n ]),\n 'encap_content_info': cms.ContentInfo({\n 'content_type': cms.ContentType('data')\n }),\n 'certificates': cms.CertificateSet([\n cms.CertificateChoices({\n 'certificate': sign_key[1].asn1\n })\n ]),\n 'signer_infos': cms.SignerInfos([\n cms.SignerInfo({\n 'version': cms.CMSVersion('v1'),\n 'sid': cms.SignerIdentifier({\n 'issuer_and_serial_number': cms.IssuerAndSerialNumber({\n 'issuer': sign_key[1].asn1[\n 'tbs_certificate']['issuer'],\n 'serial_number': sign_key[1].asn1[\n 'tbs_certificate']['serial_number']\n })\n }),\n 'digest_algorithm': algos.DigestAlgorithm({\n 'algorithm': algos.DigestAlgorithmId(digest_alg)\n }),\n 'signed_attrs': signed_attributes,\n 'signature_algorithm': algos.SignedDigestAlgorithm({\n 'algorithm':\n algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')\n }),\n 'signature': core.OctetString(signature)\n })\n ])\n })\n }).dump()\n",
"def quote_as2name(unquoted_name):\n \"\"\"\n Function converts as2 name from unquoted to quoted format\n :param unquoted_name: the as2 name in unquoted format\n :return: the as2 name in unquoted format\n \"\"\"\n\n if re.search(r'[\\\\\" ]', unquoted_name, re.M):\n return '\"' + email.utils.quote(unquoted_name) + '\"'\n else:\n return unquoted_name\n",
"def make_mime_boundary(text=None):\n # Craft a random boundary. If text is given, ensure that the chosen\n # boundary doesn't appear in the text.\n\n width = len(repr(sys.maxsize - 1))\n fmt = '%%0%dd' % width\n\n token = random.randrange(sys.maxsize)\n boundary = ('=' * 15) + (fmt % token) + '=='\n if text is None:\n return boundary\n b = boundary\n counter = 0\n while True:\n cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)\n if not cre.search(text):\n break\n b = boundary + '.' + str(counter)\n counter += 1\n return b\n"
] | class Message(object):
"""Class for handling AS2 messages. Includes functions for both
parsing and building messages.
"""
def __init__(self, sender=None, receiver=None):
"""
:param sender:
An object of type <pyas2lib.as2.Organization>, representing the
sender of the message.
:param receiver:
An object of type <pyas2lib.as2.Partner>, representing the
receiver of the message .
"""
self.sender = sender
self.receiver = receiver
self.compressed = False
self.signed = False
self.digest_alg = None
self.encrypted = False
self.enc_alg = None
self.message_id = None
self.payload = None
self.mic = None
@property
def content(self):
"""Function returns the body of the as2 payload as a bytes object"""
if not self.payload:
return ''
if self.payload.is_multipart():
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
content = self.payload.get_payload()
if isinstance(content, str_cls):
content = content.encode('utf-8')
return content
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
@staticmethod
def decompress_data(payload):
if payload.get_content_type() == 'application/pkcs7-mime' \
and payload.get_param('smime-type') == 'compressed-data':
compressed_data = payload.get_payload(decode=True)
decompressed_data = decompress_message(compressed_data)
return True, parse_mime(decompressed_data)
return False, payload
def parse(self, raw_content, find_org_cb, find_partner_cb,
find_message_cb=None):
"""Function parses the RAW AS2 message; decrypts, verifies and
decompresses it and extracts the payload.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_org_cb:
A callback the returns an Organization object if exists. The
as2-to header value is passed as an argument to it.
:param find_partner_cb:
A callback the returns an Partner object if exists. The
as2-from header value is passed as an argument to it.
:param find_message_cb:
An optional callback the returns an Message object if exists in
order to check for duplicates. The message id and partner id is
passed as arguments to it.
:return:
A three element tuple containing (status, (exception, traceback)
, mdn). The status is a string indicating the status of the
transaction. The exception is populated with any exception raised
during processing and the mdn is an MDN object or None in case
the partner did not request it.
"""
# Parse the raw MIME message and extract its content and headers
status, detailed_status, exception, mdn = \
'processed', None, (None, None), None
self.payload = parse_mime(raw_content)
as2_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
as2_headers[k] = v
try:
# Get the organization and partner for this transmission
org_id = unquote_as2name(as2_headers['as2-to'])
self.receiver = find_org_cb(org_id)
if not self.receiver:
raise PartnerNotFound(
'Unknown AS2 organization with id {}'.format(org_id))
partner_id = unquote_as2name(as2_headers['as2-from'])
self.sender = find_partner_cb(partner_id)
if not self.sender:
raise PartnerNotFound(
'Unknown AS2 partner with id {}'.format(partner_id))
if find_message_cb and \
find_message_cb(self.message_id, partner_id):
raise DuplicateDocument(
'Duplicate message received, message with this ID '
'already processed.')
if self.sender.encrypt and \
self.payload.get_content_type() != 'application/pkcs7-mime':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be encrypted'
' but encrypted message not found.'.format(partner_id))
if self.payload.get_content_type() == 'application/pkcs7-mime' \
and self.payload.get_param('smime-type') == 'enveloped-data':
encrypted_data = self.payload.get_payload(decode=True)
# logger.debug(
# 'Decrypting the payload :\n%s' % self.payload.as_string())
self.encrypted = True
self.enc_alg, decrypted_content = decrypt_message(
encrypted_data,
self.receiver.decrypt_key
)
raw_content = decrypted_content
self.payload = parse_mime(decrypted_content)
if self.payload.get_content_type() == 'text/plain':
self.payload = email_message.Message()
self.payload.set_payload(decrypted_content)
self.payload.set_type('application/edi-consent')
# Check for compressed data here
self.compressed, self.payload = self.decompress_data(self.payload)
if self.sender.sign and \
self.payload.get_content_type() != 'multipart/signed':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be signed '
'but signed message not found.'.format(partner_id))
if self.payload.get_content_type() == 'multipart/signed':
# logger.debug(b'Verifying the signed payload:\n{0:s}'.format(
# self.payload.as_string()))
self.signed = True
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == "application/pkcs7-signature":
signature = part.get_payload(decode=True)
else:
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = canonicalize(self.payload)
verify_cert = self.sender.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = raw_content.split(message_boundary)[
1].replace(b'\n', b'\r\n')
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
# Calculate the MIC Hash of the message to be verified
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Check for compressed data here
if not self.compressed:
self.compressed, self.payload = self.decompress_data(self.payload)
except Exception as e:
status = getattr(e, 'disposition_type', 'processed/Error')
detailed_status = getattr(
e, 'disposition_modifier', 'unexpected-processing-error')
print(traceback.format_exc())
exception = (e, traceback.format_exc())
finally:
# Update the payload headers with the original headers
for k, v in as2_headers.items():
if self.payload.get(k) and k.lower() != 'content-disposition':
del self.payload[k]
self.payload.add_header(k, v)
if as2_headers.get('disposition-notification-to'):
mdn_mode = SYNCHRONOUS_MDN
mdn_url = as2_headers.get('receipt-delivery-option')
if mdn_url:
mdn_mode = ASYNCHRONOUS_MDN
digest_alg = as2_headers.get('disposition-notification-options')
if digest_alg:
digest_alg = digest_alg.split(';')[-1].split(',')[
-1].strip()
mdn = Mdn(
mdn_mode=mdn_mode, mdn_url=mdn_url, digest_alg=digest_alg)
mdn.build(message=self,
status=status,
detailed_status=detailed_status)
return status, exception, mdn
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Message.parse | python | def parse(self, raw_content, find_org_cb, find_partner_cb,
find_message_cb=None):
# Parse the raw MIME message and extract its content and headers
status, detailed_status, exception, mdn = \
'processed', None, (None, None), None
self.payload = parse_mime(raw_content)
as2_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
as2_headers[k] = v
try:
# Get the organization and partner for this transmission
org_id = unquote_as2name(as2_headers['as2-to'])
self.receiver = find_org_cb(org_id)
if not self.receiver:
raise PartnerNotFound(
'Unknown AS2 organization with id {}'.format(org_id))
partner_id = unquote_as2name(as2_headers['as2-from'])
self.sender = find_partner_cb(partner_id)
if not self.sender:
raise PartnerNotFound(
'Unknown AS2 partner with id {}'.format(partner_id))
if find_message_cb and \
find_message_cb(self.message_id, partner_id):
raise DuplicateDocument(
'Duplicate message received, message with this ID '
'already processed.')
if self.sender.encrypt and \
self.payload.get_content_type() != 'application/pkcs7-mime':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be encrypted'
' but encrypted message not found.'.format(partner_id))
if self.payload.get_content_type() == 'application/pkcs7-mime' \
and self.payload.get_param('smime-type') == 'enveloped-data':
encrypted_data = self.payload.get_payload(decode=True)
# logger.debug(
# 'Decrypting the payload :\n%s' % self.payload.as_string())
self.encrypted = True
self.enc_alg, decrypted_content = decrypt_message(
encrypted_data,
self.receiver.decrypt_key
)
raw_content = decrypted_content
self.payload = parse_mime(decrypted_content)
if self.payload.get_content_type() == 'text/plain':
self.payload = email_message.Message()
self.payload.set_payload(decrypted_content)
self.payload.set_type('application/edi-consent')
# Check for compressed data here
self.compressed, self.payload = self.decompress_data(self.payload)
if self.sender.sign and \
self.payload.get_content_type() != 'multipart/signed':
raise InsufficientSecurityError(
'Incoming messages from partner {} are must be signed '
'but signed message not found.'.format(partner_id))
if self.payload.get_content_type() == 'multipart/signed':
# logger.debug(b'Verifying the signed payload:\n{0:s}'.format(
# self.payload.as_string()))
self.signed = True
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == "application/pkcs7-signature":
signature = part.get_payload(decode=True)
else:
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = canonicalize(self.payload)
verify_cert = self.sender.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = raw_content.split(message_boundary)[
1].replace(b'\n', b'\r\n')
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
# Calculate the MIC Hash of the message to be verified
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Check for compressed data here
if not self.compressed:
self.compressed, self.payload = self.decompress_data(self.payload)
except Exception as e:
status = getattr(e, 'disposition_type', 'processed/Error')
detailed_status = getattr(
e, 'disposition_modifier', 'unexpected-processing-error')
print(traceback.format_exc())
exception = (e, traceback.format_exc())
finally:
# Update the payload headers with the original headers
for k, v in as2_headers.items():
if self.payload.get(k) and k.lower() != 'content-disposition':
del self.payload[k]
self.payload.add_header(k, v)
if as2_headers.get('disposition-notification-to'):
mdn_mode = SYNCHRONOUS_MDN
mdn_url = as2_headers.get('receipt-delivery-option')
if mdn_url:
mdn_mode = ASYNCHRONOUS_MDN
digest_alg = as2_headers.get('disposition-notification-options')
if digest_alg:
digest_alg = digest_alg.split(';')[-1].split(',')[
-1].strip()
mdn = Mdn(
mdn_mode=mdn_mode, mdn_url=mdn_url, digest_alg=digest_alg)
mdn.build(message=self,
status=status,
detailed_status=detailed_status)
return status, exception, mdn | Function parses the RAW AS2 message; decrypts, verifies and
decompresses it and extracts the payload.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_org_cb:
A callback the returns an Organization object if exists. The
as2-to header value is passed as an argument to it.
:param find_partner_cb:
A callback the returns an Partner object if exists. The
as2-from header value is passed as an argument to it.
:param find_message_cb:
An optional callback the returns an Message object if exists in
order to check for duplicates. The message id and partner id is
passed as arguments to it.
:return:
A three element tuple containing (status, (exception, traceback)
, mdn). The status is a string indicating the status of the
transaction. The exception is populated with any exception raised
during processing and the mdn is an MDN object or None in case
the partner did not request it. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L464-L623 | [
"def decrypt_message(encrypted_data, decryption_key):\n \"\"\"Function parses an ASN.1 encrypted message and extracts/decrypts \n the original message.\n\n :param encrypted_data: A CMS ASN.1 byte string containing the encrypted \n data.\n\n :param decryption_key: The key to be used for decrypting the data.\n\n :return: A byte string containing the decrypted original message. \n \"\"\"\n\n cms_content = cms.ContentInfo.load(encrypted_data)\n cipher, decrypted_content = None, None\n\n if cms_content['content_type'].native == 'enveloped_data':\n recipient_info = cms_content['content']['recipient_infos'][0].parse()\n key_enc_alg = recipient_info[\n 'key_encryption_algorithm']['algorithm'].native\n encrypted_key = recipient_info['encrypted_key'].native\n\n if key_enc_alg == 'rsa':\n try:\n key = asymmetric.rsa_pkcs1v15_decrypt(\n decryption_key[0], encrypted_key)\n except Exception as e:\n raise DecryptionError('Failed to decrypt the payload: '\n 'Could not extract decryption key.')\n alg = cms_content['content']['encrypted_content_info'][\n 'content_encryption_algorithm']\n\n encapsulated_data = cms_content['content'][\n 'encrypted_content_info']['encrypted_content'].native\n\n try:\n if alg.encryption_cipher == 'tripledes':\n cipher = 'tripledes_192_cbc'\n decrypted_content = symmetric.tripledes_cbc_pkcs5_decrypt(\n key, encapsulated_data, alg.encryption_iv)\n else:\n raise AS2Exception('Unsupported Encryption Algorithm')\n except Exception as e:\n raise DecryptionError(\n 'Failed to decrypt the payload: {}'.format(e))\n\n return cipher, decrypted_content\n",
"def unquote_as2name(quoted_name):\n \"\"\"\n Function converts as2 name from quoted to unquoted format\n\n :param quoted_name: the as2 name in quoted format\n :return: the as2 name in unquoted format\n \"\"\"\n return email.utils.unquote(quoted_name)\n",
"def build(self, message, status, detailed_status=None):\n \"\"\"Function builds and signs an AS2 MDN message.\n\n :param message: The received AS2 message for which this is an MDN.\n\n :param status: The status of processing of the received AS2 message.\n\n :param detailed_status:\n The optional detailed status of processing of the received AS2\n message. Used to give additional error info (default \"None\")\n\n \"\"\"\n\n # Generate message id using UUID 1 as it uses both hostname and time\n self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')\n self.orig_message_id = message.message_id\n\n # Set up the message headers\n mdn_headers = {\n 'AS2-Version': AS2_VERSION,\n 'ediint-features': EDIINT_FEATURES,\n 'Message-ID': '<{}>'.format(self.message_id),\n 'AS2-From': quote_as2name(message.headers.get('as2-to')),\n 'AS2-To': quote_as2name(message.headers.get('as2-from')),\n 'Date': email_utils.formatdate(localtime=True),\n 'user-agent': 'pyAS2 Open Source AS2 Software'\n }\n\n # Set the confirmation text message here\n confirmation_text = MDN_CONFIRM_TEXT\n\n # overwrite with organization specific message\n if message.receiver and message.receiver.mdn_confirm_text:\n confirmation_text = message.receiver.mdn_confirm_text\n\n # overwrite with partner specific message\n if message.sender and message.sender.mdn_confirm_text:\n confirmation_text = message.sender.mdn_confirm_text\n\n if status != 'processed':\n confirmation_text = MDN_FAILED_TEXT\n\n self.payload = MIMEMultipart(\n 'report', report_type='disposition-notification')\n\n # Create and attach the MDN Text Message\n mdn_text = email_message.Message()\n mdn_text.set_payload('%s\\n' % confirmation_text)\n mdn_text.set_type('text/plain')\n del mdn_text['MIME-Version']\n encoders.encode_7or8bit(mdn_text)\n self.payload.attach(mdn_text)\n\n # Create and attache the MDN Report Message\n mdn_base = email_message.Message()\n mdn_base.set_type('message/disposition-notification')\n mdn_report = 
'Reporting-UA: pyAS2 Open Source AS2 Software\\n'\n mdn_report += 'Original-Recipient: rfc822; {}\\n'.format(\n message.headers.get('as2-to'))\n mdn_report += 'Final-Recipient: rfc822; {}\\n'.format(\n message.headers.get('as2-to'))\n mdn_report += 'Original-Message-ID: <{}>\\n'.format(message.message_id)\n mdn_report += 'Disposition: automatic-action/' \\\n 'MDN-sent-automatically; {}'.format(status)\n if detailed_status:\n mdn_report += ': {}'.format(detailed_status)\n mdn_report += '\\n'\n if message.mic:\n mdn_report += 'Received-content-MIC: {}, {}\\n'.format(\n message.mic.decode(), message.digest_alg)\n mdn_base.set_payload(mdn_report)\n del mdn_base['MIME-Version']\n encoders.encode_7or8bit(mdn_base)\n self.payload.attach(mdn_base)\n\n # logger.debug('MDN for message %s created:\\n%s' % (\n # message.message_id, mdn_base.as_string()))\n\n # Sign the MDN if it is requested by the sender\n if message.headers.get('disposition-notification-options') and \\\n message.receiver and message.receiver.sign_key:\n self.digest_alg = \\\n message.headers['disposition-notification-options'].split(\n ';')[-1].split(',')[-1].strip().replace('-', '')\n signed_mdn = MIMEMultipart(\n 'signed', protocol=\"application/pkcs7-signature\")\n del signed_mdn['MIME-Version']\n signed_mdn.attach(self.payload)\n\n # Create the signature mime message\n signature = email_message.Message()\n signature.set_type('application/pkcs7-signature')\n signature.set_param('name', 'smime.p7s')\n signature.set_param('smime-type', 'signed-data')\n signature.add_header(\n 'Content-Disposition', 'attachment', filename='smime.p7s')\n del signature['MIME-Version']\n signature.set_payload(sign_message(\n canonicalize(self.payload),\n self.digest_alg,\n message.receiver.sign_key\n ))\n encoders.encode_base64(signature)\n # logger.debug(\n # 'Signature for MDN created:\\n%s' % signature.as_string())\n signed_mdn.set_param('micalg', self.digest_alg)\n signed_mdn.attach(signature)\n\n self.payload = 
signed_mdn\n\n # Update the headers of the final payload and set message boundary\n for k, v in mdn_headers.items():\n if self.payload.get(k):\n self.payload.replace_header(k, v)\n else:\n self.payload.add_header(k, v)\n if self.payload.is_multipart():\n self.payload.set_boundary(make_mime_boundary())\n",
"def find_org(self, headers):\n return self.org\n",
"def find_partner(self, headers):\n return self.partner\n",
"def find_none(self, as2_id):\n return None\n",
"def find_org(self, as2_id):\n return self.org\n",
"def find_partner(self, as2_id):\n return self.partner\n",
"def find_org(self, as2_id):\n return self.org\n",
"def find_partner(self, as2_id):\n return self.partner\n",
"def find_org(self, headers):\n return self.org\n",
"def find_partner(self, headers):\n return self.partner\n",
"find_message_cb=lambda x, y: False\n",
"find_message_cb=lambda x, y: False\n",
"find_message_cb=lambda x, y: False\n",
"find_message_cb=lambda x, y: True\n",
"find_message_cb=lambda x, y: False\n",
"find_message_cb=lambda x, y: False\n",
"find_message_cb=lambda x, y: False\n",
"lambda x: self.org,\n",
"lambda y: self.partner,\n",
"lambda x, y: False\n"
] | class Message(object):
"""Class for handling AS2 messages. Includes functions for both
parsing and building messages.
"""
def __init__(self, sender=None, receiver=None):
"""
:param sender:
An object of type <pyas2lib.as2.Organization>, representing the
sender of the message.
:param receiver:
An object of type <pyas2lib.as2.Partner>, representing the
receiver of the message .
"""
self.sender = sender
self.receiver = receiver
self.compressed = False
self.signed = False
self.digest_alg = None
self.encrypted = False
self.enc_alg = None
self.message_id = None
self.payload = None
self.mic = None
@property
def content(self):
"""Function returns the body of the as2 payload as a bytes object"""
if not self.payload:
return ''
if self.payload.is_multipart():
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
content = self.payload.get_payload()
if isinstance(content, str_cls):
content = content.encode('utf-8')
return content
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def build(self, data, filename=None, subject='AS2 Message',
content_type='application/edi-consent', additional_headers=None):
"""Function builds the AS2 message. Compresses, signs and encrypts
the payload if applicable.
:param data: A byte string of the data to be transmitted.
:param filename:
Optional filename to be included in the Content-disposition header.
:param subject:
The subject for the AS2 message, used by some AS2 servers for
additional routing of messages. (default "AS2 Message")
:param content_type:
The content type for the AS2 message, to be used in the MIME
header. (default "application/edi-consent")
:param additional_headers:
Any additional headers to be included as part of the AS2 message.
"""
# Validations
assert type(data) is byte_cls, \
'Parameter data must be of type {}'.format(byte_cls)
additional_headers = additional_headers if additional_headers else {}
assert type(additional_headers) is dict
if self.receiver.sign and not self.sender.sign_key:
raise ImproperlyConfigured(
'Signing of messages is enabled but sign key is not set '
'for the sender.')
if self.receiver.encrypt and not self.receiver.encrypt_cert:
raise ImproperlyConfigured(
'Encryption of messages is enabled but encrypt key is not set '
'for the receiver.')
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
# Set up the message headers
as2_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(self.sender.as2_name),
'AS2-To': quote_as2name(self.receiver.as2_name),
'Subject': subject,
'Date': email_utils.formatdate(localtime=True),
# 'recipient-address': message.partner.target_url,
}
as2_headers.update(additional_headers)
# Read the input and convert to bytes if value is unicode/str
# using utf-8 encoding and finally Canonicalize the payload
self.payload = email_message.Message()
self.payload.set_payload(data)
self.payload.set_type(content_type)
encoders.encode_7or8bit(self.payload)
if filename:
self.payload.add_header(
'Content-Disposition', 'attachment', filename=filename)
del self.payload['MIME-Version']
if self.receiver.compress:
self.compressed = True
compressed_message = email_message.Message()
compressed_message.set_type('application/pkcs7-mime')
compressed_message.set_param('name', 'smime.p7z')
compressed_message.set_param('smime-type', 'compressed-data')
compressed_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7z')
compressed_message.set_payload(
compress_message(canonicalize(self.payload)))
encoders.encode_base64(compressed_message)
self.payload = compressed_message
# logger.debug(b'Compressed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.sign:
self.signed, self.digest_alg = True, self.receiver.digest_alg
signed_message = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_message['MIME-Version']
signed_message.attach(self.payload)
# Calculate the MIC Hash of the message to be verified
mic_content = canonicalize(self.payload)
digest_func = hashlib.new(self.digest_alg)
digest_func.update(mic_content)
self.mic = binascii.b2a_base64(digest_func.digest()).strip()
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
mic_content, self.digest_alg, self.sender.sign_key))
encoders.encode_base64(signature)
signed_message.set_param('micalg', self.digest_alg)
signed_message.attach(signature)
self.payload = signed_message
# logger.debug(b'Signed message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.encrypt:
self.encrypted, self.enc_alg = True, self.receiver.enc_alg
encrypted_message = email_message.Message()
encrypted_message.set_type('application/pkcs7-mime')
encrypted_message.set_param('name', 'smime.p7m')
encrypted_message.set_param('smime-type', 'enveloped-data')
encrypted_message.add_header(
'Content-Disposition', 'attachment', filename='smime.p7m')
encrypt_cert = self.receiver.load_encrypt_cert()
encrypted_message.set_payload(encrypt_message(
canonicalize(self.payload),
self.enc_alg,
encrypt_cert
))
encoders.encode_base64(encrypted_message)
self.payload = encrypted_message
# logger.debug(b'Encrypted message %s payload as:\n%s' % (
# self.message_id, self.payload.as_string()))
if self.receiver.mdn_mode:
as2_headers['disposition-notification-to'] = 'no-reply@pyas2.com'
if self.receiver.mdn_digest_alg:
as2_headers['disposition-notification-options'] = \
'signed-receipt-protocol=required, pkcs7-signature; ' \
'signed-receipt-micalg=optional, {}'.format(
self.receiver.mdn_digest_alg)
if self.receiver.mdn_mode == 'ASYNC':
if not self.sender.mdn_url:
raise ImproperlyConfigured(
'MDN URL must be set in the organization when MDN mode '
'is set to ASYNC')
as2_headers['receipt-delivery-option'] = self.sender.mdn_url
# Update the headers of the final payload and set its boundary
for k, v in as2_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary())
@staticmethod
def decompress_data(payload):
if payload.get_content_type() == 'application/pkcs7-mime' \
and payload.get_param('smime-type') == 'compressed-data':
compressed_data = payload.get_payload(decode=True)
decompressed_data = decompress_message(compressed_data)
return True, parse_mime(decompressed_data)
return False, payload
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Mdn.content | python | def content(self):
if self.payload:
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
return '' | Function returns the body of the mdn message as a byte string | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L640-L651 | [
"def mime_to_bytes(msg, header_len):\n \"\"\"\n Function to convert and email Message to flat string format\n :param msg: email.Message to be converted to string\n :param header_len: the msx length of the header per line\n :return: the byte string representation of the email message\n \"\"\"\n fp = BytesIO()\n g = BytesGenerator(fp, maxheaderlen=header_len)\n g.flatten(msg)\n return fp.getvalue()\n"
] | class Mdn(object):
"""Class for handling AS2 MDNs. Includes functions for both
parsing and building them.
"""
def __init__(self, mdn_mode=None, digest_alg=None, mdn_url=None):
self.message_id = None
self.orig_message_id = None
self.payload = None
self.mdn_mode = mdn_mode
self.digest_alg = digest_alg
self.mdn_url = mdn_url
@property
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def build(self, message, status, detailed_status=None):
"""Function builds and signs an AS2 MDN message.
:param message: The received AS2 message for which this is an MDN.
:param status: The status of processing of the received AS2 message.
:param detailed_status:
The optional detailed status of processing of the received AS2
message. Used to give additional error info (default "None")
"""
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
self.orig_message_id = message.message_id
# Set up the message headers
mdn_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(message.headers.get('as2-to')),
'AS2-To': quote_as2name(message.headers.get('as2-from')),
'Date': email_utils.formatdate(localtime=True),
'user-agent': 'pyAS2 Open Source AS2 Software'
}
# Set the confirmation text message here
confirmation_text = MDN_CONFIRM_TEXT
# overwrite with organization specific message
if message.receiver and message.receiver.mdn_confirm_text:
confirmation_text = message.receiver.mdn_confirm_text
# overwrite with partner specific message
if message.sender and message.sender.mdn_confirm_text:
confirmation_text = message.sender.mdn_confirm_text
if status != 'processed':
confirmation_text = MDN_FAILED_TEXT
self.payload = MIMEMultipart(
'report', report_type='disposition-notification')
# Create and attach the MDN Text Message
mdn_text = email_message.Message()
mdn_text.set_payload('%s\n' % confirmation_text)
mdn_text.set_type('text/plain')
del mdn_text['MIME-Version']
encoders.encode_7or8bit(mdn_text)
self.payload.attach(mdn_text)
# Create and attache the MDN Report Message
mdn_base = email_message.Message()
mdn_base.set_type('message/disposition-notification')
mdn_report = 'Reporting-UA: pyAS2 Open Source AS2 Software\n'
mdn_report += 'Original-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Final-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Original-Message-ID: <{}>\n'.format(message.message_id)
mdn_report += 'Disposition: automatic-action/' \
'MDN-sent-automatically; {}'.format(status)
if detailed_status:
mdn_report += ': {}'.format(detailed_status)
mdn_report += '\n'
if message.mic:
mdn_report += 'Received-content-MIC: {}, {}\n'.format(
message.mic.decode(), message.digest_alg)
mdn_base.set_payload(mdn_report)
del mdn_base['MIME-Version']
encoders.encode_7or8bit(mdn_base)
self.payload.attach(mdn_base)
# logger.debug('MDN for message %s created:\n%s' % (
# message.message_id, mdn_base.as_string()))
# Sign the MDN if it is requested by the sender
if message.headers.get('disposition-notification-options') and \
message.receiver and message.receiver.sign_key:
self.digest_alg = \
message.headers['disposition-notification-options'].split(
';')[-1].split(',')[-1].strip().replace('-', '')
signed_mdn = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_mdn['MIME-Version']
signed_mdn.attach(self.payload)
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
canonicalize(self.payload),
self.digest_alg,
message.receiver.sign_key
))
encoders.encode_base64(signature)
# logger.debug(
# 'Signature for MDN created:\n%s' % signature.as_string())
signed_mdn.set_param('micalg', self.digest_alg)
signed_mdn.attach(signature)
self.payload = signed_mdn
# Update the headers of the final payload and set message boundary
for k, v in mdn_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary())
def parse(self, raw_content, find_message_cb):
"""Function parses the RAW AS2 MDN, verifies it and extracts the
processing status of the orginal AS2 message.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_message_cb:
A callback the must returns the original Message Object. The
original message-id and original recipient AS2 ID are passed
as arguments to it.
:returns:
A two element tuple containing (status, detailed_status). The
status is a string indicating the status of the transaction. The
optional detailed_status gives additional information about the
processing status.
"""
status, detailed_status = None, None
self.payload = parse_mime(raw_content)
self.orig_message_id, orig_recipient = self.detect_mdn()
# Call the find message callback which should return a Message instance
orig_message = find_message_cb(self.orig_message_id, orig_recipient)
# Extract the headers and save it
mdn_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
mdn_headers[k] = v
if orig_message.receiver.mdn_digest_alg \
and self.payload.get_content_type() != 'multipart/signed':
status = 'failed/Failure'
detailed_status = 'Expected signed MDN but unsigned MDN returned'
return status, detailed_status
if self.payload.get_content_type() == 'multipart/signed':
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == 'application/pkcs7-signature':
signature = part.get_payload(decode=True)
elif part.get_content_type() == 'multipart/report':
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = extract_first_part(raw_content, message_boundary)
verify_cert = orig_message.receiver.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = canonicalize(self.payload)
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
for part in self.payload.walk():
if part.get_content_type() == 'message/disposition-notification':
# logger.debug('Found MDN report for message %s:\n%s' % (
# orig_message.message_id, part.as_string()))
mdn = part.get_payload()[-1]
mdn_status = mdn['Disposition'].split(
';').pop().strip().split(':')
status = mdn_status[0]
if status == 'processed':
mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]
# TODO: Check MIC for all cases
if mdn_mic and orig_message.mic \
and mdn_mic != orig_message.mic.decode():
status = 'processed/warning'
detailed_status = 'Message Integrity check failed.'
else:
detailed_status = ' '.join(mdn_status[1:]).strip()
return status, detailed_status
def detect_mdn(self):
""" Function checks if the received raw message is an AS2 MDN or not.
:raises MDNNotFound: If the received payload is not an MDN then this
exception is raised.
:return:
A two element tuple containing (message_id, message_recipient). The
message_id is the original AS2 message id and the message_recipient
is the original AS2 message recipient.
"""
mdn_message = None
if self.payload.get_content_type() == 'multipart/report':
mdn_message = self.payload
elif self.payload.get_content_type() == 'multipart/signed':
for part in self.payload.walk():
if part.get_content_type() == 'multipart/report':
mdn_message = self.payload
if not mdn_message:
raise MDNNotFound('No MDN found in the received message')
message_id, message_recipient = None, None
for part in mdn_message.walk():
if part.get_content_type() == 'message/disposition-notification':
mdn = part.get_payload()[0]
message_id = mdn.get('Original-Message-ID').strip('<>')
message_recipient = mdn.get(
'Original-Recipient').split(';')[1].strip()
return message_id, message_recipient
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Mdn.build | python | def build(self, message, status, detailed_status=None):
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
self.orig_message_id = message.message_id
# Set up the message headers
mdn_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(message.headers.get('as2-to')),
'AS2-To': quote_as2name(message.headers.get('as2-from')),
'Date': email_utils.formatdate(localtime=True),
'user-agent': 'pyAS2 Open Source AS2 Software'
}
# Set the confirmation text message here
confirmation_text = MDN_CONFIRM_TEXT
# overwrite with organization specific message
if message.receiver and message.receiver.mdn_confirm_text:
confirmation_text = message.receiver.mdn_confirm_text
# overwrite with partner specific message
if message.sender and message.sender.mdn_confirm_text:
confirmation_text = message.sender.mdn_confirm_text
if status != 'processed':
confirmation_text = MDN_FAILED_TEXT
self.payload = MIMEMultipart(
'report', report_type='disposition-notification')
# Create and attach the MDN Text Message
mdn_text = email_message.Message()
mdn_text.set_payload('%s\n' % confirmation_text)
mdn_text.set_type('text/plain')
del mdn_text['MIME-Version']
encoders.encode_7or8bit(mdn_text)
self.payload.attach(mdn_text)
# Create and attache the MDN Report Message
mdn_base = email_message.Message()
mdn_base.set_type('message/disposition-notification')
mdn_report = 'Reporting-UA: pyAS2 Open Source AS2 Software\n'
mdn_report += 'Original-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Final-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Original-Message-ID: <{}>\n'.format(message.message_id)
mdn_report += 'Disposition: automatic-action/' \
'MDN-sent-automatically; {}'.format(status)
if detailed_status:
mdn_report += ': {}'.format(detailed_status)
mdn_report += '\n'
if message.mic:
mdn_report += 'Received-content-MIC: {}, {}\n'.format(
message.mic.decode(), message.digest_alg)
mdn_base.set_payload(mdn_report)
del mdn_base['MIME-Version']
encoders.encode_7or8bit(mdn_base)
self.payload.attach(mdn_base)
# logger.debug('MDN for message %s created:\n%s' % (
# message.message_id, mdn_base.as_string()))
# Sign the MDN if it is requested by the sender
if message.headers.get('disposition-notification-options') and \
message.receiver and message.receiver.sign_key:
self.digest_alg = \
message.headers['disposition-notification-options'].split(
';')[-1].split(',')[-1].strip().replace('-', '')
signed_mdn = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_mdn['MIME-Version']
signed_mdn.attach(self.payload)
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
canonicalize(self.payload),
self.digest_alg,
message.receiver.sign_key
))
encoders.encode_base64(signature)
# logger.debug(
# 'Signature for MDN created:\n%s' % signature.as_string())
signed_mdn.set_param('micalg', self.digest_alg)
signed_mdn.attach(signature)
self.payload = signed_mdn
# Update the headers of the final payload and set message boundary
for k, v in mdn_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary()) | Function builds and signs an AS2 MDN message.
:param message: The received AS2 message for which this is an MDN.
:param status: The status of processing of the received AS2 message.
:param detailed_status:
The optional detailed status of processing of the received AS2
message. Used to give additional error info (default "None") | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L668-L785 | [
"def quote_as2name(unquoted_name):\n \"\"\"\n Function converts as2 name from unquoted to quoted format\n :param unquoted_name: the as2 name in unquoted format\n :return: the as2 name in unquoted format\n \"\"\"\n\n if re.search(r'[\\\\\" ]', unquoted_name, re.M):\n return '\"' + email.utils.quote(unquoted_name) + '\"'\n else:\n return unquoted_name\n",
"def make_mime_boundary(text=None):\n # Craft a random boundary. If text is given, ensure that the chosen\n # boundary doesn't appear in the text.\n\n width = len(repr(sys.maxsize - 1))\n fmt = '%%0%dd' % width\n\n token = random.randrange(sys.maxsize)\n boundary = ('=' * 15) + (fmt % token) + '=='\n if text is None:\n return boundary\n b = boundary\n counter = 0\n while True:\n cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)\n if not cre.search(text):\n break\n b = boundary + '.' + str(counter)\n counter += 1\n return b\n"
] | class Mdn(object):
"""Class for handling AS2 MDNs. Includes functions for both
parsing and building them.
"""
def __init__(self, mdn_mode=None, digest_alg=None, mdn_url=None):
self.message_id = None
self.orig_message_id = None
self.payload = None
self.mdn_mode = mdn_mode
self.digest_alg = digest_alg
self.mdn_url = mdn_url
@property
def content(self):
"""Function returns the body of the mdn message as a byte string"""
if self.payload:
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
return ''
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def parse(self, raw_content, find_message_cb):
"""Function parses the RAW AS2 MDN, verifies it and extracts the
processing status of the orginal AS2 message.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_message_cb:
A callback the must returns the original Message Object. The
original message-id and original recipient AS2 ID are passed
as arguments to it.
:returns:
A two element tuple containing (status, detailed_status). The
status is a string indicating the status of the transaction. The
optional detailed_status gives additional information about the
processing status.
"""
status, detailed_status = None, None
self.payload = parse_mime(raw_content)
self.orig_message_id, orig_recipient = self.detect_mdn()
# Call the find message callback which should return a Message instance
orig_message = find_message_cb(self.orig_message_id, orig_recipient)
# Extract the headers and save it
mdn_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
mdn_headers[k] = v
if orig_message.receiver.mdn_digest_alg \
and self.payload.get_content_type() != 'multipart/signed':
status = 'failed/Failure'
detailed_status = 'Expected signed MDN but unsigned MDN returned'
return status, detailed_status
if self.payload.get_content_type() == 'multipart/signed':
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == 'application/pkcs7-signature':
signature = part.get_payload(decode=True)
elif part.get_content_type() == 'multipart/report':
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = extract_first_part(raw_content, message_boundary)
verify_cert = orig_message.receiver.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = canonicalize(self.payload)
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
for part in self.payload.walk():
if part.get_content_type() == 'message/disposition-notification':
# logger.debug('Found MDN report for message %s:\n%s' % (
# orig_message.message_id, part.as_string()))
mdn = part.get_payload()[-1]
mdn_status = mdn['Disposition'].split(
';').pop().strip().split(':')
status = mdn_status[0]
if status == 'processed':
mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]
# TODO: Check MIC for all cases
if mdn_mic and orig_message.mic \
and mdn_mic != orig_message.mic.decode():
status = 'processed/warning'
detailed_status = 'Message Integrity check failed.'
else:
detailed_status = ' '.join(mdn_status[1:]).strip()
return status, detailed_status
def detect_mdn(self):
""" Function checks if the received raw message is an AS2 MDN or not.
:raises MDNNotFound: If the received payload is not an MDN then this
exception is raised.
:return:
A two element tuple containing (message_id, message_recipient). The
message_id is the original AS2 message id and the message_recipient
is the original AS2 message recipient.
"""
mdn_message = None
if self.payload.get_content_type() == 'multipart/report':
mdn_message = self.payload
elif self.payload.get_content_type() == 'multipart/signed':
for part in self.payload.walk():
if part.get_content_type() == 'multipart/report':
mdn_message = self.payload
if not mdn_message:
raise MDNNotFound('No MDN found in the received message')
message_id, message_recipient = None, None
for part in mdn_message.walk():
if part.get_content_type() == 'message/disposition-notification':
mdn = part.get_payload()[0]
message_id = mdn.get('Original-Message-ID').strip('<>')
message_recipient = mdn.get(
'Original-Recipient').split(';')[1].strip()
return message_id, message_recipient
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Mdn.parse | python | def parse(self, raw_content, find_message_cb):
status, detailed_status = None, None
self.payload = parse_mime(raw_content)
self.orig_message_id, orig_recipient = self.detect_mdn()
# Call the find message callback which should return a Message instance
orig_message = find_message_cb(self.orig_message_id, orig_recipient)
# Extract the headers and save it
mdn_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
mdn_headers[k] = v
if orig_message.receiver.mdn_digest_alg \
and self.payload.get_content_type() != 'multipart/signed':
status = 'failed/Failure'
detailed_status = 'Expected signed MDN but unsigned MDN returned'
return status, detailed_status
if self.payload.get_content_type() == 'multipart/signed':
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == 'application/pkcs7-signature':
signature = part.get_payload(decode=True)
elif part.get_content_type() == 'multipart/report':
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = extract_first_part(raw_content, message_boundary)
verify_cert = orig_message.receiver.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = canonicalize(self.payload)
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
for part in self.payload.walk():
if part.get_content_type() == 'message/disposition-notification':
# logger.debug('Found MDN report for message %s:\n%s' % (
# orig_message.message_id, part.as_string()))
mdn = part.get_payload()[-1]
mdn_status = mdn['Disposition'].split(
';').pop().strip().split(':')
status = mdn_status[0]
if status == 'processed':
mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]
# TODO: Check MIC for all cases
if mdn_mic and orig_message.mic \
and mdn_mic != orig_message.mic.decode():
status = 'processed/warning'
detailed_status = 'Message Integrity check failed.'
else:
detailed_status = ' '.join(mdn_status[1:]).strip()
return status, detailed_status | Function parses the RAW AS2 MDN, verifies it and extracts the
processing status of the orginal AS2 message.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_message_cb:
A callback the must returns the original Message Object. The
original message-id and original recipient AS2 ID are passed
as arguments to it.
:returns:
A two element tuple containing (status, detailed_status). The
status is a string indicating the status of the transaction. The
optional detailed_status gives additional information about the
processing status. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L787-L869 | [
"def canonicalize(message):\n \"\"\"\n Function to convert an email Message to standard format string\n\n :param message: email.Message to be converted to standard string\n :return: the standard representation of the email message in bytes\n \"\"\"\n\n if message.is_multipart() \\\n or message.get('Content-Transfer-Encoding') != 'binary':\n\n return mime_to_bytes(message, 0).replace(\n b'\\r\\n', b'\\n').replace(b'\\r', b'\\n').replace(b'\\n', b'\\r\\n')\n else:\n message_header = ''\n message_body = message.get_payload(decode=True)\n for k, v in message.items():\n message_header += '{}: {}\\r\\n'.format(k, v)\n message_header += '\\r\\n'\n return message_header.encode('utf-8') + message_body\n",
"def verify_message(data_to_verify, signature, verify_cert):\n \"\"\"Function parses an ASN.1 encrypted message and extracts/decrypts \n the original message.\n\n :param data_to_verify: \n A byte string of the data to be verified against the signature. \n\n :param signature: A CMS ASN.1 byte string containing the signature.\n\n :param verify_cert: The certificate to be used for verifying the signature.\n\n :return: The digest algorithm that was used in the signature. \n \"\"\"\n\n cms_content = cms.ContentInfo.load(signature)\n digest_alg = None\n\n if cms_content['content_type'].native == 'signed_data':\n for signer in cms_content['content']['signer_infos']:\n\n signed_attributes = signer['signed_attrs'].copy()\n digest_alg = signer['digest_algorithm']['algorithm'].native\n\n if digest_alg not in DIGEST_ALGORITHMS:\n raise Exception('Unsupported Digest Algorithm')\n\n sig_alg = signer['signature_algorithm']['algorithm'].native\n sig = signer['signature'].native\n signed_data = data_to_verify\n\n if signed_attributes:\n attr_dict = {}\n for attr in signed_attributes.native:\n attr_dict[attr['type']] = attr['values']\n\n message_digest = byte_cls()\n for d in attr_dict['message_digest']:\n message_digest += d\n\n digest_func = hashlib.new(digest_alg)\n digest_func.update(data_to_verify)\n calc_message_digest = digest_func.digest()\n\n if message_digest != calc_message_digest:\n raise IntegrityError('Failed to verify message signature: '\n 'Message Digest does not match.')\n\n signed_data = signed_attributes.untag().dump()\n\n try:\n if sig_alg == 'rsassa_pkcs1v15':\n asymmetric.rsa_pkcs1v15_verify(\n verify_cert, sig, signed_data, digest_alg)\n elif sig_alg == 'rsassa_pss':\n asymmetric.rsa_pss_verify(\n verify_cert, sig, signed_data, digest_alg)\n else:\n raise AS2Exception('Unsupported Signature Algorithm')\n except Exception as e:\n raise IntegrityError(\n 'Failed to verify message signature: {}'.format(e))\n\n return digest_alg\n",
"def extract_first_part(message, boundary):\n \"\"\" Function to extract the first part of a multipart message\"\"\"\n first_message = message.split(boundary)[1].lstrip()\n if first_message.endswith(b'\\r\\n'):\n first_message = first_message[:-2]\n else:\n first_message = first_message[:-1]\n return first_message\n",
"def detect_mdn(self):\n \"\"\" Function checks if the received raw message is an AS2 MDN or not.\n\n :raises MDNNotFound: If the received payload is not an MDN then this\n exception is raised.\n\n :return:\n A two element tuple containing (message_id, message_recipient). The\n message_id is the original AS2 message id and the message_recipient\n is the original AS2 message recipient.\n \"\"\"\n mdn_message = None\n if self.payload.get_content_type() == 'multipart/report':\n mdn_message = self.payload\n elif self.payload.get_content_type() == 'multipart/signed':\n for part in self.payload.walk():\n if part.get_content_type() == 'multipart/report':\n mdn_message = self.payload\n\n if not mdn_message:\n raise MDNNotFound('No MDN found in the received message')\n\n message_id, message_recipient = None, None\n for part in mdn_message.walk():\n if part.get_content_type() == 'message/disposition-notification':\n mdn = part.get_payload()[0]\n message_id = mdn.get('Original-Message-ID').strip('<>')\n message_recipient = mdn.get(\n 'Original-Recipient').split(';')[1].strip()\n return message_id, message_recipient\n",
"def find_message(self, message_id, message_recipient):\n return self.out_message\n",
"def find_message(self, message_id, message_recipient):\n return self.out_message\n",
"def find_message(self, message_id, message_recipient):\n return self.out_message\n",
"def find_message(self, message_id, message_recipient):\n return self.out_message\n",
"def find_message(self, message_id, message_recipient):\n message = as2.Message()\n message.sender = self.org\n message.receiver = self.partner\n message.mic = b'O4bvrm5t2YunRfwvZicNdEUmPaPZ9vUslX8loVLDck0='\n return message\n",
"mdn.read(), lambda x, y: message)\n"
] | class Mdn(object):
"""Class for handling AS2 MDNs. Includes functions for both
parsing and building them.
"""
def __init__(self, mdn_mode=None, digest_alg=None, mdn_url=None):
self.message_id = None
self.orig_message_id = None
self.payload = None
self.mdn_mode = mdn_mode
self.digest_alg = digest_alg
self.mdn_url = mdn_url
@property
def content(self):
"""Function returns the body of the mdn message as a byte string"""
if self.payload:
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
return ''
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def build(self, message, status, detailed_status=None):
"""Function builds and signs an AS2 MDN message.
:param message: The received AS2 message for which this is an MDN.
:param status: The status of processing of the received AS2 message.
:param detailed_status:
The optional detailed status of processing of the received AS2
message. Used to give additional error info (default "None")
"""
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
self.orig_message_id = message.message_id
# Set up the message headers
mdn_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(message.headers.get('as2-to')),
'AS2-To': quote_as2name(message.headers.get('as2-from')),
'Date': email_utils.formatdate(localtime=True),
'user-agent': 'pyAS2 Open Source AS2 Software'
}
# Set the confirmation text message here
confirmation_text = MDN_CONFIRM_TEXT
# overwrite with organization specific message
if message.receiver and message.receiver.mdn_confirm_text:
confirmation_text = message.receiver.mdn_confirm_text
# overwrite with partner specific message
if message.sender and message.sender.mdn_confirm_text:
confirmation_text = message.sender.mdn_confirm_text
if status != 'processed':
confirmation_text = MDN_FAILED_TEXT
self.payload = MIMEMultipart(
'report', report_type='disposition-notification')
# Create and attach the MDN Text Message
mdn_text = email_message.Message()
mdn_text.set_payload('%s\n' % confirmation_text)
mdn_text.set_type('text/plain')
del mdn_text['MIME-Version']
encoders.encode_7or8bit(mdn_text)
self.payload.attach(mdn_text)
# Create and attache the MDN Report Message
mdn_base = email_message.Message()
mdn_base.set_type('message/disposition-notification')
mdn_report = 'Reporting-UA: pyAS2 Open Source AS2 Software\n'
mdn_report += 'Original-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Final-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Original-Message-ID: <{}>\n'.format(message.message_id)
mdn_report += 'Disposition: automatic-action/' \
'MDN-sent-automatically; {}'.format(status)
if detailed_status:
mdn_report += ': {}'.format(detailed_status)
mdn_report += '\n'
if message.mic:
mdn_report += 'Received-content-MIC: {}, {}\n'.format(
message.mic.decode(), message.digest_alg)
mdn_base.set_payload(mdn_report)
del mdn_base['MIME-Version']
encoders.encode_7or8bit(mdn_base)
self.payload.attach(mdn_base)
# logger.debug('MDN for message %s created:\n%s' % (
# message.message_id, mdn_base.as_string()))
# Sign the MDN if it is requested by the sender
if message.headers.get('disposition-notification-options') and \
message.receiver and message.receiver.sign_key:
self.digest_alg = \
message.headers['disposition-notification-options'].split(
';')[-1].split(',')[-1].strip().replace('-', '')
signed_mdn = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_mdn['MIME-Version']
signed_mdn.attach(self.payload)
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
canonicalize(self.payload),
self.digest_alg,
message.receiver.sign_key
))
encoders.encode_base64(signature)
# logger.debug(
# 'Signature for MDN created:\n%s' % signature.as_string())
signed_mdn.set_param('micalg', self.digest_alg)
signed_mdn.attach(signature)
self.payload = signed_mdn
# Update the headers of the final payload and set message boundary
for k, v in mdn_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary())
def detect_mdn(self):
""" Function checks if the received raw message is an AS2 MDN or not.
:raises MDNNotFound: If the received payload is not an MDN then this
exception is raised.
:return:
A two element tuple containing (message_id, message_recipient). The
message_id is the original AS2 message id and the message_recipient
is the original AS2 message recipient.
"""
mdn_message = None
if self.payload.get_content_type() == 'multipart/report':
mdn_message = self.payload
elif self.payload.get_content_type() == 'multipart/signed':
for part in self.payload.walk():
if part.get_content_type() == 'multipart/report':
mdn_message = self.payload
if not mdn_message:
raise MDNNotFound('No MDN found in the received message')
message_id, message_recipient = None, None
for part in mdn_message.walk():
if part.get_content_type() == 'message/disposition-notification':
mdn = part.get_payload()[0]
message_id = mdn.get('Original-Message-ID').strip('<>')
message_recipient = mdn.get(
'Original-Recipient').split(';')[1].strip()
return message_id, message_recipient
|
abhishek-ram/pyas2-lib | pyas2lib/as2.py | Mdn.detect_mdn | python | def detect_mdn(self):
mdn_message = None
if self.payload.get_content_type() == 'multipart/report':
mdn_message = self.payload
elif self.payload.get_content_type() == 'multipart/signed':
for part in self.payload.walk():
if part.get_content_type() == 'multipart/report':
mdn_message = self.payload
if not mdn_message:
raise MDNNotFound('No MDN found in the received message')
message_id, message_recipient = None, None
for part in mdn_message.walk():
if part.get_content_type() == 'message/disposition-notification':
mdn = part.get_payload()[0]
message_id = mdn.get('Original-Message-ID').strip('<>')
message_recipient = mdn.get(
'Original-Recipient').split(';')[1].strip()
return message_id, message_recipient | Function checks if the received raw message is an AS2 MDN or not.
:raises MDNNotFound: If the received payload is not an MDN then this
exception is raised.
:return:
A two element tuple containing (message_id, message_recipient). The
message_id is the original AS2 message id and the message_recipient
is the original AS2 message recipient. | train | https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/as2.py#L871-L900 | null | class Mdn(object):
"""Class for handling AS2 MDNs. Includes functions for both
parsing and building them.
"""
def __init__(self, mdn_mode=None, digest_alg=None, mdn_url=None):
self.message_id = None
self.orig_message_id = None
self.payload = None
self.mdn_mode = mdn_mode
self.digest_alg = digest_alg
self.mdn_url = mdn_url
@property
def content(self):
"""Function returns the body of the mdn message as a byte string"""
if self.payload:
message_bytes = mime_to_bytes(
self.payload, 0).replace(b'\n', b'\r\n')
boundary = b'--' + self.payload.get_boundary().encode('utf-8')
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
return ''
@property
def headers(self):
if self.payload:
return dict(self.payload.items())
else:
return {}
@property
def headers_str(self):
message_header = ''
if self.payload:
for k, v in self.headers.items():
message_header += '{}: {}\r\n'.format(k, v)
return message_header.encode('utf-8')
def build(self, message, status, detailed_status=None):
"""Function builds and signs an AS2 MDN message.
:param message: The received AS2 message for which this is an MDN.
:param status: The status of processing of the received AS2 message.
:param detailed_status:
The optional detailed status of processing of the received AS2
message. Used to give additional error info (default "None")
"""
# Generate message id using UUID 1 as it uses both hostname and time
self.message_id = email_utils.make_msgid().lstrip('<').rstrip('>')
self.orig_message_id = message.message_id
# Set up the message headers
mdn_headers = {
'AS2-Version': AS2_VERSION,
'ediint-features': EDIINT_FEATURES,
'Message-ID': '<{}>'.format(self.message_id),
'AS2-From': quote_as2name(message.headers.get('as2-to')),
'AS2-To': quote_as2name(message.headers.get('as2-from')),
'Date': email_utils.formatdate(localtime=True),
'user-agent': 'pyAS2 Open Source AS2 Software'
}
# Set the confirmation text message here
confirmation_text = MDN_CONFIRM_TEXT
# overwrite with organization specific message
if message.receiver and message.receiver.mdn_confirm_text:
confirmation_text = message.receiver.mdn_confirm_text
# overwrite with partner specific message
if message.sender and message.sender.mdn_confirm_text:
confirmation_text = message.sender.mdn_confirm_text
if status != 'processed':
confirmation_text = MDN_FAILED_TEXT
self.payload = MIMEMultipart(
'report', report_type='disposition-notification')
# Create and attach the MDN Text Message
mdn_text = email_message.Message()
mdn_text.set_payload('%s\n' % confirmation_text)
mdn_text.set_type('text/plain')
del mdn_text['MIME-Version']
encoders.encode_7or8bit(mdn_text)
self.payload.attach(mdn_text)
# Create and attache the MDN Report Message
mdn_base = email_message.Message()
mdn_base.set_type('message/disposition-notification')
mdn_report = 'Reporting-UA: pyAS2 Open Source AS2 Software\n'
mdn_report += 'Original-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Final-Recipient: rfc822; {}\n'.format(
message.headers.get('as2-to'))
mdn_report += 'Original-Message-ID: <{}>\n'.format(message.message_id)
mdn_report += 'Disposition: automatic-action/' \
'MDN-sent-automatically; {}'.format(status)
if detailed_status:
mdn_report += ': {}'.format(detailed_status)
mdn_report += '\n'
if message.mic:
mdn_report += 'Received-content-MIC: {}, {}\n'.format(
message.mic.decode(), message.digest_alg)
mdn_base.set_payload(mdn_report)
del mdn_base['MIME-Version']
encoders.encode_7or8bit(mdn_base)
self.payload.attach(mdn_base)
# logger.debug('MDN for message %s created:\n%s' % (
# message.message_id, mdn_base.as_string()))
# Sign the MDN if it is requested by the sender
if message.headers.get('disposition-notification-options') and \
message.receiver and message.receiver.sign_key:
self.digest_alg = \
message.headers['disposition-notification-options'].split(
';')[-1].split(',')[-1].strip().replace('-', '')
signed_mdn = MIMEMultipart(
'signed', protocol="application/pkcs7-signature")
del signed_mdn['MIME-Version']
signed_mdn.attach(self.payload)
# Create the signature mime message
signature = email_message.Message()
signature.set_type('application/pkcs7-signature')
signature.set_param('name', 'smime.p7s')
signature.set_param('smime-type', 'signed-data')
signature.add_header(
'Content-Disposition', 'attachment', filename='smime.p7s')
del signature['MIME-Version']
signature.set_payload(sign_message(
canonicalize(self.payload),
self.digest_alg,
message.receiver.sign_key
))
encoders.encode_base64(signature)
# logger.debug(
# 'Signature for MDN created:\n%s' % signature.as_string())
signed_mdn.set_param('micalg', self.digest_alg)
signed_mdn.attach(signature)
self.payload = signed_mdn
# Update the headers of the final payload and set message boundary
for k, v in mdn_headers.items():
if self.payload.get(k):
self.payload.replace_header(k, v)
else:
self.payload.add_header(k, v)
if self.payload.is_multipart():
self.payload.set_boundary(make_mime_boundary())
def parse(self, raw_content, find_message_cb):
"""Function parses the RAW AS2 MDN, verifies it and extracts the
processing status of the orginal AS2 message.
:param raw_content:
A byte string of the received HTTP headers followed by the body.
:param find_message_cb:
A callback the must returns the original Message Object. The
original message-id and original recipient AS2 ID are passed
as arguments to it.
:returns:
A two element tuple containing (status, detailed_status). The
status is a string indicating the status of the transaction. The
optional detailed_status gives additional information about the
processing status.
"""
status, detailed_status = None, None
self.payload = parse_mime(raw_content)
self.orig_message_id, orig_recipient = self.detect_mdn()
# Call the find message callback which should return a Message instance
orig_message = find_message_cb(self.orig_message_id, orig_recipient)
# Extract the headers and save it
mdn_headers = {}
for k, v in self.payload.items():
k = k.lower()
if k == 'message-id':
self.message_id = v.lstrip('<').rstrip('>')
mdn_headers[k] = v
if orig_message.receiver.mdn_digest_alg \
and self.payload.get_content_type() != 'multipart/signed':
status = 'failed/Failure'
detailed_status = 'Expected signed MDN but unsigned MDN returned'
return status, detailed_status
if self.payload.get_content_type() == 'multipart/signed':
signature = None
message_boundary = (
'--' + self.payload.get_boundary()).encode('utf-8')
for part in self.payload.walk():
if part.get_content_type() == 'application/pkcs7-signature':
signature = part.get_payload(decode=True)
elif part.get_content_type() == 'multipart/report':
self.payload = part
# Verify the message, first using raw message and if it fails
# then convert to canonical form and try again
mic_content = extract_first_part(raw_content, message_boundary)
verify_cert = orig_message.receiver.load_verify_cert()
try:
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
except IntegrityError:
mic_content = canonicalize(self.payload)
self.digest_alg = verify_message(
mic_content, signature, verify_cert)
for part in self.payload.walk():
if part.get_content_type() == 'message/disposition-notification':
# logger.debug('Found MDN report for message %s:\n%s' % (
# orig_message.message_id, part.as_string()))
mdn = part.get_payload()[-1]
mdn_status = mdn['Disposition'].split(
';').pop().strip().split(':')
status = mdn_status[0]
if status == 'processed':
mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]
# TODO: Check MIC for all cases
if mdn_mic and orig_message.mic \
and mdn_mic != orig_message.mic.decode():
status = 'processed/warning'
detailed_status = 'Message Integrity check failed.'
else:
detailed_status = ' '.join(mdn_status[1:]).strip()
return status, detailed_status
|
olt/scriptine | doc/example_hello.py | hello_command | python | def hello_command(name, print_counter=False, repeat=10):
for i in range(repeat):
if print_counter:
print i+1,
print 'Hello, %s!' % name | Print nice greetings. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/doc/example_hello.py#L3-L8 | null | #! /usr/bin/env python
if __name__ == '__main__':
import scriptine
scriptine.run() |
olt/scriptine | scriptine/command.py | autocmds | python | def autocmds(namespace=None, args=None, command_suffix='_command',
add_dry_run_option=True, add_verbosity_option=True):
if namespace is None:
namespace = inspect.currentframe().f_back.f_globals
elif type(namespace) is types.ModuleType:
namespace = namespace.__dict__
if args is None:
args = sys.argv
if len(args) < 2 or args[1] in ('-h', '--help'):
print_help(namespace, command_suffix)
return
command_name = args.pop(1).replace('-', '_')
function = namespace[command_name + command_suffix]
parse_and_run_function(function, args, command_name,
add_dry_run_option=add_dry_run_option,
add_verbosity_option=add_verbosity_option) | Parse and run commands.
Will search ``namespace`` for functions that end with ``command_suffix``.
:param namespace: the namespace/module to search for commands
:param args: the arguments for the command parser. defaults to
:data:`sys.argv`
:param command_suffix: function name suffix that indicates that a
function is a command. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/command.py#L167-L196 | [
"def print_help(namespace, command_suffix):\n group_commands = defaultdict(list)\n for func_name, func in namespace.iteritems():\n if func_name.endswith(command_suffix):\n func = namespace[func_name]\n group = getattr(func, 'group', None)\n command_name = func_name[:-len(command_suffix)].replace('_', '-')\n group_commands[group].append((command_name, func.__doc__))\n\n if not group_commands:\n print >>sys.stderr, 'no commands found in', sys.argv[0]\n return\n\n usage = 'usage: %prog command [options]'\n parser = optparse.OptionParser(usage)\n parser.print_help(sys.stderr)\n\n default_commands = group_commands.pop(None, None)\n if default_commands:\n print_commands(None, default_commands)\n for group_name, commands in group_commands.iteritems():\n print_commands(group_name, commands)\n",
"def parse_and_run_function(function, args=None, command_name=None,\n add_dry_run_option=True, add_verbosity_option=True):\n #TODO refactor me, I'm too long\n if args is None:\n args = sys.argv\n\n required_args, optional_args = inspect_args(function)\n\n func_doc = function.__doc__ or ''\n params_doc = parse_rst_params(func_doc)\n\n usage = 'usage: %prog '\n if command_name:\n usage += command_name.replace('_', '-') + ' '\n usage += '[options] ' + ' '.join(required_args)\n\n if func_doc:\n first_paragraph = re.findall('(.*?)((\\n[ \\t]*\\n)|$)', func_doc,\n re.DOTALL)[0][0]\n first_paragraph = ' '.join(l.strip() for l in\n first_paragraph.split('\\n'))\n usage += '\\n\\n' + '\\n'.join(wrap(first_paragraph, 60))\n\n if set(required_args).intersection(params_doc.keys()):\n usage += '\\n\\nRequired arguments:'\n for arg in required_args:\n usage += '\\n%s' % arg\n if arg in params_doc:\n usage += ': %s' % params_doc[arg]\n\n add_help_option = True\n if getattr(function, 'no_help', False):\n add_help_option = False\n\n fetch_all = None\n if hasattr(function, 'fetch_all'):\n fetch_all = function.fetch_all\n optional_args = [(arg, default) for arg, default in optional_args\n if arg != fetch_all]\n\n parser = optparse.OptionParser\n if getattr(function, 'non_strict', False):\n parser = NonStrictOptionParser\n\n parser = parser(usage, add_help_option=add_help_option)\n\n for arg_name, default in optional_args:\n options = {}\n if isinstance(default, bool):\n if default:\n options = {'action': 'store_false'}\n else: \n options = {'action': 'store_true'}\n elif isinstance(default, int):\n options = {'type': 'int'}\n elif isinstance(default, float):\n options = {'type': 'float'}\n parser.add_option('--' + arg_name.replace('_', '-'),\n help=params_doc.get(arg_name, None),\n dest=arg_name, default=default, metavar=default, **options)\n\n if add_dry_run_option:\n parser.add_option('--dry-run', '-n', dest='dry_run', default=False,\n action='store_true', help='don\\'t actually do 
anything')\n\n if getattr(function, 'no_verbosity', False):\n add_verbosity_option = False\n if add_verbosity_option:\n parser.add_option('--verbose', '-v', dest='verbose',\n action='count', help='be more verbose')\n parser.add_option('--quiet', '-q', dest='quiet',\n action='count', help='be more silent')\n\n (options, args) = parser.parse_args(args)\n\n if add_verbosity_option:\n verbosity = (options.verbose or 0) - (options.quiet or 0)\n log.inc_log_level(verbosity)\n\n\n if add_dry_run_option and options.dry_run:\n misc.options.dry = True\n log.inc_log_level(1)\n log.warn('running in dry-mode. don\\'t actually do anything')\n\n args = args[1:]\n if len(args) < len(required_args):\n parser.error('number of arguments does not match')\n kw = {}\n for arg_name, _default in optional_args:\n kw[arg_name] = getattr(options, arg_name)\n\n if fetch_all:\n kw[fetch_all] = args[len(required_args):]\n return function(*args[:len(required_args)], **kw)\n"
] | import sys
import types
import inspect
import re
import optparse
from collections import defaultdict
from textwrap import wrap
from scriptine import misc, log
def parse_and_run_function(function, args=None, command_name=None,
    add_dry_run_option=True, add_verbosity_option=True):
    """
    Build an :mod:`optparse` parser from ``function``'s signature and
    docstring, parse ``args`` with it and call ``function``.

    Positional parameters of ``function`` become required command line
    arguments; keyword parameters become ``--long-options`` whose type is
    derived from the default value.

    :param function: the command function to call
    :param args: argument vector to parse, defaults to :data:`sys.argv`
    :param command_name: name shown in the usage string (dashes allowed)
    :param add_dry_run_option: add a ``--dry-run``/``-n`` option
    :param add_verbosity_option: add ``--verbose``/``--quiet`` options
    """
    #TODO refactor me, I'm too long
    if args is None:
        args = sys.argv
    required_args, optional_args = inspect_args(function)
    func_doc = function.__doc__ or ''
    # ":param foo:" descriptions from the docstring become option help texts
    params_doc = parse_rst_params(func_doc)
    usage = 'usage: %prog '
    if command_name:
        usage += command_name.replace('_', '-') + ' '
    usage += '[options] ' + ' '.join(required_args)
    if func_doc:
        # the first paragraph (up to a blank line) of the docstring is used
        # as the command description, re-wrapped to 60 columns
        first_paragraph = re.findall('(.*?)((\n[ \t]*\n)|$)', func_doc,
            re.DOTALL)[0][0]
        first_paragraph = ' '.join(l.strip() for l in
            first_paragraph.split('\n'))
        usage += '\n\n' + '\n'.join(wrap(first_paragraph, 60))
    if set(required_args).intersection(params_doc.keys()):
        usage += '\n\nRequired arguments:'
        for arg in required_args:
            usage += '\n%s' % arg
            if arg in params_doc:
                usage += ': %s' % params_doc[arg]
    # honor the @no_help marker (see no_help())
    add_help_option = True
    if getattr(function, 'no_help', False):
        add_help_option = False
    # @fetch_all(name): that parameter collects all surplus positional
    # arguments, so it must not be turned into an option
    fetch_all = None
    if hasattr(function, 'fetch_all'):
        fetch_all = function.fetch_all
        optional_args = [(arg, default) for arg, default in optional_args
            if arg != fetch_all]
    parser = optparse.OptionParser
    if getattr(function, 'non_strict', False):
        parser = NonStrictOptionParser
    parser = parser(usage, add_help_option=add_help_option)
    for arg_name, default in optional_args:
        options = {}
        # option type/action is derived from the default value's type;
        # a True default yields a flag that switches the value off
        if isinstance(default, bool):
            if default:
                options = {'action': 'store_false'}
            else:
                options = {'action': 'store_true'}
        elif isinstance(default, int):
            options = {'type': 'int'}
        elif isinstance(default, float):
            options = {'type': 'float'}
        parser.add_option('--' + arg_name.replace('_', '-'),
            help=params_doc.get(arg_name, None),
            dest=arg_name, default=default, metavar=default, **options)
    if add_dry_run_option:
        parser.add_option('--dry-run', '-n', dest='dry_run', default=False,
            action='store_true', help='don\'t actually do anything')
    if getattr(function, 'no_verbosity', False):
        add_verbosity_option = False
    if add_verbosity_option:
        parser.add_option('--verbose', '-v', dest='verbose',
            action='count', help='be more verbose')
        parser.add_option('--quiet', '-q', dest='quiet',
            action='count', help='be more silent')
    (options, args) = parser.parse_args(args)
    if add_verbosity_option:
        # every -v raises the log level by one, every -q lowers it
        verbosity = (options.verbose or 0) - (options.quiet or 0)
        log.inc_log_level(verbosity)
    if add_dry_run_option and options.dry_run:
        # global dry flag consumed by scriptine.misc helpers
        misc.options.dry = True
        log.inc_log_level(1)
        log.warn('running in dry-mode. don\'t actually do anything')
    args = args[1:]
    if len(args) < len(required_args):
        parser.error('number of arguments does not match')
    kw = {}
    for arg_name, _default in optional_args:
        kw[arg_name] = getattr(options, arg_name)
    # surplus positional arguments go into the @fetch_all parameter
    if fetch_all:
        kw[fetch_all] = args[len(required_args):]
    return function(*args[:len(required_args)], **kw)
def no_help(cmd):
    """Mark ``cmd`` so no ``-h``/``--help`` option is added for it."""
    setattr(cmd, 'no_help', True)
    return cmd
def no_verbosity(cmd):
    """Mark ``cmd`` so no ``--verbose``/``--quiet`` options are added."""
    setattr(cmd, 'no_verbosity', True)
    return cmd
def non_strict(cmd):
    """Mark ``cmd`` to use the lenient NonStrictOptionParser (unknown
    options are passed through as arguments)."""
    setattr(cmd, 'non_strict', True)
    return cmd
def fetch_all(arg_name):
    """Return a decorator that marks a command so the parameter named
    ``arg_name`` receives all remaining positional arguments as a list."""
    def mark(cmd):
        cmd.fetch_all = arg_name
        return cmd
    return mark
def group(name):
    """Return a decorator that assigns a command to the help group
    ``name`` (used for grouping in the command listing)."""
    def mark(cmd):
        cmd.group = name
        return cmd
    return mark
class NonStrictOptionParser(optparse.OptionParser):
    """OptionParser variant that keeps unknown options as positional
    arguments instead of aborting with an error.

    Used for commands decorated with ``non_strict``.
    """
    def _process_args(self, largs, rargs, values):
        # Mirrors optparse.OptionParser._process_args with one change: a
        # BadOptionError is caught and the unrecognized token is kept in
        # largs as a plain argument.
        # NOTE(review): this relies on optparse's _process_long_opt /
        # _process_short_opts having already consumed the token from rargs
        # before raising -- confirm against the stdlib version in use.
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            try:
                if arg == "--":
                    del rargs[0]
                    return
                elif arg[0:2] == "--":
                    # process a single long option (possibly with value(s))
                    self._process_long_opt(rargs, values)
                elif arg[:1] == "-" and len(arg) > 1:
                    # process a cluster of short options (possibly with
                    # value(s) for the last one only)
                    self._process_short_opts(rargs, values)
                elif self.allow_interspersed_args:
                    largs.append(arg)
                    del rargs[0]
                else:
                    return
            except optparse.BadOptionError:
                largs.append(arg)
def inspect_args(function):
    """
    Split ``function``'s parameters into required and optional ones.

    :param function: the function to inspect
    :returns: ``(required_args, optional_args)`` where ``required_args``
        is a list of parameter names without defaults and
        ``optional_args`` is a list of ``(name, default)`` tuples in
        declaration order.
    """
    # inspect.getargspec was removed in Python 3.11; use getfullargspec
    # when available (its first four fields are identical) and keep the
    # old function as a fallback for older interpreters.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(function)
    args, defaults = list(spec[0]), spec[3]
    optional_args = []
    if defaults is not None:
        # defaults align with the *last* len(defaults) parameters, so pop
        # from the end while walking the defaults backwards
        for default in defaults[::-1]:
            optional_args.append((args.pop(), default))
        optional_args.reverse()
    return args, optional_args
def autocmds(namespace=None, args=None, command_suffix='_command',
    add_dry_run_option=True, add_verbosity_option=True):
    """
    Parse and run commands.

    Will search ``namespace`` for functions that end with ``command_suffix``.

    :param namespace: the namespace/module to search for commands
    :param args: the arguments for the command parser. defaults to
        :data:`sys.argv`
    :param command_suffix: function name suffix that indicates that a
        function is a command.
    """
    if namespace is None:
        # default to the caller's globals so a script can simply call run()
        namespace = inspect.currentframe().f_back.f_globals
    elif type(namespace) is types.ModuleType:
        namespace = namespace.__dict__
    if args is None:
        args = sys.argv
    if len(args) < 2 or args[1] in ('-h', '--help'):
        print_help(namespace, command_suffix)
        return
    # NOTE: pop(1) removes the command name in place, so the list handed
    # on to parse_and_run_function no longer contains it (mutates the
    # caller's list when one was passed in)
    command_name = args.pop(1).replace('-', '_')
    function = namespace[command_name + command_suffix]
    parse_and_run_function(function, args, command_name,
        add_dry_run_option=add_dry_run_option,
        add_verbosity_option=add_verbosity_option)
run = autocmds
def cmd(function, args=None):
    """Run ``function`` as the one and only command.

    :param function: the command function to execute
    :param args: argument vector, defaults to :data:`sys.argv`
    """
    argv = sys.argv if args is None else args
    parse_and_run_function(function, argv, '')
def print_help(namespace, command_suffix):
    """Print usage plus a listing of all commands found in ``namespace``
    to stderr, grouped by each command's optional ``group`` attribute."""
    group_commands = defaultdict(list)
    for func_name, func in namespace.iteritems():
        if func_name.endswith(command_suffix):
            func = namespace[func_name]
            group = getattr(func, 'group', None)
            # strip the suffix and show foo_bar as foo-bar
            command_name = func_name[:-len(command_suffix)].replace('_', '-')
            group_commands[group].append((command_name, func.__doc__))
    if not group_commands:
        print >>sys.stderr, 'no commands found in', sys.argv[0]
        return
    usage = 'usage: %prog command [options]'
    parser = optparse.OptionParser(usage)
    parser.print_help(sys.stderr)
    # commands without an explicit group are listed first
    default_commands = group_commands.pop(None, None)
    if default_commands:
        print_commands(None, default_commands)
    for group_name, commands in group_commands.iteritems():
        print_commands(group_name, commands)
def print_commands(group_name, commands):
    """Print one aligned ``command  summary`` line per command to stderr.

    :param group_name: heading for the group, or None for the default group
    :param commands: list of ``(command_name, docstring)`` tuples
    """
    if group_name:
        print >>sys.stderr, '\n%s commands:' % group_name.title()
    else:
        print >>sys.stderr, '\nCommands:'
    # pad every command name to the longest one in this group
    cmd_len = max(len(cmd) for cmd, _ in commands)
    for cmd, doc in commands:
        if doc is not None:
            # only the first line of the docstring is shown as the summary
            doc = doc.strip().split('\n')[0]
        else:
            doc = ''
        print >>sys.stderr, (' %-' + str(cmd_len) + 's %s') % (cmd, doc)
def parse_rst_params(doc):
    """
    Parse a reStructuredText docstring and return a dictionary
    with parameter names and descriptions.

    >>> doc = '''
    ... :param foo: foo parameter
    ...     foo parameter
    ...
    ... :param bar: bar parameter
    ... :param baz: baz parameter
    ...     baz parameter
    ...     baz parameter
    ...
    ... Some text.
    ... '''
    >>> params = parse_rst_params(doc)
    >>> params['foo']
    'foo parameter foo parameter'
    >>> params['bar']
    'bar parameter'
    >>> params['baz']
    'baz parameter baz parameter baz parameter'
    """
    # The previous VERBOSE-mode pattern encoded its two literal spaces as
    # trailing "\ " (backslash-space) line continuations, which are
    # invisible and silently destroyed by trailing-whitespace stripping.
    # The same pattern is written here explicitly, without re.VERBOSE.
    # ``body`` captures the rest of the :param line plus any following
    # lines indented deeper than the field marker itself.
    param_re = re.compile(
        r"^([ \t]*):param (?P<param>\w+):"
        r" (?P<body>.*\n(\1[ \t]+\w.*\n)*)",
        re.MULTILINE)
    params = {}
    for match in param_re.finditer(doc):
        parts = match.groupdict()
        body_lines = parts['body'].strip().split('\n')
        # collapse continuation lines into one space-joined description
        params[parts['param']] = ' '.join(s.strip() for s in body_lines)
    return params
|
olt/scriptine | scriptine/command.py | parse_rst_params | python | def parse_rst_params(doc):
param_re = re.compile(r"""^([ \t]*):param\
(?P<param>\w+):\
(?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
re.MULTILINE|re.VERBOSE)
params = {}
for match in param_re.finditer(doc):
parts = match.groupdict()
body_lines = parts['body'].strip().split('\n')
params[parts['param']] = ' '.join(s.strip() for s in body_lines)
return params | Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter' | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/command.py#L241-L274 | null | import sys
import types
import inspect
import re
import optparse
from collections import defaultdict
from textwrap import wrap
from scriptine import misc, log
def parse_and_run_function(function, args=None, command_name=None,
    add_dry_run_option=True, add_verbosity_option=True):
    """
    Build an :mod:`optparse` parser from ``function``'s signature and
    docstring, parse ``args`` with it and call ``function``.

    Positional parameters of ``function`` become required command line
    arguments; keyword parameters become ``--long-options`` whose type is
    derived from the default value.

    :param function: the command function to call
    :param args: argument vector to parse, defaults to :data:`sys.argv`
    :param command_name: name shown in the usage string (dashes allowed)
    :param add_dry_run_option: add a ``--dry-run``/``-n`` option
    :param add_verbosity_option: add ``--verbose``/``--quiet`` options
    """
    #TODO refactor me, I'm too long
    if args is None:
        args = sys.argv
    required_args, optional_args = inspect_args(function)
    func_doc = function.__doc__ or ''
    # ":param foo:" descriptions from the docstring become option help texts
    params_doc = parse_rst_params(func_doc)
    usage = 'usage: %prog '
    if command_name:
        usage += command_name.replace('_', '-') + ' '
    usage += '[options] ' + ' '.join(required_args)
    if func_doc:
        # the first paragraph (up to a blank line) of the docstring is used
        # as the command description, re-wrapped to 60 columns
        first_paragraph = re.findall('(.*?)((\n[ \t]*\n)|$)', func_doc,
            re.DOTALL)[0][0]
        first_paragraph = ' '.join(l.strip() for l in
            first_paragraph.split('\n'))
        usage += '\n\n' + '\n'.join(wrap(first_paragraph, 60))
    if set(required_args).intersection(params_doc.keys()):
        usage += '\n\nRequired arguments:'
        for arg in required_args:
            usage += '\n%s' % arg
            if arg in params_doc:
                usage += ': %s' % params_doc[arg]
    # honor the @no_help marker (see no_help())
    add_help_option = True
    if getattr(function, 'no_help', False):
        add_help_option = False
    # @fetch_all(name): that parameter collects all surplus positional
    # arguments, so it must not be turned into an option
    fetch_all = None
    if hasattr(function, 'fetch_all'):
        fetch_all = function.fetch_all
        optional_args = [(arg, default) for arg, default in optional_args
            if arg != fetch_all]
    parser = optparse.OptionParser
    if getattr(function, 'non_strict', False):
        parser = NonStrictOptionParser
    parser = parser(usage, add_help_option=add_help_option)
    for arg_name, default in optional_args:
        options = {}
        # option type/action is derived from the default value's type;
        # a True default yields a flag that switches the value off
        if isinstance(default, bool):
            if default:
                options = {'action': 'store_false'}
            else:
                options = {'action': 'store_true'}
        elif isinstance(default, int):
            options = {'type': 'int'}
        elif isinstance(default, float):
            options = {'type': 'float'}
        parser.add_option('--' + arg_name.replace('_', '-'),
            help=params_doc.get(arg_name, None),
            dest=arg_name, default=default, metavar=default, **options)
    if add_dry_run_option:
        parser.add_option('--dry-run', '-n', dest='dry_run', default=False,
            action='store_true', help='don\'t actually do anything')
    if getattr(function, 'no_verbosity', False):
        add_verbosity_option = False
    if add_verbosity_option:
        parser.add_option('--verbose', '-v', dest='verbose',
            action='count', help='be more verbose')
        parser.add_option('--quiet', '-q', dest='quiet',
            action='count', help='be more silent')
    (options, args) = parser.parse_args(args)
    if add_verbosity_option:
        # every -v raises the log level by one, every -q lowers it
        verbosity = (options.verbose or 0) - (options.quiet or 0)
        log.inc_log_level(verbosity)
    if add_dry_run_option and options.dry_run:
        # global dry flag consumed by scriptine.misc helpers
        misc.options.dry = True
        log.inc_log_level(1)
        log.warn('running in dry-mode. don\'t actually do anything')
    args = args[1:]
    if len(args) < len(required_args):
        parser.error('number of arguments does not match')
    kw = {}
    for arg_name, _default in optional_args:
        kw[arg_name] = getattr(options, arg_name)
    # surplus positional arguments go into the @fetch_all parameter
    if fetch_all:
        kw[fetch_all] = args[len(required_args):]
    return function(*args[:len(required_args)], **kw)
def no_help(cmd):
    """Mark ``cmd`` so no ``-h``/``--help`` option is added for it."""
    setattr(cmd, 'no_help', True)
    return cmd
def no_verbosity(cmd):
    """Mark ``cmd`` so no ``--verbose``/``--quiet`` options are added."""
    setattr(cmd, 'no_verbosity', True)
    return cmd
def non_strict(cmd):
    """Mark ``cmd`` to use the lenient NonStrictOptionParser (unknown
    options are passed through as arguments)."""
    setattr(cmd, 'non_strict', True)
    return cmd
def fetch_all(arg_name):
    """Return a decorator that marks a command so the parameter named
    ``arg_name`` receives all remaining positional arguments as a list."""
    def mark(cmd):
        cmd.fetch_all = arg_name
        return cmd
    return mark
def group(name):
    """Return a decorator that assigns a command to the help group
    ``name`` (used for grouping in the command listing)."""
    def mark(cmd):
        cmd.group = name
        return cmd
    return mark
class NonStrictOptionParser(optparse.OptionParser):
    """OptionParser variant that keeps unknown options as positional
    arguments instead of aborting with an error.

    Used for commands decorated with ``non_strict``.
    """
    def _process_args(self, largs, rargs, values):
        # Mirrors optparse.OptionParser._process_args with one change: a
        # BadOptionError is caught and the unrecognized token is kept in
        # largs as a plain argument.
        # NOTE(review): this relies on optparse's _process_long_opt /
        # _process_short_opts having already consumed the token from rargs
        # before raising -- confirm against the stdlib version in use.
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            try:
                if arg == "--":
                    del rargs[0]
                    return
                elif arg[0:2] == "--":
                    # process a single long option (possibly with value(s))
                    self._process_long_opt(rargs, values)
                elif arg[:1] == "-" and len(arg) > 1:
                    # process a cluster of short options (possibly with
                    # value(s) for the last one only)
                    self._process_short_opts(rargs, values)
                elif self.allow_interspersed_args:
                    largs.append(arg)
                    del rargs[0]
                else:
                    return
            except optparse.BadOptionError:
                largs.append(arg)
def inspect_args(function):
    """
    Split ``function``'s parameters into required and optional ones.

    :param function: the function to inspect
    :returns: ``(required_args, optional_args)`` where ``required_args``
        is a list of parameter names without defaults and
        ``optional_args`` is a list of ``(name, default)`` tuples in
        declaration order.
    """
    # inspect.getargspec was removed in Python 3.11; use getfullargspec
    # when available (its first four fields are identical) and keep the
    # old function as a fallback for older interpreters.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    spec = getspec(function)
    args, defaults = list(spec[0]), spec[3]
    optional_args = []
    if defaults is not None:
        # defaults align with the *last* len(defaults) parameters, so pop
        # from the end while walking the defaults backwards
        for default in defaults[::-1]:
            optional_args.append((args.pop(), default))
        optional_args.reverse()
    return args, optional_args
def autocmds(namespace=None, args=None, command_suffix='_command',
    add_dry_run_option=True, add_verbosity_option=True):
    """
    Parse and run commands.

    Will search ``namespace`` for functions that end with ``command_suffix``.

    :param namespace: the namespace/module to search for commands
    :param args: the arguments for the command parser. defaults to
        :data:`sys.argv`
    :param command_suffix: function name suffix that indicates that a
        function is a command.
    """
    if namespace is None:
        # default to the caller's globals so a script can simply call run()
        namespace = inspect.currentframe().f_back.f_globals
    elif type(namespace) is types.ModuleType:
        namespace = namespace.__dict__
    if args is None:
        args = sys.argv
    if len(args) < 2 or args[1] in ('-h', '--help'):
        print_help(namespace, command_suffix)
        return
    # NOTE: pop(1) removes the command name in place, so the list handed
    # on to parse_and_run_function no longer contains it (mutates the
    # caller's list when one was passed in)
    command_name = args.pop(1).replace('-', '_')
    function = namespace[command_name + command_suffix]
    parse_and_run_function(function, args, command_name,
        add_dry_run_option=add_dry_run_option,
        add_verbosity_option=add_verbosity_option)
run = autocmds
def cmd(function, args=None):
    """Run ``function`` as the one and only command.

    :param function: the command function to execute
    :param args: argument vector, defaults to :data:`sys.argv`
    """
    argv = sys.argv if args is None else args
    parse_and_run_function(function, argv, '')
def print_help(namespace, command_suffix):
    """Print usage plus a listing of all commands found in ``namespace``
    to stderr, grouped by each command's optional ``group`` attribute."""
    group_commands = defaultdict(list)
    for func_name, func in namespace.iteritems():
        if func_name.endswith(command_suffix):
            func = namespace[func_name]
            group = getattr(func, 'group', None)
            # strip the suffix and show foo_bar as foo-bar
            command_name = func_name[:-len(command_suffix)].replace('_', '-')
            group_commands[group].append((command_name, func.__doc__))
    if not group_commands:
        print >>sys.stderr, 'no commands found in', sys.argv[0]
        return
    usage = 'usage: %prog command [options]'
    parser = optparse.OptionParser(usage)
    parser.print_help(sys.stderr)
    # commands without an explicit group are listed first
    default_commands = group_commands.pop(None, None)
    if default_commands:
        print_commands(None, default_commands)
    for group_name, commands in group_commands.iteritems():
        print_commands(group_name, commands)
def print_commands(group_name, commands):
    """Print one aligned ``command  summary`` line per command to stderr.

    :param group_name: heading for the group, or None for the default group
    :param commands: list of ``(command_name, docstring)`` tuples
    """
    if group_name:
        print >>sys.stderr, '\n%s commands:' % group_name.title()
    else:
        print >>sys.stderr, '\nCommands:'
    # pad every command name to the longest one in this group
    cmd_len = max(len(cmd) for cmd, _ in commands)
    for cmd, doc in commands:
        if doc is not None:
            # only the first line of the docstring is shown as the summary
            doc = doc.strip().split('\n')[0]
        else:
            doc = ''
        print >>sys.stderr, (' %-' + str(cmd_len) + 's %s') % (cmd, doc)
def parse_rst_params(doc):
    """
    Parse a reStructuredText docstring and return a dictionary
    with parameter names and descriptions.

    >>> doc = '''
    ... :param foo: foo parameter
    ...     foo parameter
    ...
    ... :param bar: bar parameter
    ... :param baz: baz parameter
    ...     baz parameter
    ...     baz parameter
    ...
    ... Some text.
    ... '''
    >>> params = parse_rst_params(doc)
    >>> params['foo']
    'foo parameter foo parameter'
    >>> params['bar']
    'bar parameter'
    >>> params['baz']
    'baz parameter baz parameter baz parameter'
    """
    # The previous VERBOSE-mode pattern encoded its two literal spaces as
    # trailing "\ " (backslash-space) line continuations, which are
    # invisible and silently destroyed by trailing-whitespace stripping.
    # The same pattern is written here explicitly, without re.VERBOSE.
    # ``body`` captures the rest of the :param line plus any following
    # lines indented deeper than the field marker itself.
    param_re = re.compile(
        r"^([ \t]*):param (?P<param>\w+):"
        r" (?P<body>.*\n(\1[ \t]+\w.*\n)*)",
        re.MULTILINE)
    params = {}
    for match in param_re.finditer(doc):
        parts = match.groupdict()
        body_lines = parts['body'].strip().split('\n')
        # collapse continuation lines into one space-joined description
        params[parts['param']] = ' '.join(s.strip() for s in body_lines)
    return params
|
olt/scriptine | scriptine/misc.py | decorator | python | def decorator(caller, func=None):
if func is None: # returns a decorator
fun = FunctionMaker(caller)
first_arg = inspect.getargspec(caller)[0][0]
src = 'def %s(%s): return _call_(caller, %s)' % (
caller.__name__, first_arg, first_arg)
return fun.make(src, dict(caller=caller, _call_=decorator),
undecorated=caller)
else: # returns a decorated function
fun = FunctionMaker(func)
src = """def %(name)s(%(signature)s):
return _call_(_func_, %(signature)s)"""
return fun.make(src, dict(_func_=func, _call_=caller), undecorated=func) | decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/misc.py#L155-L171 | [
"def make(self, src_templ, evaldict=None, addsource=False, **attrs):\n \"Make a new function from a given template and update the signature\"\n src = src_templ % vars(self) # expand name and signature\n evaldict = evaldict or {}\n mo = FunctionMaker.DEF.match(src)\n if mo is None:\n raise SyntaxError('not a valid function template\\n%s' % src)\n name = mo.group(1) # extract the function name\n reserved_names = set([name] + [\n arg.strip(' *') for arg in self.signature.split(',')])\n for n, v in evaldict.iteritems():\n if n in reserved_names:\n raise NameError('%s is overridden in\\n%s' % (n, src))\n if not src.endswith('\\n'): # add a newline just for safety\n src += '\\n'\n try:\n code = compile(src, '<string>', 'single')\n exec code in evaldict\n except:\n print >> sys.stderr, 'Error in generated code:'\n print >> sys.stderr, src\n raise\n func = evaldict[name]\n if addsource:\n attrs['__source__'] = src\n self.update(func, **attrs)\n return func\n"
] | import sys
import re
import inspect
from distutils.core import Command
from scriptine import log
class DistutilsCommand(Command):
    """Convenience distutils command base class: provides no-op
    implementations of the mandatory option hooks so subclasses only
    have to define ``run()``."""
    user_options = []
    def initialize_options(self): pass
    def finalize_options(self): pass
def dict_to_options(d):
    """
    Recursively convert a plain ``dict`` (and all nested dicts) into
    :class:`Options` instances with attribute-style access.

    :param d: the dictionary to convert
    :returns: an :class:`Options` tree mirroring ``d``
    """
    d = Options(d)
    # items() instead of iteritems() so this also runs on Python 3; only
    # existing keys are overwritten, so iterating while assigning is safe.
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = dict_to_options(v)
    return d
class Options(dict):
    """
    Dictionary with attribute style access.

    >>> o = Options(bar='foo')
    >>> o.bar
    'foo'
    """
    def __repr__(self):
        # items() instead of iteritems() so repr also works on Python 3
        args = ', '.join(['%s=%r' % (key, value) for key, value in
            self.items()])
        return '%s(%s)' % (self.__class__.__name__, args)

    def __getattr__(self, name):
        # fall back to item access; missing keys raise AttributeError so
        # hasattr()/getattr() behave as expected
        if name in self:
            return self[name]
        else:
            raise AttributeError(name)

    __setattr__ = dict.__setitem__

    def __delattr__(self, name):
        if name in self:
            del self[name]
        else:
            raise AttributeError(name)
options = Options()
options.dry = False
# --- begin decorator ---
########################## LICENCE ###############################
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## Redistributions in bytecode form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
## DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    # matches "def <name>(" at the start of a generated source template
    DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')

    def __init__(self, func=None, name=None, signature=None,
        defaults=None, doc=None, module=None, funcdict=None):
        # attributes are taken from ``func`` first and may then be
        # overridden by the explicit keyword arguments
        if func:
            # func can also be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                # the argument list without default values, e.g. "a, b, *args"
                self.signature = inspect.formatargspec(
                    formatvalue=lambda val: "", *inspect.getargspec(func))[1:-1]
                self.defaults = func.func_defaults
                self.dict = func.__dict__.copy()
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.func_defaults = getattr(self, 'defaults', None)
        # module name taken from the frame three levels up -- presumably
        # the module that applied the decorator; TODO confirm
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = FunctionMaker.DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        # refuse evaldict entries that would shadow the generated function
        # name or one of its parameters
        reserved_names = set([name] + [
            arg.strip(' *') for arg in self.signature.split(',')])
        for n, v in evaldict.iteritems():
            if n in reserved_names:
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n'
        try:
            code = compile(src, '<string>', 'single')
            exec code in evaldict
        except:
            # dump the generated source so template errors are debuggable
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
# --- end decorator ---
def dry(message, func, *args, **kw):
    """Log ``message``, then call ``func(*args, **kw)`` unless dry-run
    mode is active (in which case nothing is executed)."""
    log.info(message)
    if options.dry:
        return None
    return func(*args, **kw)
@decorator
def log_call(func, *args, **kw):
    """Decorator: log the call (function name plus arguments), then
    execute the wrapped function."""
    _log_function_call(func, *args, **kw)
    return func(*args, **kw)
def _log_function_call(func, *args, **kw):
    """Log one human-readable line describing the call
    ``func(*args, **kw)``: the function name, the positional arguments
    and the keyword arguments in parentheses."""
    message = func.__name__
    if args:
        message += ' ' + ' '.join(map(str, args))
    if kw:
        # items() instead of iteritems() so this also runs on Python 3
        kw_str = ' '.join(['%s %r' % (k, v) for k, v in kw.items()])
        message += '(' + kw_str + ')'
    log.info(message)
@decorator
def dry_guard(func, *args, **kw):
    """Decorator: log the call, but skip executing the wrapped function
    entirely while dry-run mode is active."""
    _log_function_call(func, *args, **kw)
    if not options.dry:
        return func(*args, **kw)
|
olt/scriptine | scriptine/_path.py | path.splitpath | python | def splitpath(self):
parent, child = os.path.split(self)
return self.__class__(parent), child | p.splitpath() -> Return (p.parent, p.name). | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L202-L205 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.splitdrive | python | def splitdrive(self):
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel | p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L207-L215 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.
        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().
        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.
        The ``errors`` keyword argument controls behavior when an
        error occurs.  The default is ``strict``, which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        # Reject a bad 'errors' value up front, before touching the
        # filesystem.
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            # Pre-order: yield the child itself before descending into it.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                # Cannot stat the child: under warn/ignore treat it as a
                # non-directory and keep walking.
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item

    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional ``pattern`` argument, this yields only
        directories whose names match the given pattern.  For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in ``test``.
        The ``errors`` keyword argument controls behavior when an
        error occurs.  The default is ``strict``, which causes an
        exception.  The other allowed values are ``warn``, which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # Pre-order: the directory is yielded before its descendants.
            # Note: the pattern filters which dirs are *yielded*, but the
            # recursion still descends into non-matching directories.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if path matches the given pattern.
        pattern - A filename pattern with wildcards,
        for example ``*.py``.
        """
        # Matches only the final component (self.name), not the whole
        # path, using shell-style rules from the fnmatch module.
        return fnmatch.fnmatch(self.name, pattern)

    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.
        pattern - a path relative to this directory, with wildcards.
        For example, ``path('/users').glob('*/bin/*')`` returns a list
        of all the files users have in their bin directories.
        """
        # _base(...) converts back to the plain string type so the
        # stdlib glob module receives an ordinary string.
        cls = self.__class__
        return [cls(s) for s in glob.glob(_base(self / pattern))]

    # --- Reading or writing an entire file at once.

    def open(self, mode='r'):
        """ Open this file. Return a file object. """
        # 'file' is the Python 2 builtin file type (this module is
        # Python 2 code).
        return file(self, mode)
    def bytes(self):
        """ Open this file, read all bytes, return them as a string. """
        f = self.open('rb')
        try:
            return f.read()
        finally:
            f.close()

    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.
        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        def _write_bytes(bytes, append):
            if append:
                mode = 'ab'
            else:
                mode = 'wb'
            f = self.open(mode)
            try:
                f.write(bytes)
            finally:
                f.close()
        # 'dry' is scriptine's dry-run helper: presumably it only logs
        # the message in dry-run mode and otherwise invokes
        # _write_bytes(bytes, append) -- confirm in scriptine core.
        dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
            _write_bytes, bytes, append)
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.
        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
        the file.  If present, the content of the file is
        decoded and returned as a unicode object; otherwise
        it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
        for the options.  Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # codecs.open gives no universal-newline translation, so fold
            # every line-ending convention (including the Unicode ones)
            # to '\n' by hand.
            return (t.replace(u'\r\n', u'\n')
                    .replace(u'\r\x85', u'\n')
                    .replace(u'\r', u'\n')
                    .replace(u'\x85', u'\n')
                    .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
    # --- Methods for querying the filesystem.

    # Plain re-exports of os.path predicates; called as p.exists() etc.
    exists = os.path.exists
    isfile = os.path.isfile
    islink = os.path.islink
    ismount = os.path.ismount

    def isdir(self):
        # isdir is a built-in on windows, need to wrap
        return os.path.isdir(self)
    isdir.__doc__ = os.path.isdir.__doc__

    if hasattr(os.path, 'samefile'):
        # samefile is not available on every platform.
        samefile = os.path.samefile

    # Timestamp accessors.  These are plain functions stored on the
    # class, so they are called as methods: p.atime(), p.mtime(), ...
    atime = os.path.getatime
    mtime = os.path.getmtime
    ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
    # File size in bytes; called as p.size().
    size = os.path.getsize

    if hasattr(os, 'access'):
        def access(self, mode):
            """ Return true if current user has access to this path.
            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
            """
            return os.access(self, mode)

    def stat(self):
        """ Perform a stat() system call on this path. """
        return os.stat(self)

    def lstat(self):
        """ Like path.stat(), but do not follow symbolic links. """
        return os.lstat(self)

    def owner(self):
        r""" Return the name of the owner of this file or directory.
        This follows symbolic links.
        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
        On Windows, a group can own a file or directory.
        """
        if os.name == 'nt':
            # win32security comes from the optional pywin32 package;
            # presumably imported as None elsewhere when unavailable.
            if win32security is None:
                raise Exception("path.owner requires win32all to be installed")
            desc = win32security.GetFileSecurity(
                self, win32security.OWNER_SECURITY_INFORMATION)
            sid = desc.GetSecurityDescriptorOwner()
            account, domain, typecode = win32security.LookupAccountSid(None, sid)
            return domain + u'\\' + account
        else:
            # POSIX: resolve the stat() uid through the pwd database.
            if pwd is None:
                raise NotImplementedError("path.owner is not implemented on this platform.")
            st = self.stat()
            return pwd.getpwuid(st.st_uid).pw_name

    if hasattr(os, 'statvfs'):
        def statvfs(self):
            """ Perform a statvfs() system call on this path. """
            return os.statvfs(self)

    if hasattr(os, 'pathconf'):
        def pathconf(self, name):
            """ Return os.pathconf(self, name): a POSIX configuration value. """
            return os.pathconf(self, name)
    # --- Modifying operations on files and directories

    # Every mutating method is wrapped with @dry_guard so scriptine's
    # dry-run mode can skip the real filesystem change.

    @dry_guard
    def utime(self, times):
        """ Set the access and modified times of this file. """
        os.utime(self, times)
    utime.__doc__ = os.utime.__doc__

    @dry_guard
    def chmod(self, mode):
        os.chmod(self, mode)
    chmod.__doc__ = os.chmod.__doc__

    if hasattr(os, 'chown'):
        @dry_guard
        def chown(self, uid, gid):
            os.chown(self, uid, gid)
        chown.__doc__ = os.chown.__doc__

    @dry_guard
    def rename(self, new):
        os.rename(self, new)
    rename.__doc__ = os.rename.__doc__

    @dry_guard
    def renames(self, new):
        # Like rename(), but creates intermediate directories as needed
        # and prunes emptied ones (see os.renames).
        os.renames(self, new)
    renames.__doc__ = os.renames.__doc__

    # --- Create/delete operations on directories

    @dry_guard
    def mkdir(self, mode=0777):
        # 0777 is Python 2 octal syntax; the effective mode is still
        # masked by the process umask.
        os.mkdir(self, mode)

    @dry_guard
    def makedirs(self, mode=0777):
        os.makedirs(self, mode)

    @dry_guard
    def ensure_dir(self, mode=0777):
        """
        Make sure the directory exists, create if necessary.
        """
        # NOTE(review): 'not self.exists() or not self.isdir()' reduces
        # to 'not self.isdir()'; and if the path exists as a plain
        # *file*, os.makedirs will raise OSError here.
        if not self.exists() or not self.isdir():
            os.makedirs(self, mode)

    @dry_guard
    def rmdir(self):
        os.rmdir(self)

    @dry_guard
    def removedirs(self):
        os.removedirs(self)
    # --- Modifying operations on files

    @dry_guard
    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        # O_CREAT makes this create-if-missing; 0666 (rw for all,
        # pre-umask) mirrors the permissions of a shell 'touch'.
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
        os.close(fd)
        os.utime(self, None)

    @dry_guard
    def remove(self):
        os.remove(self)

    @dry_guard
    def unlink(self):
        # Alias of remove(): os.unlink and os.remove are the same call.
        os.unlink(self)
    # --- Links

    if hasattr(os, 'link'):
        @dry_guard
        def link(self, newpath):
            """ Create a hard link at 'newpath', pointing to this file. """
            os.link(self, newpath)

    if hasattr(os, 'symlink'):
        @dry_guard
        def symlink(self, newlink):
            """ Create a symbolic link at 'newlink', pointing here. """
            os.symlink(self, newlink)

    if hasattr(os, 'readlink'):
        def readlink(self):
            """ Return the path to which this symbolic link points.
            The result may be an absolute or a relative path.
            """
            return self.__class__(os.readlink(self))

        def readlinkabs(self):
            """ Return the path to which this symbolic link points.
            The result is always an absolute path.
            """
            p = self.readlink()
            if p.isabs():
                return p
            else:
                # A relative link target is resolved against the link's
                # own directory, not the current working directory.
                return (self.parent / p).abspath()
    # --- High-level functions from shutil

    # shutil operations re-exported as methods, wrapped with dry_guard
    # so they are skipped (log only) in scriptine's dry-run mode.
    copyfile = dry_guard(shutil.copyfile)
    copymode = dry_guard(shutil.copymode)
    copystat = dry_guard(shutil.copystat)
    copy = dry_guard(shutil.copy)
    copy2 = dry_guard(shutil.copy2)
    copytree = dry_guard(shutil.copytree)
    if hasattr(shutil, 'move'):
        move = dry_guard(shutil.move)
    rmtree = dry_guard(shutil.rmtree)

    # --- Convenience for scriptine

    @dry_guard
    def install(self, to, chmod=0644):
        """
        Copy data and set mode to 'chmod'.
        """
        # Copy first, then force the requested permissions on the copy
        # (0644 is Python 2 octal: rw-r--r--).
        self.copy(to)
        path(to).chmod(chmod)

    # --- Special stuff from os

    if hasattr(os, 'chroot'):
        def chroot(self):
            os.chroot(self)

    if hasattr(os, 'startfile'):
        def startfile(self):
            # Windows-only: open with the associated application.
            os.startfile(self)

    # --- contextmanagers

    try:
        from contextlib import contextmanager

        @contextmanager
        def as_working_dir(self):
            """
            temporarily change into this directory
            >>> with path('/').as_working_dir():
            ...     assert path.cwd() == '/'
            >>> assert path.cwd() != '/'
            """
            # Remember where we were so the finally-block can restore it
            # even when the with-body raises.
            current_dir = path(os.curdir).abspath()
            os.chdir(self)
            try:
                yield
            finally:
                os.chdir(current_dir)
    except ImportError:
        # Python < 2.5 has no contextlib; as_working_dir is simply
        # unavailable there.
        pass
|
olt/scriptine | scriptine/_path.py | path.splitext | python | def splitext(self):
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext | p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L217-L228 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
    def __repr__(self):
        # repr of the underlying string wrapped in a constructor call,
        # e.g. path('/tmp').
        return 'path(%s)' % _base.__repr__(self)

    # Adding a path and a string yields a path.
    def __add__(self, more):
        try:
            resultStr = _base.__add__(self, more)
        except TypeError: #Python bug
            resultStr = NotImplemented
        if resultStr is NotImplemented:
            return resultStr
        return self.__class__(resultStr)

    def __radd__(self, other):
        # 'str' + path: only defined for string left operands.
        if isinstance(other, basestring):
            return self.__class__(other.__add__(self))
        else:
            return NotImplemented

    # The / operator joins paths.
    def __div__(self, rel):
        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
        Join two path components, adding a separator character if
        needed.
        """
        return self.__class__(os.path.join(self, rel))

    # Make the / operator work even when true division is enabled.
    __truediv__ = __div__

    def __rdiv__(self, rel):
        # 'str' / path joins with the string on the left.
        return self.__class__(os.path.join(rel, self))
    __rtruediv__ = __rdiv__

    @classmethod
    def cwd(cls):
        """ Return the current working directory as a path object. """
        return cls(_getcwd())

    # --- Operations on path strings.

    # Thin wrappers over os.path that re-wrap results as path objects;
    # isabs/basename return plain values and need no wrapping.
    isabs = os.path.isabs
    def abspath(self): return self.__class__(os.path.abspath(self))
    def normcase(self): return self.__class__(os.path.normcase(self))
    def normpath(self): return self.__class__(os.path.normpath(self))
    def realpath(self): return self.__class__(os.path.realpath(self))
    def expanduser(self): return self.__class__(os.path.expanduser(self))
    def expandvars(self): return self.__class__(os.path.expandvars(self))
    def dirname(self): return self.__class__(os.path.dirname(self))
    basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
    def _get_namebase(self):
        # Final path component with its last extension removed.
        base, ext = os.path.splitext(self.name)
        return base

    def _get_ext(self):
        # Cast to _base first so os.path.splitext gets a plain string.
        f, ext = os.path.splitext(_base(self))
        return ext

    def _get_drive(self):
        drive, r = os.path.splitdrive(self)
        return self.__class__(drive)

    # Derived components exposed as read-only properties.
    parent = property(
        dirname, None, None,
        """ This path's parent directory, as a new path object.
        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
        """)

    name = property(
        basename, None, None,
        """ The name of this file or directory without the full path.
        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
        """)

    namebase = property(
        _get_namebase, None, None,
        """ The same as path.name, but with one file extension stripped off.
        For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
        but path('/home/guido/python.tar.gz').namebase == 'python.tar'
        """)

    ext = property(
        _get_ext, None, None,
        """ The file extension, for example '.py'. """)

    drive = property(
        _get_drive, None, None,
        """ The drive specifier, for example 'C:'.
        This is always empty on systems that don't use drive specifiers.
        """)
    def splitpath(self):
        """ p.splitpath() -> Return (p.parent, p.name). """
        parent, child = os.path.split(self)
        return self.__class__(parent), child

    def splitdrive(self):
        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
        Split the drive specifier from this path.  If there is
        no drive specifier, p.drive is empty, so the return value
        is simply (path(''), p).  This is always the case on Unix.
        """
        drive, rel = os.path.splitdrive(self)
        return self.__class__(drive), rel

    def stripext(self):
        """ p.stripext() -> Remove one file extension from the path.
        For example, path('/home/guido/python.tar.gz').stripext()
        returns path('/home/guido/python.tar').
        """
        # NOTE(review): relies on self.splitext(), defined elsewhere in
        # this class.
        return self.splitext()[0]

    if hasattr(os.path, 'splitunc'):
        # Windows-only UNC (\\host\share) support.
        def splitunc(self):
            unc, rest = os.path.splitunc(self)
            return self.__class__(unc), rest

        def _get_uncshare(self):
            unc, r = os.path.splitunc(self)
            return self.__class__(unc)

        uncshare = property(
            _get_uncshare, None, None,
            """ The UNC mount point for this path.
            This is empty for paths on local drives. """)

    def joinpath(self, *args):
        """ Join two or more path components, adding a separator
        character (os.sep) if needed.  Returns a new path
        object.
        """
        return self.__class__(os.path.join(self, *args))
    def splitall(self):
        """ Return a list of the path components in this path.
        The first item in the list will be a path.  Its value will be
        either os.curdir, os.pardir, empty, or the root directory of
        this path (for example, ``/`` or ``C:\\``).  The other items in
        the list will be strings.
        ``path.path.joinpath(*result)`` will yield the original path.
        """
        parts = []
        loc = self
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            # os.path.split() returns its input unchanged once the root
            # (or drive) is reached -- that terminates the loop.
            if loc == prev:
                break
            parts.append(child)
        parts.append(loc)
        # Components were collected leaf-first; restore root-first order.
        parts.reverse()
        return parts

    def relpath(self):
        """ Return this path as a relative path,
        based from the current working directory.
        """
        cwd = self.__class__(os.getcwd())
        return cwd.relpathto(self)

    def relpathto(self, dest):
        """ Return a relative path from self to dest.
        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()
        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)

    # --- Listing, searching, walking, and matching

    def listdir(self, pattern=None):
        """ D.listdir() -> List of items in this directory.
        Use D.files() or D.dirs() instead if you want a listing
        of just files or just subdirectories.
        The elements of the list are path objects.
        With the optional 'pattern' argument, this only lists
        items whose names match the given pattern.
        """
        names = os.listdir(self)
        if pattern is not None:
            names = fnmatch.filter(names, pattern)
        # Prefix each bare name with this directory to get full paths.
        return [self / child for child in names]
    def dirs(self, pattern=None):
        """ D.dirs() -> List of this directory's subdirectories.
        The elements of the list are path objects.
        This does not walk recursively into subdirectories
        (but see :meth:`path.walkdirs`).
        With the optional ``pattern`` argument, this only lists
        directories whose names match the given pattern.  For
        example, ``d.dirs('build-*')``.
        """
        # Filter the (possibly pattern-restricted) listing to dirs only.
        return [p for p in self.listdir(pattern) if p.isdir()]

    def files(self, pattern=None):
        """ D.files() -> List of the files in this directory.
        The elements of the list are path objects.
        This does not walk into subdirectories (see :meth:`path.walkfiles`).
        With the optional ``pattern`` argument, this only lists files
        whose names match the given pattern.  For example,
        ``d.files('*.pyc')``.
        """
        # Filter the (possibly pattern-restricted) listing to files only.
        return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.
        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().
        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.
        The ``errors`` keyword argument controls behavior when an
        error occurs.  The default is ``strict``, which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        # Reject a bad 'errors' value up front, before touching the
        # filesystem.
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            # Pre-order: yield the child itself before descending into it.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                # Cannot stat the child: under warn/ignore treat it as a
                # non-directory and keep walking.
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item

    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional ``pattern`` argument, this yields only
        directories whose names match the given pattern.  For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in ``test``.
        The ``errors`` keyword argument controls behavior when an
        error occurs.  The default is ``strict``, which causes an
        exception.  The other allowed values are ``warn``, which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # Pre-order; note the pattern filters which dirs are
            # *yielded*, but recursion still descends into all of them.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if path matches the given pattern.
        pattern - A filename pattern with wildcards,
        for example ``*.py``.
        """
        # Matches only the final component (self.name), not the whole
        # path, using shell-style rules from the fnmatch module.
        return fnmatch.fnmatch(self.name, pattern)

    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.
        pattern - a path relative to this directory, with wildcards.
        For example, ``path('/users').glob('*/bin/*')`` returns a list
        of all the files users have in their bin directories.
        """
        # _base(...) converts back to the plain string type so the
        # stdlib glob module receives an ordinary string.
        cls = self.__class__
        return [cls(s) for s in glob.glob(_base(self / pattern))]

    # --- Reading or writing an entire file at once.

    def open(self, mode='r'):
        """ Open this file. Return a file object. """
        # 'file' is the Python 2 builtin file type (this module is
        # Python 2 code).
        return file(self, mode)

    def bytes(self):
        """ Open this file, read all bytes, return them as a string. """
        f = self.open('rb')
        try:
            return f.read()
        finally:
            f.close()

    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.
        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        def _write_bytes(bytes, append):
            if append:
                mode = 'ab'
            else:
                mode = 'wb'
            f = self.open(mode)
            try:
                f.write(bytes)
            finally:
                f.close()
        # 'dry' is scriptine's dry-run helper: presumably it only logs
        # the message in dry-run mode and otherwise invokes
        # _write_bytes(bytes, append) -- confirm in scriptine core.
        dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
            _write_bytes, bytes, append)
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.
        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
        the file.  If present, the content of the file is
        decoded and returned as a unicode object; otherwise
        it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
        for the options.  Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # codecs.open gives no universal-newline translation, so fold
            # every line-ending convention (including the Unicode ones)
            # to '\n' by hand.
            return (t.replace(u'\r\n', u'\n')
                    .replace(u'\r\x85', u'\n')
                    .replace(u'\r', u'\n')
                    .replace(u'\x85', u'\n')
                    .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.joinpath | python | def joinpath(self, *args):
return self.__class__(os.path.join(self, *args)) | Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L252-L257 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.relpath | python | def relpath(self):
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self) | Return this path as a relative path,
based from the current working directory. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L281-L286 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.relpathto | python | def relpathto(self, dest):
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath) | Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath(). | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L288-L324 | [
"def abspath(self): return self.__class__(os.path.abspath(self))\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.

        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().

        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.

        pattern - optional wildcard pattern; only items whose names
                  match are yielded, but non-matching directories are
                  still descended into.
        errors  - controls behavior when an error occurs.  'strict'
                  (the default) raises the exception, 'warn' reports
                  it via warnings.warn(), and 'ignore' skips it
                  silently.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            # Listing this directory failed; honor the errors policy
            # and end the generator (there is nothing to iterate).
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            # Pre-order: yield the child itself before descending.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                # stat() on the child failed; under 'ignore'/'warn',
                # treat it as a non-directory so traversal continues.
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.

        With the optional ``pattern`` argument, this yields only
        directories whose names match the given pattern.  For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in ``test``.  Non-matching directories are
        still descended into.

        errors - controls behavior when an error occurs.  'strict'
                 (the default) raises the exception, 'warn' reports
                 it via warnings.warn(), and 'ignore' skips it
                 silently.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            # Listing this directory failed; honor the errors policy
            # and end the generator.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # Pre-order: yield the subdirectory before recursing.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
    @dry_guard
    def mkdir(self, mode=0777):
        """ Create this directory; the parent must already exist.

        mode - permission bits for the new directory (octal).
        """
        os.mkdir(self, mode)
    @dry_guard
    def makedirs(self, mode=0777):
        """ Create this directory, including any missing parents. """
        os.makedirs(self, mode)
    @dry_guard
    def ensure_dir(self, mode=0777):
        """
        Make sure the directory exists, create if necessary.
        """
        # NOTE(review): if the path exists but is a regular file,
        # os.makedirs raises OSError here rather than replacing it.
        if not self.exists() or not self.isdir():
            os.makedirs(self, mode)
    @dry_guard
    def rmdir(self):
        """ Remove this directory; it must be empty. """
        os.rmdir(self)
    @dry_guard
    def removedirs(self):
        """ Remove this directory and any empty parent directories. """
        os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.listdir | python | def listdir(self, pattern=None):
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names] | D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L328-L342 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
    def relpathto(self, dest):
        """ Return a relative path from self to dest.

        Both paths are made absolute first.  If there is no relative
        path from self to dest -- for example if they reside on
        different drives in Windows -- then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case of the
        # returned path; normcase is applied only for comparisons.
        dest_list = dest.splitall()
        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Different roots (e.g. drives): can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need one "os.pardir" per remaining origin segment to climb
        # from the origin up to the point of divergence...
        segments = [os.pardir] * (len(orig_list) - i)
        # ...then descend along the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # The paths happen to be identical; use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.walk | python | def walk(self, pattern=None, errors='strict'):
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item | D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L370-L421 | [
"def listdir(self, pattern=None):\n \"\"\" D.listdir() -> List of items in this directory.\n\n Use D.files() or D.dirs() instead if you want a listing\n of just files or just subdirectories.\n\n The elements of the list are path objects.\n\n With the optional 'pattern' argument, this only lists\n items whose names match the given pattern.\n \"\"\"\n names = os.listdir(self)\n if pattern is not None:\n names = fnmatch.filter(names, pattern)\n return [self / child for child in names]\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.glob | python | def glob(self, pattern):
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))] | Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L515-L524 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.write_bytes | python | def write_bytes(self, bytes, append=False):
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append) | Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L541-L559 | [
"def dry(message, func, *args, **kw):\n log.info(message)\n\n if not options.dry:\n return func(*args, **kw)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
    """ D.listdir() -> List of items in this directory.

    Each element of the result is a path object.  Use D.files() or
    D.dirs() for a listing restricted to plain files or
    subdirectories.  When ``pattern`` is given, only entries whose
    names match that shell pattern are returned.
    """
    entries = os.listdir(self)
    if pattern is not None:
        entries = fnmatch.filter(entries, pattern)
    return [self / entry for entry in entries]
def dirs(self, pattern=None):
    """ D.dirs() -> List of this directory's subdirectories.

    Each element is a path object.  Only the immediate children are
    examined; see :meth:`path.walkdirs` for a recursive variant.
    When ``pattern`` is given (e.g. ``'build-*'``), only matching
    directory names are returned.
    """
    return [entry for entry in self.listdir(pattern) if entry.isdir()]
def files(self, pattern=None):
    """ D.files() -> List of the files in this directory.

    Each element is a path object.  Only the immediate children are
    examined; see :meth:`path.walkfiles` for a recursive variant.
    When ``pattern`` is given (e.g. ``'*.pyc'``), only matching
    file names are returned.
    """
    return [entry for entry in self.listdir(pattern) if entry.isfile()]
def walk(self, pattern=None, errors='strict'):
    """ D.walk() -> iterator over files and subdirs, recursively.

    The iterator yields path objects naming each child item of
    this directory and its descendants. This requires that
    D.isdir().

    This performs a depth-first traversal of the directory tree.
    Each directory is returned just before all its children.

    The ``errors`` keyword argument controls behavior when an
    error occurs. The default is ``strict``, which causes an
    exception. The other allowed values are 'warn', which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")

    try:
        childList = self.listdir()
    except Exception:
        # Listing this directory failed; apply the error policy.
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise

    for child in childList:
        # The pattern filters what is yielded, not what is traversed.
        if pattern is None or child.fnmatch(pattern):
            yield child
        try:
            isdir = child.isdir()
        except Exception:
            # Stat'ing the child failed; under 'ignore'/'warn' we
            # simply treat it as a non-directory and move on.
            if errors == 'ignore':
                isdir = False
            elif errors == 'warn':
                warnings.warn(
                    "Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                isdir = False
            else:
                raise

        if isdir:
            # Recurse depth-first into the subdirectory.
            for item in child.walk(pattern, errors):
                yield item
def walkdirs(self, pattern=None, errors='strict'):
    """ D.walkdirs() -> iterator over subdirs, recursively.

    With the optional ``pattern`` argument, this yields only
    directories whose names match the given pattern. For
    example, ``mydir.walkdirs('*test')`` yields only directories
    with names ending in ``test``.

    The ``errors`` keyword argument controls behavior when an
    error occurs. The default is ``strict``, which causes an
    exception. The other allowed values are ``warn``, which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")

    try:
        dirs = self.dirs()
    except Exception:
        # Listing this directory failed; apply the error policy.
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise

    for child in dirs:
        # The pattern filters which directories are yielded; the
        # traversal itself descends into every subdirectory.
        if pattern is None or child.fnmatch(pattern):
            yield child
        for subsubdir in child.walkdirs(pattern, errors):
            yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
    """ D.walkfiles() -> iterator over files in D, recursively.

    The optional argument, ``pattern``, limits the results to files
    with names that match the pattern. For example,
    ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
    extension.

    The ``errors`` keyword argument controls behavior when an error
    occurs: 'strict' (the default) raises, 'warn' reports via
    ``warnings.warn()`` and skips the entry, 'ignore' skips it
    silently -- the same policy as walk() and walkdirs().
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")

    try:
        childList = self.listdir()
    except Exception:
        # Listing this directory failed; apply the error policy.
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise

    for child in childList:
        try:
            isfile = child.isfile()
            isdir = not isfile and child.isdir()
        # Bug fix: this was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception for
        # consistency with walk() and walkdirs().
        except Exception:
            if errors == 'ignore':
                continue
            elif errors == 'warn':
                warnings.warn(
                    # Bug fix: report the child that failed to stat,
                    # not self (walk() already did this correctly).
                    "Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                continue
            else:
                raise

        if isfile:
            if pattern is None or child.fnmatch(pattern):
                yield child
        elif isdir:
            # Recurse into subdirectories, propagating the same
            # pattern and error policy.
            for f in child.walkfiles(pattern, errors):
                yield f
def fnmatch(self, pattern):
    """ Return True if this path's final component matches *pattern*.

    pattern - a shell-style filename pattern with wildcards, for
    example ``*.py``.
    """
    basename = self.name
    return fnmatch.fnmatch(basename, pattern)
def glob(self, pattern):
    """ Return a list of path objects matching *pattern*.

    pattern - a path relative to this directory that may contain
    wildcards.  For example, ``path('/users').glob('*/bin/*')``
    lists every file users keep in their bin directories.
    """
    make = self.__class__
    matches = glob.glob(_base(self / pattern))
    return [make(match) for match in matches]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
    """ Open this file with the given mode and return the file object.

    Uses the Python 2 ``file`` builtin directly.
    """
    return file(self, mode)
def bytes(self):
    """ Read this file in binary mode and return its entire
    contents as a byte string. """
    with self.open('rb') as handle:
        return handle.read()
def text(self, encoding=None, errors='strict'):
    r""" Read this file and return its content as a string.

    encoding - optional Unicode encoding (character set) of the
        file.  When given, the content is decoded and returned as a
        unicode object, with every standard line ending (including
        u'\x85', u'\r\x85' and u'\u2028') normalized to '\n'.  When
        omitted, the raw 8-bit content is returned, read in
        universal-newline text mode so '\r\n' and '\r' become '\n'.
    errors - Unicode error handling scheme; see help(str.decode).
        Default is 'strict'.
    """
    if encoding is None:
        # 8-bit read; _textmode ('U' where supported) performs the
        # newline translation for us.
        with self.open(_textmode) as stream:
            return stream.read()
    # Unicode read.  codecs.open does not support 'U' mode, so the
    # newline normalization is done by hand below.
    stream = codecs.open(self, 'r', encoding, errors)
    try:
        content = stream.read()
    finally:
        stream.close()
    return (content.replace(u'\r\n', u'\n')
                   .replace(u'\r\x85', u'\n')
                   .replace(u'\r', u'\n')
                   .replace(u'\x85', u'\n')
                   .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
    r""" Write the given text to this file.

    The default behavior is to overwrite any existing file;
    to append instead, use the 'append=True' keyword argument.

    There are two differences between path.write_text() and
    path.write_bytes(): newline handling and Unicode handling.
    See below.

    Parameters:

    - text - str/unicode - The text to be written.
    - encoding - str - The Unicode encoding that will be used.
      This is ignored if 'text' isn't a Unicode string.
    - errors - str - How to handle Unicode encoding errors.
      Default is 'strict'. See help(unicode.encode) for the
      options. This is ignored if 'text' isn't a Unicode string.
    - linesep - keyword argument - str/unicode - The sequence of
      characters to be used to mark end-of-line. The default is
      os.linesep. You can also specify None; this means to
      leave all newlines as they are in 'text'.
    - append - keyword argument - bool - Specifies what to do if
      the file already exists (True: append to the end of it;
      False: overwrite it.) The default is False.

    --- Newline handling.

    write_text() converts all standard end-of-line sequences
    ('\n', '\r', and '\r\n') to your platform's default end-of-line
    sequence (see os.linesep; on Windows, for example, the
    end-of-line marker is '\r\n').

    If you don't like your platform's default, you can override it
    using the 'linesep=' keyword argument. If you specifically want
    write_text() to preserve the newlines as-is, use 'linesep=None'.

    This applies to Unicode text the same as to 8-bit text, except
    there are three additional standard Unicode end-of-line sequences:
    u'\x85', u'\r\x85', and u'\u2028'.

    --- Unicode

    If 'text' isn't Unicode, then apart from newline handling, the
    bytes are written verbatim to the file. The 'encoding' and
    'errors' arguments are not used and must be omitted.

    If 'text' is Unicode, it is first converted to bytes using the
    specified 'encoding' (or the default encoding if 'encoding'
    isn't specified). The 'errors' argument applies only to this
    conversion.
    """
    if isinstance(text, unicode):
        if linesep is not None:
            # Normalize every standard end-of-line sequence
            # (including the Unicode-only ones) to '\n', then
            # expand '\n' to the requested separator.
            text = (text.replace(u'\r\n', u'\n')
                        .replace(u'\r\x85', u'\n')
                        .replace(u'\r', u'\n')
                        .replace(u'\x85', u'\n')
                        .replace(u'\u2028', u'\n'))
            text = text.replace(u'\n', linesep)
        if encoding is None:
            encoding = sys.getdefaultencoding()
        bytes = text.encode(encoding, errors)
    else:
        # It is an error to specify an encoding if 'text' is
        # an 8-bit string.
        assert encoding is None
        if linesep is not None:
            text = (text.replace('\r\n', '\n')
                        .replace('\r', '\n'))
            text = text.replace('\n', linesep)
        # Bug fix: 'bytes' was previously only assigned when linesep
        # was not None, so write_text(str_text, linesep=None) failed
        # instead of writing the text verbatim as documented.
        bytes = text
    self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
    r""" Read this file and return its lines as a list.

    encoding - optional Unicode encoding (character set) of the
        file.  The default of None reads the content as 8-bit
        characters and returns (non-Unicode) str objects.
    errors - Unicode error handling scheme; see help(str.decode).
        Default is 'strict'.
    retain - when true, keep trailing newline characters, with all
        combinations ('\r', '\n', '\r\n') normalized to '\n'; when
        false, strip them.  Default is True.
    """
    if encoding is None and retain:
        # Fast path: universal-newline text mode already yields
        # '\n'-terminated lines.
        with self.open(_textmode) as stream:
            return stream.readlines()
    # Otherwise decode/normalize via text() and split afterwards.
    return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
                linesep=os.linesep, append=False):
    r""" Write the given lines of text to this file.

    By default this overwrites any existing file at this path.

    This puts a platform-specific newline sequence on every line.
    See 'linesep' below.

    lines - A list of strings.

    encoding - A Unicode encoding to use. This applies only if
        'lines' contains any Unicode strings.

    errors - How to handle errors in Unicode encoding. This
        also applies only to Unicode strings.

    linesep - The desired line-ending. This line-ending is
        applied to every line. If a line already has any
        standard line ending ('\r', '\n', '\r\n', u'\x85',
        u'\r\x85', u'\u2028'), that will be stripped off and
        this will be used instead. The default is os.linesep,
        which is platform-dependent ('\r\n' on Windows, '\n' on
        Unix, etc.) Specify None to write the lines as-is,
        like file.writelines().

    Use the keyword argument append=True to append lines to the
    file. The default is to overwrite the file. Warning:
    When you use this with Unicode data, if the encoding of the
    existing data in the file is different from the encoding
    you specify with the encoding= parameter, the result is
    mixed-encoding data, which can really confuse someone trying
    to read the file later.
    """
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    f = self.open(mode)
    try:
        for line in lines:
            isUnicode = isinstance(line, unicode)
            if linesep is not None:
                # Strip off any existing line-end and add the
                # specified linesep string.  Two-character endings
                # must be checked before one-character ones.
                if isUnicode:
                    # u'\x0d\x85' is u'\r\x85' spelled with escapes.
                    if line[-2:] in (u'\r\n', u'\x0d\x85'):
                        line = line[:-2]
                    elif line[-1:] in (u'\r', u'\n',
                                       u'\x85', u'\u2028'):
                        line = line[:-1]
                else:
                    if line[-2:] == '\r\n':
                        line = line[:-2]
                    elif line[-1:] in ('\r', '\n'):
                        line = line[:-1]
                line += linesep
            if isUnicode:
                if encoding is None:
                    encoding = sys.getdefaultencoding()
                # Encode each Unicode line before writing to the
                # binary-mode file object.
                line = line.encode(encoding, errors)
            f.write(line)
    finally:
        f.close()
def read_md5(self, hex=False):
    """ Compute the MD5 hash of this file's contents.

    hex - when true, return the digest as a hex string instead of
        raw bytes.

    The whole file is read, in 8 KiB chunks.
    """
    digest = hashlib.md5()
    with self.open('rb') as stream:
        while True:
            chunk = stream.read(8192)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest() if hex else digest.digest()
# --- Methods for querying the filesystem.

# Direct re-exports of the os.path predicates; each is callable as a
# no-argument method on this path.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount

def isdir(self):
    # isdir is a built-in on windows, need to wrap
    return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__

if hasattr(os.path, 'samefile'):
    # Not available on every platform, hence the guard.
    samefile = os.path.samefile

# File timestamps and size, re-exported from os.path as methods.
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime

def newer(self, other):
    # True if this path was modified more recently than *other*
    # (both sides use the mtime property, i.e. os.path.getmtime).
    return self.mtime > other.mtime

size = os.path.getsize

if hasattr(os, 'access'):
    def access(self, mode):
        """ Return true if current user has access to this path.

        mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
        """
        return os.access(self, mode)

def stat(self):
    """ Perform a stat() system call on this path. """
    return os.stat(self)

def lstat(self):
    """ Like path.stat(), but do not follow symbolic links. """
    return os.lstat(self)

def owner(self):
    r""" Return the name of the owner of this file or directory.

    This follows symbolic links.

    On Windows, this returns a name of the form ur'DOMAIN\User Name'.
    On Windows, a group can own a file or directory.
    """
    if os.name == 'nt':
        # win32security is an optional module-level import elsewhere
        # in this file; None means win32all is not installed.
        if win32security is None:
            raise Exception("path.owner requires win32all to be installed")
        desc = win32security.GetFileSecurity(
            self, win32security.OWNER_SECURITY_INFORMATION)
        sid = desc.GetSecurityDescriptorOwner()
        account, domain, typecode = win32security.LookupAccountSid(None, sid)
        return domain + u'\\' + account
    else:
        # POSIX: map the stat uid to a user name via the pwd module
        # (pwd is None on platforms without it).
        if pwd is None:
            raise NotImplementedError("path.owner is not implemented on this platform.")
        st = self.stat()
        return pwd.getpwuid(st.st_uid).pw_name

if hasattr(os, 'statvfs'):
    def statvfs(self):
        """ Perform a statvfs() system call on this path. """
        return os.statvfs(self)

if hasattr(os, 'pathconf'):
    def pathconf(self, name):
        # Return the value of the named system configuration
        # variable for this path (see os.pathconf).
        return os.pathconf(self, name)
# --- Modifying operations on files and directories
# Every mutating operation below is wrapped in dry_guard, which turns
# it into a logged no-op when scriptine runs in dry-run mode.

@dry_guard
def utime(self, times):
    """ Set the access and modified times of this file. """
    os.utime(self, times)
utime.__doc__ = os.utime.__doc__

@dry_guard
def chmod(self, mode):
    # Change this path's permission bits; documented via os.chmod below.
    os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__

if hasattr(os, 'chown'):
    @dry_guard
    def chown(self, uid, gid):
        # Change this path's owner and group (POSIX only).
        os.chown(self, uid, gid)
    chown.__doc__ = os.chown.__doc__

@dry_guard
def rename(self, new):
    # Rename this path to 'new'; documented via os.rename below.
    os.rename(self, new)
rename.__doc__ = os.rename.__doc__

@dry_guard
def renames(self, new):
    # Like rename, but creates intermediate directories as needed
    # and prunes emptied ones; documented via os.renames below.
    os.renames(self, new)
renames.__doc__ = os.renames.__doc__

# --- Create/delete operations on directories

@dry_guard
def mkdir(self, mode=0777):
    """ Create this directory (see os.mkdir); the mode is masked by
    the process umask. """
    os.mkdir(self, mode)

@dry_guard
def makedirs(self, mode=0777):
    """ Recursively create this directory and any missing parent
    directories (see os.makedirs). """
    os.makedirs(self, mode)

@dry_guard
def ensure_dir(self, mode=0777):
    """
    Make sure the directory exists, create if necessary.
    """
    # NOTE(review): if the path exists but is not a directory,
    # os.makedirs will raise OSError -- presumably intentional.
    if not self.exists() or not self.isdir():
        os.makedirs(self, mode)

@dry_guard
def rmdir(self):
    """ Remove this (empty) directory. """
    os.rmdir(self)

@dry_guard
def removedirs(self):
    """ Remove this directory and any empty parent directories
    (see os.removedirs). """
    os.removedirs(self)

# --- Modifying operations on files

@dry_guard
def touch(self):
    """ Set the access/modified times of this file to the current time.
    Create the file if it does not exist.
    """
    fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
    os.close(fd)
    os.utime(self, None)

@dry_guard
def remove(self):
    """ Remove (delete) this file. """
    os.remove(self)

@dry_guard
def unlink(self):
    """ Remove (delete) this file; same operation as remove(). """
    os.unlink(self)
# --- Links
# Link-related methods are defined only on platforms whose os module
# provides the corresponding primitive.

if hasattr(os, 'link'):
    @dry_guard
    def link(self, newpath):
        """ Create a hard link at 'newpath', pointing to this file. """
        os.link(self, newpath)

if hasattr(os, 'symlink'):
    @dry_guard
    def symlink(self, newlink):
        """ Create a symbolic link at 'newlink', pointing here. """
        os.symlink(self, newlink)

if hasattr(os, 'readlink'):
    def readlink(self):
        """ Return the path to which this symbolic link points.

        The result may be an absolute or a relative path.
        """
        return self.__class__(os.readlink(self))

    def readlinkabs(self):
        """ Return the path to which this symbolic link points.

        The result is always an absolute path.
        """
        p = self.readlink()
        if p.isabs():
            return p
        else:
            # A relative target is resolved against the directory
            # containing this link, then made absolute.
            return (self.parent / p).abspath()
# --- High-level functions from shutil
# Each shutil operation is wrapped in dry_guard, so in dry-run mode
# the call is logged instead of executed.

copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
    # shutil.move appeared in Python 2.3; guard for older versions.
    move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)

# --- Convenience for scriptine

@dry_guard
def install(self, to, chmod=0644):
    """
    Copy data and set mode to 'chmod'.
    """
    self.copy(to)
    path(to).chmod(chmod)
# --- Special stuff from os

if hasattr(os, 'chroot'):
    def chroot(self):
        # Change the process's root directory to this path (POSIX only).
        os.chroot(self)

if hasattr(os, 'startfile'):
    def startfile(self):
        # Open this file with its associated application (Windows only).
        os.startfile(self)

# --- contextmanagers
try:
    from contextlib import contextmanager

    @contextmanager
    def as_working_dir(self):
        """
        temporarily change into this directory

        >>> with path('/').as_working_dir():
        ...     assert path.cwd() == '/'
        >>> assert path.cwd() != '/'
        """
        current_dir = path(os.curdir).abspath()
        os.chdir(self)
        try:
            yield
        finally:
            # Always restore the previous working directory, even if
            # the with-body raised.
            os.chdir(current_dir)
except ImportError:
    # contextlib is unavailable (Python < 2.5): as_working_dir is
    # simply not defined on this class.
    pass
|
olt/scriptine | scriptine/_path.py | path.text | python | def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n')) | r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L561-L596 | [
"def open(self, mode='r'):\n \"\"\" Open this file. Return a file object. \"\"\"\n return file(self, mode)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
    r""" Write the given text to this file.

    By default any existing file is overwritten; pass append=True to
    append instead.

    text     - str/unicode - the text to write.
    encoding - Unicode encoding used when 'text' is a unicode string.
               It is an error to pass an encoding for an 8-bit string
               (asserted below).
    errors   - how Unicode encoding errors are handled ('strict' by
               default; see help(unicode.encode)).  Only used when
               'text' is unicode.
    linesep  - end-of-line sequence applied to every line.  All
               standard line endings in 'text' ('\n', '\r', '\r\n',
               and for unicode also u'\x85', u'\r\x85', u'\u2028')
               are first normalized to '\n' and then replaced by this
               value.  Pass None to leave newlines exactly as they
               appear in 'text'.
    append   - True appends to an existing file; False (default)
               overwrites it.
    """
    if isinstance(text, unicode):
        if linesep is not None:
            # Normalize every standard Unicode end-of-line sequence to
            # '\n' first, then expand '\n' to the requested linesep.
            text = (text.replace(u'\r\n', u'\n')
                        .replace(u'\r\x85', u'\n')
                        .replace(u'\r', u'\n')
                        .replace(u'\x85', u'\n')
                        .replace(u'\u2028', u'\n'))
            text = text.replace(u'\n', linesep)
        if encoding is None:
            encoding = sys.getdefaultencoding()
        bytes = text.encode(encoding, errors)
    else:
        # It is an error to specify an encoding if 'text' is
        # an 8-bit string.
        assert encoding is None
        if linesep is not None:
            # Only byte-level line endings exist for 8-bit strings.
            text = (text.replace('\r\n', '\n')
                        .replace('\r', '\n'))
            text = text.replace('\n', linesep)
        # BUGFIX: previously 'bytes' was only assigned inside the
        # 'linesep is not None' branch (or via replace('\n', None),
        # which raises TypeError), so 8-bit text with linesep=None
        # failed.  Assign unconditionally: the 8-bit string is already
        # the payload.
        bytes = text
    self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
    r""" Open this file, read all lines, return them in a list.
    Optional arguments:
    encoding - The Unicode encoding (or character set) of
    the file. The default is None, meaning the content
    of the file is read as 8-bit characters and returned
    as a list of (non-Unicode) str objects.
    errors - How to handle Unicode errors; see help(str.decode)
    for the options. Default is 'strict'
    retain - If true, retain newline characters; but all newline
    character combinations ('\r', '\n', '\r\n') are
    translated to '\n'. If false, newline characters are
    stripped off. Default is True.
    This uses 'U' mode in Python 2.3 and later.
    """
    if encoding is None and retain:
        # Fast path: universal-newline read keeps '\n' terminators.
        # _textmode is a module-level constant ('U' on Python 2.3+).
        f = self.open(_textmode)
        try:
            return f.readlines()
        finally:
            f.close()
    else:
        # Decode and/or strip newlines by reading the whole file via
        # text() and splitting afterwards.
        return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
                linesep=os.linesep, append=False):
    r""" Write the given lines of text to this file.
    By default this overwrites any existing file at this path.
    This puts a platform-specific newline sequence on every line.
    See 'linesep' below.
    lines - A list of strings.
    encoding - A Unicode encoding to use. This applies only if
    'lines' contains any Unicode strings.
    errors - How to handle errors in Unicode encoding. This
    also applies only to Unicode strings.
    linesep - The desired line-ending. This line-ending is
    applied to every line. If a line already has any
    standard line ending ('\r', '\n', '\r\n', u'\x85',
    u'\r\x85', u'\u2028'), that will be stripped off and
    this will be used instead. The default is os.linesep,
    which is platform-dependent ('\r\n' on Windows, '\n' on
    Unix, etc.) Specify None to write the lines as-is,
    like file.writelines().
    Use the keyword argument append=True to append lines to the
    file. The default is to overwrite the file. Warning:
    When you use this with Unicode data, if the encoding of the
    existing data in the file is different from the encoding
    you specify with the encoding= parameter, the result is
    mixed-encoding data, which can really confuse someone trying
    to read the file later.
    """
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    f = self.open(mode)
    try:
        for line in lines:
            # Each line is handled independently: str and unicode lines
            # may be mixed in the same call.
            isUnicode = isinstance(line, unicode)
            if linesep is not None:
                # Strip off any existing line-end and add the
                # specified linesep string.
                if isUnicode:
                    # u'\x0d\x85' is u'\r\x85' (CR followed by NEL).
                    if line[-2:] in (u'\r\n', u'\x0d\x85'):
                        line = line[:-2]
                    elif line[-1:] in (u'\r', u'\n',
                                       u'\x85', u'\u2028'):
                        line = line[:-1]
                else:
                    if line[-2:] == '\r\n':
                        line = line[:-2]
                    elif line[-1:] in ('\r', '\n'):
                        line = line[:-1]
                line += linesep
            if isUnicode:
                # Resolve the default encoding lazily, only when a
                # unicode line is actually encountered.
                if encoding is None:
                    encoding = sys.getdefaultencoding()
                line = line.encode(encoding, errors)
            f.write(line)
    finally:
        f.close()
def read_md5(self, hex=False):
    """ Calculate the md5 hash for this file.
    hex - Return the digest as hex string.
    This reads through the entire file.
    """
    digest = hashlib.md5()
    f = self.open('rb')
    try:
        # Feed the hash in 8 KiB chunks; iter()'s sentinel form stops
        # at the empty string returned at end-of-file.
        for block in iter(lambda: f.read(8192), ''):
            digest.update(block)
    finally:
        f.close()
    if hex:
        return digest.hexdigest()
    return digest.digest()
# --- Methods for querying the filesystem.

# Read-only queries delegated straight to os.path; each receives the
# path object itself (a string subclass) as its single argument.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
    # isdir is a built-in on windows, need to wrap
    return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
# os.path.samefile is not available on every platform.
if hasattr(os.path, 'samefile'):
    samefile = os.path.samefile
# File timestamps: access / modification / inode-change times.
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
    """ Return True if this path was modified more recently than 'other'. """
    return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
    def access(self, mode):
        """ Return true if current user has access to this path.
        mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
        """
        return os.access(self, mode)
def stat(self):
    """ Perform a stat() system call on this path. """
    return os.stat(self)
def lstat(self):
    """ Like path.stat(), but do not follow symbolic links. """
    return os.lstat(self)
def owner(self):
    r""" Return the name of the owner of this file or directory.
    This follows symbolic links.
    On Windows, this returns a name of the form ur'DOMAIN\User Name'.
    On Windows, a group can own a file or directory.
    """
    if os.name == 'nt':
        # win32security is a module-scope name (optional pywin32 import);
        # it is None when pywin32 is not installed.
        if win32security is None:
            raise Exception("path.owner requires win32all to be installed")
        desc = win32security.GetFileSecurity(
            self, win32security.OWNER_SECURITY_INFORMATION)
        sid = desc.GetSecurityDescriptorOwner()
        account, domain, typecode = win32security.LookupAccountSid(None, sid)
        return domain + u'\\' + account
    else:
        # pwd is POSIX-only; module scope sets it to None elsewhere.
        if pwd is None:
            raise NotImplementedError("path.owner is not implemented on this platform.")
        st = self.stat()
        return pwd.getpwuid(st.st_uid).pw_name
# statvfs / pathconf exist only on POSIX platforms.
if hasattr(os, 'statvfs'):
    def statvfs(self):
        """ Perform a statvfs() system call on this path. """
        return os.statvfs(self)
if hasattr(os, 'pathconf'):
    def pathconf(self, name):
        """ Return the os.pathconf() configuration value 'name' for this path. """
        return os.pathconf(self, name)
# --- Modifying operations on files and directories

# Every mutating operation is wrapped with @dry_guard (module-level);
# presumably it turns the call into a log-only no-op when scriptine's
# dry-run mode is active — confirm against the dry_guard implementation.

@dry_guard
def utime(self, times):
    """ Set the access and modified times of this file. """
    os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
    os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
# os.chown exists on POSIX only.
if hasattr(os, 'chown'):
    @dry_guard
    def chown(self, uid, gid):
        os.chown(self, uid, gid)
    chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
    os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
    os.renames(self, new)
renames.__doc__ = os.renames.__doc__

# --- Create/delete operations on directories

@dry_guard
def mkdir(self, mode=0777):
    # Create this directory; the parent must already exist.
    os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
    # Create this directory and any missing intermediate directories.
    os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
    """
    Make sure the directory exists, create if necessary.
    """
    # Guards against a missing path or an existing non-directory entry;
    # note makedirs() will still raise OSError if a file is in the way.
    if not self.exists() or not self.isdir():
        os.makedirs(self, mode)
@dry_guard
def rmdir(self):
    # Remove this directory (it must be empty).
    os.rmdir(self)
@dry_guard
def removedirs(self):
    # Remove this directory, then prune now-empty parent directories.
    os.removedirs(self)

# --- Modifying operations on files

@dry_guard
def touch(self):
    """ Set the access/modified times of this file to the current time.
    Create the file if it does not exist.
    """
    fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
    os.close(fd)
    os.utime(self, None)
@dry_guard
def remove(self):
    # Delete this file (equivalent to unlink()).
    os.remove(self)
@dry_guard
def unlink(self):
    os.unlink(self)
# --- Links

if hasattr(os, 'link'):
    @dry_guard
    def link(self, newpath):
        """ Create a hard link at 'newpath', pointing to this file. """
        os.link(self, newpath)
if hasattr(os, 'symlink'):
    @dry_guard
    def symlink(self, newlink):
        """ Create a symbolic link at 'newlink', pointing here. """
        os.symlink(self, newlink)
if hasattr(os, 'readlink'):
    def readlink(self):
        """ Return the path to which this symbolic link points.
        The result may be an absolute or a relative path.
        """
        # Wrap in self.__class__ so subclasses keep their own type.
        return self.__class__(os.readlink(self))
    def readlinkabs(self):
        """ Return the path to which this symbolic link points.
        The result is always an absolute path.
        """
        p = self.readlink()
        if p.isabs():
            return p
        else:
            # A relative target is resolved against the link's directory.
            return (self.parent / p).abspath()

# --- High-level functions from shutil

# Each shutil operation is wrapped with the dry-run guard; the path
# object is passed as the (string) source argument by the caller.
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
    move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)

# --- Convenience for scriptine

@dry_guard
def install(self, to, chmod=0644):
    """
    Copy data and set mode to 'chmod'.
    """
    self.copy(to)
    path(to).chmod(chmod)

# --- Special stuff from os

if hasattr(os, 'chroot'):
    def chroot(self):
        # Change the process root directory to this path (POSIX, root only).
        os.chroot(self)
if hasattr(os, 'startfile'):
    def startfile(self):
        # Open this file with its associated application (Windows only).
        os.startfile(self)

# --- contextmanagers

try:
    from contextlib import contextmanager
    @contextmanager
    def as_working_dir(self):
        """
        temporarily change into this directory
        >>> with path('/').as_working_dir():
        ...     assert path.cwd() == '/'
        >>> assert path.cwd() != '/'
        """
        # Remember the absolute previous directory so it can be restored
        # even if the with-body raises.
        current_dir = path(os.curdir).abspath()
        os.chdir(self)
        try:
            yield
        finally:
            os.chdir(current_dir)
except ImportError:
    # contextlib is unavailable (very old Python); as_working_dir is
    # simply not defined in that case.
    pass
|
olt/scriptine | scriptine/_path.py | path.write_text | python | def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append) | r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L598-L685 | [
"def write_bytes(self, bytes, append=False):\n \"\"\" Open this file and write the given bytes to it.\n\n Default behavior is to overwrite any existing file.\n Call p.write_bytes(bytes, append=True) to append instead.\n \"\"\"\n def _write_bytes(bytes, append):\n if append:\n mode = 'ab'\n else:\n mode = 'wb'\n f = self.open(mode)\n try:\n f.write(bytes)\n finally:\n f.close()\n\n dry(\"write_bytes %s '%r...' append=%r\" % (self, bytes[:20], append),\n _write_bytes, bytes, append)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.

def __repr__(self):
    # Unambiguous form, e.g. path('/usr/lib'); _base is the str/unicode
    # base class chosen at module scope.
    return 'path(%s)' % _base.__repr__(self)

# Adding a path and a string yields a path.
def __add__(self, more):
    try:
        resultStr = _base.__add__(self, more)
    except TypeError: #Python bug
        resultStr = NotImplemented
    if resultStr is NotImplemented:
        return resultStr
    return self.__class__(resultStr)
def __radd__(self, other):
    # string + path concatenation; any other left operand is unsupported.
    if isinstance(other, basestring):
        return self.__class__(other.__add__(self))
    else:
        return NotImplemented

# The / operator joins paths.
def __div__(self, rel):
    """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
    Join two path components, adding a separator character if
    needed.
    """
    return self.__class__(os.path.join(self, rel))

# Make the / operator work even when true division is enabled.
__truediv__ = __div__

def __rdiv__(self, rel):
    """ Join 'rel' / self, used when the left operand is a plain string. """
    return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__

@classmethod
def cwd(cls):
    """ Return the current working directory as a path object. """
    return cls(_getcwd())

# --- Operations on path strings.

# Pure string-level operations returning new path objects; none of
# these touch the filesystem except realpath(), which resolves
# symlinks via the OS.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename

def expand(self):
    """ Clean up a filename by calling expandvars(),
    expanduser(), and normpath() on it.
    This is commonly everything needed to clean up a filename
    read from a configuration file, for example.
    """
    return self.expandvars().expanduser().normpath()
def _get_namebase(self):
    # Filename without directory and without the last extension.
    base, ext = os.path.splitext(self.name)
    return base
def _get_ext(self):
    # The final extension including the leading dot ('' if none).
    f, ext = os.path.splitext(_base(self))
    return ext
def _get_drive(self):
    # Drive specifier such as 'C:'; empty on POSIX systems.
    drive, r = os.path.splitdrive(self)
    return self.__class__(drive)

parent = property(
    dirname, None, None,
    """ This path's parent directory, as a new path object.
    For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
    """)
name = property(
    basename, None, None,
    """ The name of this file or directory without the full path.
    For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
    """)
namebase = property(
    _get_namebase, None, None,
    """ The same as path.name, but with one file extension stripped off.
    For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
    but path('/home/guido/python.tar.gz').namebase == 'python.tar'
    """)
ext = property(
    _get_ext, None, None,
    """ The file extension, for example '.py'. """)
drive = property(
    _get_drive, None, None,
    """ The drive specifier, for example 'C:'.
    This is always empty on systems that don't use drive specifiers.
    """)

def splitpath(self):
    """ p.splitpath() -> Return (p.parent, p.name). """
    parent, child = os.path.split(self)
    return self.__class__(parent), child
def splitdrive(self):
    """ p.splitdrive() -> Return (p.drive, <the rest of p>).
    Split the drive specifier from this path. If there is
    no drive specifier, p.drive is empty, so the return value
    is simply (path(''), p). This is always the case on Unix.
    """
    drive, rel = os.path.splitdrive(self)
    return self.__class__(drive), rel
def splitext(self):
    """ p.splitext() -> Return (p.stripext(), p.ext).
    Split the filename extension from this path and return
    the two parts. Either part may be empty.
    The extension is everything from '.' to the end of the
    last path segment. This has the property that if
    (a, b) == p.splitext(), then a + b == p.
    """
    filename, ext = os.path.splitext(self)
    return self.__class__(filename), ext
def stripext(self):
    """ p.stripext() -> Remove one file extension from the path.
    For example, path('/home/guido/python.tar.gz').stripext()
    returns path('/home/guido/python.tar').
    """
    return self.splitext()[0]

# splitunc() exists only on platforms with UNC paths (Windows).
if hasattr(os.path, 'splitunc'):
    def splitunc(self):
        """ Split the UNC mount point (e.g. '\\\\host\\share') from the rest. """
        unc, rest = os.path.splitunc(self)
        return self.__class__(unc), rest
    def _get_uncshare(self):
        unc, r = os.path.splitunc(self)
        return self.__class__(unc)
    uncshare = property(
        _get_uncshare, None, None,
        """ The UNC mount point for this path.
        This is empty for paths on local drives. """)
def joinpath(self, *args):
    """ Join two or more path components, adding a separator
    character (os.sep) if needed. Returns a new path
    object.
    """
    return self.__class__(os.path.join(self, *args))

def splitall(self):
    """ Return a list of the path components in this path.
    The first item in the list will be a path. Its value will be
    either os.curdir, os.pardir, empty, or the root directory of
    this path (for example, ``/`` or ``C:\\``). The other items in
    the list will be strings.
    ``path.path.joinpath(*result)`` will yield the original path.
    """
    parts = []
    loc = self
    while loc != os.curdir and loc != os.pardir:
        prev = loc
        loc, child = prev.splitpath()
        # splitpath() of a root returns the root unchanged: stop there.
        if loc == prev:
            break
        parts.append(child)
    parts.append(loc)
    # Components were collected leaf-first; restore root-first order.
    parts.reverse()
    return parts

def relpath(self):
    """ Return this path as a relative path,
    based from the current working directory.
    """
    cwd = self.__class__(os.getcwd())
    return cwd.relpathto(self)

def relpathto(self, dest):
    """ Return a relative path from self to dest.
    If there is no relative path from self to dest, for example if
    they reside on different drives in Windows, then this returns
    dest.abspath().
    """
    origin = self.abspath()
    dest = self.__class__(dest).abspath()
    orig_list = origin.normcase().splitall()
    # Don't normcase dest!  We want to preserve the case.
    dest_list = dest.splitall()
    if orig_list[0] != os.path.normcase(dest_list[0]):
        # Can't get here from there.
        return dest
    # Find the location where the two paths start to differ.
    i = 0
    for start_seg, dest_seg in zip(orig_list, dest_list):
        if start_seg != os.path.normcase(dest_seg):
            break
        i += 1
    # Now i is the point where the two paths diverge.
    # Need a certain number of "os.pardir"s to work up
    # from the origin to the point of divergence.
    segments = [os.pardir] * (len(orig_list) - i)
    # Need to add the diverging part of dest_list.
    segments += dest_list[i:]
    if len(segments) == 0:
        # If they happen to be identical, use os.curdir.
        relpath = os.curdir
    else:
        relpath = os.path.join(*segments)
    return self.__class__(relpath)
# --- Listing, searching, walking, and matching

def listdir(self, pattern=None):
    """ D.listdir() -> List of items in this directory.
    Use D.files() or D.dirs() instead if you want a listing
    of just files or just subdirectories.
    The elements of the list are path objects.
    With the optional 'pattern' argument, this only lists
    items whose names match the given pattern.
    """
    names = os.listdir(self)
    if pattern is not None:
        # fnmatch.filter matches against the bare names, not full paths.
        names = fnmatch.filter(names, pattern)
    return [self / child for child in names]

def dirs(self, pattern=None):
    """ D.dirs() -> List of this directory's subdirectories.
    The elements of the list are path objects.
    This does not walk recursively into subdirectories
    (but see :meth:`path.walkdirs`).
    With the optional ``pattern`` argument, this only lists
    directories whose names match the given pattern. For
    example, ``d.dirs('build-*')``.
    """
    return [p for p in self.listdir(pattern) if p.isdir()]

def files(self, pattern=None):
    """ D.files() -> List of the files in this directory.
    The elements of the list are path objects.
    This does not walk into subdirectories (see :meth:`path.walkfiles`).
    With the optional ``pattern`` argument, this only lists files
    whose names match the given pattern. For example,
    ``d.files('*.pyc')``.
    """
    return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
    """ D.walk() -> iterator over files and subdirs, recursively.
    The iterator yields path objects naming each child item of
    this directory and its descendants. This requires that
    D.isdir().
    This performs a depth-first traversal of the directory tree.
    Each directory is returned just before all its children.
    The ``errors`` keyword argument controls behavior when an
    error occurs. The default is ``strict``, which causes an
    exception. The other allowed values are 'warn', which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        childList = self.listdir()
    except Exception:
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise
    for child in childList:
        # Yield the child itself (if it matches) before descending.
        if pattern is None or child.fnmatch(pattern):
            yield child
        try:
            isdir = child.isdir()
        except Exception:
            # A child that cannot be stat()ed is treated as a non-directory
            # under 'ignore'/'warn' so traversal can continue.
            if errors == 'ignore':
                isdir = False
            elif errors == 'warn':
                warnings.warn(
                    "Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                isdir = False
            else:
                raise
        if isdir:
            # Depth-first recursion into subdirectories.
            for item in child.walk(pattern, errors):
                yield item

def walkdirs(self, pattern=None, errors='strict'):
    """ D.walkdirs() -> iterator over subdirs, recursively.
    With the optional ``pattern`` argument, this yields only
    directories whose names match the given pattern. For
    example, ``mydir.walkdirs('*test')`` yields only directories
    with names ending in ``test``.
    The ``errors`` keyword argument controls behavior when an
    error occurs. The default is ``strict``, which causes an
    exception. The other allowed values are ``warn``, which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        dirs = self.dirs()
    except Exception:
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise
    for child in dirs:
        # Yield each matching subdirectory, then recurse into it.
        if pattern is None or child.fnmatch(pattern):
            yield child
        for subsubdir in child.walkdirs(pattern, errors):
            yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
    """ D.walkfiles() -> iterator over files in D, recursively.
    The optional argument, ``pattern``, limits the results to files
    with names that match the pattern. For example,
    ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
    extension.
    The ``errors`` keyword argument controls behavior when an
    error occurs. The default is ``strict``, which causes an
    exception. The other allowed values are ``warn``, which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        childList = self.listdir()
    except Exception:
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise
    for child in childList:
        try:
            isfile = child.isfile()
            isdir = not isfile and child.isdir()
        # BUGFIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception like walk()
        # and walkdirs() do.
        except Exception:
            if errors == 'ignore':
                continue
            elif errors == 'warn':
                # BUGFIX: report the child that failed (the parent was
                # already listed successfully), matching walk().
                warnings.warn(
                    "Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                continue
            else:
                raise
        if isfile:
            if pattern is None or child.fnmatch(pattern):
                yield child
        elif isdir:
            # Depth-first recursion into subdirectories.
            for f in child.walkfiles(pattern, errors):
                yield f
def fnmatch(self, pattern):
    """ Return True if path matches the given pattern.
    pattern - A filename pattern with wildcards,
    for example ``*.py``.
    """
    # Matches against the final path component only, not the whole path.
    return fnmatch.fnmatch(self.name, pattern)

def glob(self, pattern):
    """ Return a list of path objects that match the pattern.
    pattern - a path relative to this directory, with wildcards.
    For example, ``path('/users').glob('*/bin/*')`` returns a list
    of all the files users have in their bin directories.
    """
    cls = self.__class__
    return [cls(s) for s in glob.glob(_base(self / pattern))]

# --- Reading or writing an entire file at once.

def open(self, mode='r'):
    """ Open this file. Return a file object. """
    return file(self, mode)

def bytes(self):
    """ Open this file, read all bytes, return them as a string. """
    f = self.open('rb')
    try:
        return f.read()
    finally:
        f.close()

def write_bytes(self, bytes, append=False):
    """ Open this file and write the given bytes to it.
    Default behavior is to overwrite any existing file.
    Call p.write_bytes(bytes, append=True) to append instead.
    """
    def _write_bytes(bytes, append):
        # 'ab' appends; 'wb' truncates any existing file.
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            f.write(bytes)
        finally:
            f.close()
    # dry() is a module-level dry-run helper: logs the description and
    # presumably only runs _write_bytes when dry-run mode is off.
    dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
        _write_bytes, bytes, append)

def text(self, encoding=None, errors='strict'):
    r""" Open this file, read it in, return the content as a string.
    This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
    are automatically translated to '\n'.
    Optional arguments:
    encoding - The Unicode encoding (or character set) of
    the file. If present, the content of the file is
    decoded and returned as a unicode object; otherwise
    it is returned as an 8-bit str.
    errors - How to handle Unicode errors; see help(str.decode)
    for the options. Default is 'strict'.
    """
    if encoding is None:
        # 8-bit: _textmode is the module-level text-mode flag
        # ('U' on Python 2.3+, plain 'r' before that).
        f = self.open(_textmode)
        try:
            return f.read()
        finally:
            f.close()
    else:
        # Unicode
        f = codecs.open(self, 'r', encoding, errors)
        # (Note - Can't use 'U' mode here, since codecs.open
        # doesn't support 'U' mode, even in Python 2.3.)
        try:
            t = f.read()
        finally:
            f.close()
        # Normalize all standard (and Unicode-specific) line endings
        # to '\n' by hand, since universal-newline mode was not
        # available above.
        return (t.replace(u'\r\n', u'\n')
                 .replace(u'\r\x85', u'\n')
                 .replace(u'\r', u'\n')
                 .replace(u'\x85', u'\n')
                 .replace(u'\u2028', u'\n'))
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.write_lines | python | def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close() | r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L713-L776 | [
"def open(self, mode='r'):\n \"\"\" Open this file. Return a file object. \"\"\"\n return file(self, mode)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.read_md5 | python | def read_md5(self, hex=False):
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest() | Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L778-L798 | [
"def open(self, mode='r'):\n \"\"\" Open this file. Return a file object. \"\"\"\n return file(self, mode)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.

# Thin delegations to os.path.  The wrapped variants re-wrap the result
# in self.__class__ so calls can be chained; isabs and basename return
# plain values and are bound directly.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
    """ Clean up a filename by calling expandvars(),
    expanduser(), and normpath() on it.

    This is commonly everything needed to clean up a filename
    read from a configuration file, for example.
    """
    cleaned = self.expandvars()
    cleaned = cleaned.expanduser()
    return cleaned.normpath()

def _get_namebase(self):
    # Property getter: the file name with its last extension removed.
    return os.path.splitext(self.name)[0]

def _get_ext(self):
    # Property getter: the last extension, including the leading dot.
    return os.path.splitext(_base(self))[1]

def _get_drive(self):
    # Property getter: the drive specifier (empty on POSIX systems).
    return self.__class__(os.path.splitdrive(self)[0])
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
    """ p.splitpath() -> Return (p.parent, p.name). """
    head, tail = os.path.split(self)
    return self.__class__(head), tail

def splitdrive(self):
    """ p.splitdrive() -> Return (p.drive, <the rest of p>).

    Split the drive specifier from this path.  If there is
    no drive specifier, p.drive is empty, so the return value
    is simply (path(''), p).  This is always the case on Unix.
    """
    drive_part, remainder = os.path.splitdrive(self)
    return self.__class__(drive_part), remainder

def splitext(self):
    """ p.splitext() -> Return (p.stripext(), p.ext).

    Split the filename extension from this path and return
    the two parts.  Either part may be empty.

    The extension is everything from '.' to the end of the
    last path segment.  This has the property that if
    (a, b) == p.splitext(), then a + b == p.
    """
    stem, extension = os.path.splitext(self)
    return self.__class__(stem), extension

def stripext(self):
    """ p.stripext() -> Remove one file extension from the path.

    For example, path('/home/guido/python.tar.gz').stripext()
    returns path('/home/guido/python.tar').
    """
    stem, _extension = self.splitext()
    return stem
# splitunc/uncshare only exist where os.path provides splitunc
# (i.e. Windows builds of Python).
if hasattr(os.path, 'splitunc'):
    def splitunc(self):
        # Split off the UNC mount point (\\host\share); re-wrap it as a path.
        unc, rest = os.path.splitunc(self)
        return self.__class__(unc), rest

    def _get_uncshare(self):
        # Property getter for 'uncshare' below.
        unc, r = os.path.splitunc(self)
        return self.__class__(unc)

    uncshare = property(
        _get_uncshare, None, None,
        """ The UNC mount point for this path.
        This is empty for paths on local drives. """)
def joinpath(self, *args):
    """ Join two or more path components, adding a separator
    character (os.sep) if needed.  Returns a new path
    object.
    """
    joined = os.path.join(self, *args)
    return self.__class__(joined)

def splitall(self):
    """ Return a list of the path components in this path.

    The first item in the list will be a path.  Its value will be
    either os.curdir, os.pardir, empty, or the root directory of
    this path (for example, ``/`` or ``C:\\``).  The other items in
    the list will be strings.

    ``path.path.joinpath(*result)`` will yield the original path.
    """
    components = []
    current = self
    while current not in (os.curdir, os.pardir):
        previous = current
        current, tail = previous.splitpath()
        if current == previous:
            # splitpath made no progress: we reached the root component.
            break
        components.append(tail)
    components.append(current)
    # Components were collected leaf-first; return them root-first.
    return components[::-1]
def relpath(self):
    """ Return this path as a relative path,
    based from the current working directory.
    """
    cwd = self.__class__(os.getcwd())
    return cwd.relpathto(self)

def relpathto(self, dest):
    """ Return a relative path from self to dest.

    If there is no relative path from self to dest, for example if
    they reside on different drives in Windows, then this returns
    dest.abspath().
    """
    origin = self.abspath()
    dest = self.__class__(dest).abspath()

    # Segments are compared case-normalised (for case-insensitive
    # filesystems), but the returned path keeps dest's original case.
    orig_list = origin.normcase().splitall()
    # Don't normcase dest!  We want to preserve the case.
    dest_list = dest.splitall()

    if orig_list[0] != os.path.normcase(dest_list[0]):
        # Can't get here from there.
        return dest

    # Find the location where the two paths start to differ.
    i = 0
    for start_seg, dest_seg in zip(orig_list, dest_list):
        if start_seg != os.path.normcase(dest_seg):
            break
        i += 1

    # Now i is the point where the two paths diverge.
    # Need a certain number of "os.pardir"s to work up
    # from the origin to the point of divergence.
    segments = [os.pardir] * (len(orig_list) - i)
    # Need to add the diverging part of dest_list.
    segments += dest_list[i:]
    if len(segments) == 0:
        # If they happen to be identical, use os.curdir.
        relpath = os.curdir
    else:
        relpath = os.path.join(*segments)
    return self.__class__(relpath)
# --- Listing, searching, walking, and matching

def listdir(self, pattern=None):
    """ D.listdir() -> List of items in this directory.

    Use D.files() or D.dirs() instead if you want a listing
    of just files or just subdirectories.

    The elements of the list are path objects.

    With the optional 'pattern' argument, this only lists
    items whose names match the given pattern.
    """
    entries = os.listdir(self)
    if pattern is not None:
        entries = fnmatch.filter(entries, pattern)
    return [self / entry for entry in entries]

def dirs(self, pattern=None):
    """ D.dirs() -> List of this directory's subdirectories.

    The elements of the list are path objects.
    This does not walk recursively into subdirectories
    (but see :meth:`path.walkdirs`).

    With the optional ``pattern`` argument, this only lists
    directories whose names match the given pattern.  For
    example, ``d.dirs('build-*')``.
    """
    result = []
    for child in self.listdir(pattern):
        if child.isdir():
            result.append(child)
    return result

def files(self, pattern=None):
    """ D.files() -> List of the files in this directory.

    The elements of the list are path objects.
    This does not walk into subdirectories (see :meth:`path.walkfiles`).

    With the optional ``pattern`` argument, this only lists files
    whose names match the given pattern.  For example,
    ``d.files('*.pyc')``.
    """
    result = []
    for child in self.listdir(pattern):
        if child.isfile():
            result.append(child)
    return result
def walk(self, pattern=None, errors='strict'):
    """ D.walk() -> iterator over files and subdirs, recursively.

    The iterator yields path objects naming each child item of
    this directory and its descendants.  This requires that
    D.isdir().

    This performs a depth-first traversal of the directory tree.
    Each directory is returned just before all its children.

    The ``errors`` keyword argument controls behavior when an
    error occurs.  The default is ``strict``, which causes an
    exception.  The other allowed values are 'warn', which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        childList = self.listdir()
    except Exception:
        # Listing failed (permissions, vanished directory, ...):
        # apply the requested error policy.
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise
    for child in childList:
        # Pre-order: yield the child itself before descending into it.
        if pattern is None or child.fnmatch(pattern):
            yield child
        try:
            isdir = child.isdir()
        except Exception:
            if errors == 'ignore':
                isdir = False
            elif errors == 'warn':
                warnings.warn(
                    "Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                isdir = False
            else:
                raise
        if isdir:
            for item in child.walk(pattern, errors):
                yield item
def walkdirs(self, pattern=None, errors='strict'):
    """ D.walkdirs() -> iterator over subdirs, recursively.

    With the optional ``pattern`` argument, this yields only
    directories whose names match the given pattern.  For
    example, ``mydir.walkdirs('*test')`` yields only directories
    with names ending in ``test``.

    The ``errors`` keyword argument controls behavior when an
    error occurs.  The default is ``strict``, which causes an
    exception.  The other allowed values are ``warn``, which
    reports the error via ``warnings.warn()``, and ``ignore``.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        subdirs = self.dirs()
    except Exception:
        # Listing failed: 'strict' re-raises, 'warn' reports and stops,
        # 'ignore' just stops.
        if errors == 'strict':
            raise
        if errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
        return
    for subdir in subdirs:
        # Pre-order: yield the matching directory, then its descendants.
        if pattern is None or subdir.fnmatch(pattern):
            yield subdir
        for descendant in subdir.walkdirs(pattern, errors):
            yield descendant
def walkfiles(self, pattern=None, errors='strict'):
    """ D.walkfiles() -> iterator over files in D, recursively.

    The optional argument, ``pattern``, limits the results to files
    with names that match the pattern.  For example,
    ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
    extension.

    The ``errors`` keyword argument controls behavior when an error
    occurs: ``strict`` (default) re-raises, ``warn`` reports via
    ``warnings.warn()``, ``ignore`` skips silently.
    """
    if errors not in ('strict', 'warn', 'ignore'):
        raise ValueError("invalid errors parameter")
    try:
        childList = self.listdir()
    except Exception:
        if errors == 'ignore':
            return
        elif errors == 'warn':
            warnings.warn(
                "Unable to list directory '%s': %s"
                % (self, sys.exc_info()[1]),
                TreeWalkWarning)
            return
        else:
            raise
    for child in childList:
        try:
            isfile = child.isfile()
            isdir = not isfile and child.isdir()
        # BUGFIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception for
        # consistency with walk() and walkdirs().
        except Exception:
            if errors == 'ignore':
                continue
            elif errors == 'warn':
                warnings.warn(
                    "Unable to access '%s': %s"
                    # BUGFIX: report the child that failed, not self
                    # (matches the message emitted by walk()).
                    % (child, sys.exc_info()[1]),
                    TreeWalkWarning)
                continue
            else:
                raise
        if isfile:
            if pattern is None or child.fnmatch(pattern):
                yield child
        elif isdir:
            # Recurse into subdirectories, propagating pattern and policy.
            for f in child.walkfiles(pattern, errors):
                yield f
def fnmatch(self, pattern):
    """ Return True if self.name matches the given pattern.

    pattern - A filename pattern with wildcards,
        for example ``*.py``.
    """
    # 'fnmatch' here resolves to the module: method names are class
    # attributes and do not shadow globals inside a function body.
    return fnmatch.fnmatch(self.name, pattern)

def glob(self, pattern):
    """ Return a list of path objects that match the pattern.

    pattern - a path relative to this directory, with wildcards.

    For example, ``path('/users').glob('*/bin/*')`` returns a list
    of all the files users have in their bin directories.
    """
    wrap = self.__class__
    matches = glob.glob(_base(self / pattern))
    return [wrap(match) for match in matches]
# --- Reading or writing an entire file at once.

def open(self, mode='r'):
    """ Open this file.  Return a file object. """
    # Uses the Python 2 'file' builtin; 'mode' is passed through unchanged.
    return file(self, mode)

def bytes(self):
    """ Open this file, read all bytes, return them as a string. """
    f = self.open('rb')
    try:
        return f.read()
    finally:
        # Always release the handle, even if read() raises.
        f.close()
def write_bytes(self, bytes, append=False):
    """ Open this file and write the given bytes to it.

    Default behavior is to overwrite any existing file.
    Call p.write_bytes(bytes, append=True) to append instead.
    """
    def _write_bytes(bytes, append):
        # Inner worker so the actual write can be routed through dry().
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            f.write(bytes)
        finally:
            f.close()
    # dry() is defined elsewhere in this module -- presumably it logs the
    # description and skips the call in dry-run mode; TODO confirm.
    dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
        _write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
    r""" Open this file, read it in, return the content as a string.

    This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
    are automatically translated to '\n'.

    Optional arguments:

    encoding - The Unicode encoding (or character set) of
        the file.  If present, the content of the file is
        decoded and returned as a unicode object; otherwise
        it is returned as an 8-bit str.
    errors - How to handle Unicode errors; see help(str.decode)
        for the options.  Default is 'strict'.
    """
    if encoding is None:
        # 8-bit
        f = self.open(_textmode)
        try:
            return f.read()
        finally:
            f.close()
    else:
        # Unicode
        f = codecs.open(self, 'r', encoding, errors)
        # (Note - Can't use 'U' mode here, since codecs.open
        # doesn't support 'U' mode, even in Python 2.3.)
        try:
            t = f.read()
        finally:
            f.close()
        # Normalise every recognised line terminator (including the
        # Unicode NEL and LINE SEPARATOR forms) to '\n'.
        return (t.replace(u'\r\n', u'\n')
                 .replace(u'\r\x85', u'\n')
                 .replace(u'\r', u'\n')
                 .replace(u'\x85', u'\n')
                 .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
    r""" Write the given text to this file.

    The default behavior is to overwrite any existing file;
    to append instead, use the 'append=True' keyword argument.

    There are two differences between path.write_text() and
    path.write_bytes(): newline handling and Unicode handling.
    See below.

    Parameters:

    - text - str/unicode - The text to be written.
    - encoding - str - The Unicode encoding that will be used.
        This is ignored if 'text' isn't a Unicode string.
    - errors - str - How to handle Unicode encoding errors.
        Default is 'strict'.  See help(unicode.encode) for the
        options.  This is ignored if 'text' isn't a Unicode
        string.
    - linesep - keyword argument - str/unicode - The sequence of
        characters to be used to mark end-of-line.  The default is
        os.linesep.  You can also specify None; this means to
        leave all newlines as they are in 'text'.
    - append - keyword argument - bool - Specifies what to do if
        the file already exists (True: append to the end of it;
        False: overwrite it.)  The default is False.

    --- Newline handling.

    write_text() converts all standard end-of-line sequences
    ('\n', '\r', and '\r\n') to your platform's default end-of-line
    sequence (see os.linesep; on Windows, for example, the
    end-of-line marker is '\r\n').

    If you don't like your platform's default, you can override it
    using the 'linesep=' keyword argument.  If you specifically want
    write_text() to preserve the newlines as-is, use 'linesep=None'.

    This applies to Unicode text the same as to 8-bit text, except
    there are three additional standard Unicode end-of-line sequences:
    u'\x85', u'\r\x85', and u'\u2028'.

    (This is slightly different from when you open a file for
    writing with fopen(filename, "w") in C or file(filename, 'w')
    in Python.)

    --- Unicode

    If 'text' isn't Unicode, then apart from newline handling, the
    bytes are written verbatim to the file.  The 'encoding' and
    'errors' arguments are not used and must be omitted.

    If 'text' is Unicode, it is first converted to bytes using the
    specified 'encoding' (or the default encoding if 'encoding'
    isn't specified).  The 'errors' argument applies only to this
    conversion.
    """
    if isinstance(text, unicode):
        if linesep is not None:
            # Convert all standard end-of-line sequences to
            # ordinary newline characters.
            text = (text.replace(u'\r\n', u'\n')
                        .replace(u'\r\x85', u'\n')
                        .replace(u'\r', u'\n')
                        .replace(u'\x85', u'\n')
                        .replace(u'\u2028', u'\n'))
            text = text.replace(u'\n', linesep)
        if encoding is None:
            encoding = sys.getdefaultencoding()
        bytes = text.encode(encoding, errors)
    else:
        # It is an error to specify an encoding if 'text' is
        # an 8-bit string.
        assert encoding is None
        if linesep is not None:
            text = (text.replace('\r\n', '\n')
                        .replace('\r', '\n'))
            bytes = text.replace('\n', linesep)
        else:
            # BUGFIX: with linesep=None the 8-bit branch previously left
            # 'bytes' unbound, raising NameError below; linesep=None means
            # "write newlines as-is", so the text is the payload.
            bytes = text
    self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
    r""" Open this file, read all lines, return them in a list.

    Optional arguments:

    encoding - The Unicode encoding (or character set) of
        the file.  The default is None, meaning the content
        of the file is read as 8-bit characters and returned
        as a list of (non-Unicode) str objects.
    errors - How to handle Unicode errors; see help(str.decode)
        for the options.  Default is 'strict'.
    retain - If true, retain newline characters; but all newline
        character combinations ('\r', '\n', '\r\n') are
        translated to '\n'.  If false, newline characters are
        stripped off.  Default is True.

    This uses 'U' mode in Python 2.3 and later.
    """
    if encoding is not None or not retain:
        # Decode and/or strip line endings via text();
        # splitlines(retain) keeps or drops the normalised '\n'.
        return self.text(encoding, errors).splitlines(retain)
    handle = self.open(_textmode)
    try:
        return handle.readlines()
    finally:
        handle.close()
def write_lines(self, lines, encoding=None, errors='strict',
                linesep=os.linesep, append=False):
    r""" Write the given lines of text to this file.

    By default this overwrites any existing file at this path.

    This puts a platform-specific newline sequence on every line.
    See 'linesep' below.

    lines - A list of strings.

    encoding - A Unicode encoding to use.  This applies only if
        'lines' contains any Unicode strings.

    errors - How to handle errors in Unicode encoding.  This
        also applies only to Unicode strings.

    linesep - The desired line-ending.  This line-ending is
        applied to every line.  If a line already has any
        standard line ending ('\r', '\n', '\r\n', u'\x85',
        u'\r\x85', u'\u2028'), that will be stripped off and
        this will be used instead.  The default is os.linesep,
        which is platform-dependent ('\r\n' on Windows, '\n' on
        Unix, etc.)  Specify None to write the lines as-is,
        like file.writelines().

    Use the keyword argument append=True to append lines to the
    file.  The default is to overwrite the file.  Warning:
    When you use this with Unicode data, if the encoding of the
    existing data in the file is different from the encoding
    you specify with the encoding= parameter, the result is
    mixed-encoding data, which can really confuse someone trying
    to read the file later.
    """
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    f = self.open(mode)
    try:
        for line in lines:
            isUnicode = isinstance(line, unicode)
            if linesep is not None:
                # Strip off any existing line-end and add the
                # specified linesep string.
                if isUnicode:
                    # u'\x0d\x85' is u'\r\x85' (CR + Unicode NEL).
                    if line[-2:] in (u'\r\n', u'\x0d\x85'):
                        line = line[:-2]
                    elif line[-1:] in (u'\r', u'\n',
                                       u'\x85', u'\u2028'):
                        line = line[:-1]
                else:
                    if line[-2:] == '\r\n':
                        line = line[:-2]
                    elif line[-1:] in ('\r', '\n'):
                        line = line[:-1]
                line += linesep
            if isUnicode:
                if encoding is None:
                    encoding = sys.getdefaultencoding()
                line = line.encode(encoding, errors)
            f.write(line)
    finally:
        f.close()
# --- Methods for querying the filesystem.

# Plain os.path predicates bound as class attributes; they behave as
# methods (p.exists(), p.isfile(), ...).
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount

def isdir(self):
    # isdir is a built-in on windows, need to wrap
    return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__

if hasattr(os.path, 'samefile'):
    samefile = os.path.samefile

# Timestamp accessors -- also bound functions, so they must be CALLED:
# p.mtime(), not p.mtime.
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
    """ Return True if this path was modified more recently than *other*. """
    # BUGFIX: mtime is os.path.getmtime bound as a class attribute, so
    # self.mtime is a *bound method*, not a timestamp.  The previous code
    # compared the method objects themselves (self.mtime > other.mtime),
    # which never compared modification times; the accessors must be called.
    return self.mtime() > other.mtime()
# File size in bytes; bound function, call as p.size().
size = os.path.getsize

if hasattr(os, 'access'):
    def access(self, mode):
        """ Return true if current user has access to this path.

        mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
        """
        return os.access(self, mode)

def stat(self):
    """ Perform a stat() system call on this path. """
    return os.stat(self)

def lstat(self):
    """ Like path.stat(), but do not follow symbolic links. """
    return os.lstat(self)
def owner(self):
    r""" Return the name of the owner of this file or directory.

    This follows symbolic links.

    On Windows, this returns a name of the form ur'DOMAIN\User Name'.
    On Windows, a group can own a file or directory.
    """
    # win32security and pwd are module-level names -- presumably imported
    # with try/except fallbacks to None at the top of the file; confirm.
    if os.name == 'nt':
        if win32security is None:
            raise Exception("path.owner requires win32all to be installed")
        desc = win32security.GetFileSecurity(
            self, win32security.OWNER_SECURITY_INFORMATION)
        sid = desc.GetSecurityDescriptorOwner()
        account, domain, typecode = win32security.LookupAccountSid(None, sid)
        return domain + u'\\' + account
    else:
        if pwd is None:
            raise NotImplementedError("path.owner is not implemented on this platform.")
        st = self.stat()
        return pwd.getpwuid(st.st_uid).pw_name

if hasattr(os, 'statvfs'):
    def statvfs(self):
        """ Perform a statvfs() system call on this path. """
        return os.statvfs(self)

if hasattr(os, 'pathconf'):
    def pathconf(self, name):
        # Delegate to os.pathconf (POSIX configuration values by name).
        return os.pathconf(self, name)
# --- Modifying operations on files and directories

# Every mutating operation below is wrapped with @dry_guard (defined
# elsewhere in this module) -- presumably a no-op in dry-run mode; confirm.

@dry_guard
def utime(self, times):
    """ Set the access and modified times of this file. """
    os.utime(self, times)
# Replace the short docstring above with the full os.utime documentation.
utime.__doc__ = os.utime.__doc__

@dry_guard
def chmod(self, mode):
    os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__

if hasattr(os, 'chown'):
    @dry_guard
    def chown(self, uid, gid):
        os.chown(self, uid, gid)
    chown.__doc__ = os.chown.__doc__

@dry_guard
def rename(self, new):
    os.rename(self, new)
rename.__doc__ = os.rename.__doc__

@dry_guard
def renames(self, new):
    os.renames(self, new)
renames.__doc__ = os.renames.__doc__

# --- Create/delete operations on directories
# Mode defaults are Python 2 octal literals (0777 == rwxrwxrwx,
# subject to the process umask).

@dry_guard
def mkdir(self, mode=0777):
    os.mkdir(self, mode)

@dry_guard
def makedirs(self, mode=0777):
    os.makedirs(self, mode)

@dry_guard
def ensure_dir(self, mode=0777):
    """
    Make sure the directory exists, create if necessary.
    """
    # NOTE(review): 'not exists or not isdir' reduces to 'not isdir';
    # if a non-directory exists at this path, os.makedirs will raise.
    if not self.exists() or not self.isdir():
        os.makedirs(self, mode)

@dry_guard
def rmdir(self):
    os.rmdir(self)

@dry_guard
def removedirs(self):
    os.removedirs(self)
# --- Modifying operations on files

@dry_guard
def touch(self):
    """ Set the access/modified times of this file to the current time.
    Create the file if it does not exist.
    """
    # O_CREAT creates the file if missing; 0666 is a Python 2 octal
    # literal (rw-rw-rw-, subject to the process umask).
    fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
    os.close(fd)
    # utime(None) sets both timestamps to "now".
    os.utime(self, None)

@dry_guard
def remove(self):
    os.remove(self)

@dry_guard
def unlink(self):
    os.unlink(self)

# --- Links
# Each link operation only exists where the underlying os function does.

if hasattr(os, 'link'):
    @dry_guard
    def link(self, newpath):
        """ Create a hard link at 'newpath', pointing to this file. """
        os.link(self, newpath)

if hasattr(os, 'symlink'):
    @dry_guard
    def symlink(self, newlink):
        """ Create a symbolic link at 'newlink', pointing here. """
        os.symlink(self, newlink)

if hasattr(os, 'readlink'):
    def readlink(self):
        """ Return the path to which this symbolic link points.
        The result may be an absolute or a relative path.
        """
        return self.__class__(os.readlink(self))

    def readlinkabs(self):
        """ Return the path to which this symbolic link points.
        The result is always an absolute path.
        """
        p = self.readlink()
        if p.isabs():
            return p
        else:
            # Relative target: resolve against this link's directory.
            return (self.parent / p).abspath()
# --- High-level functions from shutil
# Each shutil operation is wrapped with dry_guard so it can be skipped
# in dry-run mode (dry_guard is defined elsewhere in this module).
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
    move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)

# --- Convenience for scriptine

@dry_guard
def install(self, to, chmod=0644):
    """
    Copy data and set mode to 'chmod'.
    """
    # chmod default is a Python 2 octal literal (rw-r--r--).
    self.copy(to)
    path(to).chmod(chmod)

# --- Special stuff from os

if hasattr(os, 'chroot'):
    def chroot(self):
        # Change the process root directory to this path (where supported).
        os.chroot(self)

if hasattr(os, 'startfile'):
    def startfile(self):
        # Open this file with its associated application (Windows only).
        os.startfile(self)

# --- contextmanagers

# contextlib may be missing on very old Pythons; in that case
# as_working_dir is simply not defined.
try:
    from contextlib import contextmanager
    @contextmanager
    def as_working_dir(self):
        """
        temporarily change into this directory

        >>> with path('/').as_working_dir():
        ...     assert path.cwd() == '/'
        >>> assert path.cwd() != '/'
        """
        current_dir = path(os.curdir).abspath()
        os.chdir(self)
        try:
            yield
        finally:
            # Restore the original working directory even on error.
            os.chdir(current_dir)
except ImportError:
    pass
|
olt/scriptine | scriptine/_path.py | path.owner | python | def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name | r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L841-L861 | [
"def stat(self):\n \"\"\" Perform a stat() system call on this path. \"\"\"\n return os.stat(self)\n"
] | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional ``pattern`` argument, this yields only
        directories whose names match the given pattern. For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in ``test``.
        The ``errors`` keyword argument controls behavior when an
        error occurs. The default is ``strict``, which causes an
        exception. The other allowed values are ``warn``, which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        # Fail fast on a typo instead of silently behaving like 'strict'.
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            # Could not read this directory at all; apply the errors policy.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # The pattern filters only what is *yielded*; recursion still
            # descends into every subdirectory.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if path matches the given pattern.
        pattern - A filename pattern with wildcards,
            for example ``*.py``.
        """
        # Only the final component (self.name) is matched, not the full path.
        return fnmatch.fnmatch(self.name, pattern)
    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.
        pattern - a path relative to this directory, with wildcards.
        For example, ``path('/users').glob('*/bin/*')`` returns a list
        of all the files users have in their bin directories.
        """
        cls = self.__class__
        # _base is presumably the str/unicode base class chosen at module
        # top -- TODO confirm.  Converting first keeps glob.glob from seeing
        # the path subclass; matches are then re-wrapped as path objects.
        return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
    def open(self, mode='r'):
        """ Open this file. Return a file object.

        NOTE(review): uses the Python 2 ``file`` builtin; a Python 3 port
        would need the ``open`` builtin / ``io.open`` instead.
        """
        return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.
        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        def _write_bytes(bytes, append):
            # The actual write, deferred as a closure so that dry-run mode
            # can log the action without performing it.
            if append:
                mode = 'ab'
            else:
                mode = 'wb'
            f = self.open(mode)
            try:
                f.write(bytes)
            finally:
                f.close()
        # dry() is defined elsewhere in this module; presumably it logs the
        # description and only invokes the callable when not in dry-run
        # mode -- TODO confirm against the module top.
        dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
            _write_bytes, bytes, append)
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.
        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file. If present, the content of the file is
            decoded and returned as a unicode object; otherwise
            it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options. Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            # _textmode is presumably 'U' (universal newlines) where
            # supported, else 'r' -- defined at module top, TODO confirm.
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # Normalize every recognized line ending (including Unicode NEL
            # U+0085 and LINE SEPARATOR U+2028) to '\n'.
            return (t.replace(u'\r\n', u'\n')
                    .replace(u'\r\x85', u'\n')
                    .replace(u'\r', u'\n')
                    .replace(u'\x85', u'\n')
                    .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
    def lines(self, encoding=None, errors='strict', retain=True):
        r""" Open this file, read all lines, return them in a list.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file. The default is None, meaning the content
            of the file is read as 8-bit characters and returned
            as a list of (non-Unicode) str objects.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options. Default is 'strict'
        retain - If true, retain newline characters; but all newline
            character combinations ('\r', '\n', '\r\n') are
            translated to '\n'. If false, newline characters are
            stripped off. Default is True.
        This uses 'U' mode in Python 2.3 and later.
        """
        if encoding is None and retain:
            # Fast path: the universal-newlines text mode already folds
            # platform line endings to '\n' for us.
            f = self.open(_textmode)
            try:
                return f.readlines()
            finally:
                f.close()
        else:
            # Let text() handle decoding and newline normalization, then
            # split; splitlines(retain) keeps or drops the '\n' markers.
            return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
    def isdir(self):
        # isdir is a built-in on windows, need to wrap: unlike the plain
        # functions assigned above (isfile, islink, ...), a builtin cannot
        # be used directly as a method there.
        return os.path.isdir(self)
    # Borrow the stdlib docstring so help(path.isdir) stays informative.
    isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
    def newer(self, other):
        """Return True if this path was modified more recently than *other*.

        Compares the mtime properties (os.path.getmtime) of the two paths;
        both must exist or the underlying stat call raises OSError.
        """
        return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
    @dry_guard
    def ensure_dir(self, mode=0777):
        """
        Make sure the directory exists, create if necessary.

        mode - permission bits for any directories created (moderated by
            the process umask; Python 2 octal literal).
        """
        # NOTE(review): the exists() check is redundant -- isdir() is False
        # whenever the path does not exist.  Also, if the path exists but is
        # a *file*, os.makedirs raises OSError rather than replacing it;
        # confirm that is the intended behavior.
        if not self.exists() or not self.isdir():
            os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
    @dry_guard
    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        # O_CREAT without O_EXCL: opening an existing file is fine; the
        # 0666 creation mode (Python 2 octal) is moderated by the umask.
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
        os.close(fd)
        # utime(..., None) sets both timestamps to the current time.
        os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
"""
Copy data and set mode to 'chmod'.
"""
self.copy(to)
path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
    try:
        from contextlib import contextmanager
        @contextmanager
        def as_working_dir(self):
            """
            temporarily change into this directory
            >>> with path('/').as_working_dir():
            ...     assert path.cwd() == '/'
            >>> assert path.cwd() != '/'
            """
            # Remember where we were so the chdir can always be undone,
            # even when the body of the with-block raises.
            current_dir = path(os.curdir).abspath()
            os.chdir(self)
            try:
                yield
            finally:
                os.chdir(current_dir)
    except ImportError:
        # contextlib is unavailable (very old Python); omit the helper.
        pass
|
olt/scriptine | scriptine/_path.py | path.ensure_dir | python | def ensure_dir(self, mode=0777):
if not self.exists() or not self.isdir():
os.makedirs(self, mode) | Make sure the directory exists, create if necessary. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L914-L919 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
    def __repr__(self):
        """Unambiguous representation, e.g. ``path('/tmp')``."""
        return 'path(%s)' % _base.__repr__(self)
    # Adding a path and a string yields a path (plain concatenation,
    # no separator is inserted).
    def __add__(self, more):
        try:
            resultStr = _base.__add__(self, more)
        except TypeError: #Python bug
            # Some Python versions raise TypeError from str.__add__ instead
            # of returning NotImplemented; normalize to NotImplemented so
            # the reflected operation still gets a chance.
            resultStr = NotImplemented
        if resultStr is NotImplemented:
            return resultStr
        return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
    @classmethod
    def cwd(cls):
        """ Return the current working directory as a path object. """
        # _getcwd is selected at module import time (presumably os.getcwdu
        # on Python 2 when available, else os.getcwd) -- TODO confirm.
        return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
    def splitdrive(self):
        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
        Split the drive specifier from this path. If there is
        no drive specifier, p.drive is empty, so the return value
        is simply (path(''), p). This is always the case on Unix.
        """
        drive, rel = os.path.splitdrive(self)
        # Only the drive part is re-wrapped as a path object; the remainder
        # stays a plain string, mirroring splitpath()/splitext().
        return self.__class__(drive), rel
    def splitext(self):
        """ p.splitext() -> Return (p.stripext(), p.ext).
        Split the filename extension from this path and return
        the two parts. Either part may be empty.
        The extension is everything from '.' to the end of the
        last path segment. This has the property that if
        (a, b) == p.splitext(), then a + b == p.
        """
        filename, ext = os.path.splitext(self)
        # The stem is re-wrapped as a path; the extension stays a str.
        return self.__class__(filename), ext
    def stripext(self):
        """ p.stripext() -> Remove one file extension from the path.
        For example, path('/home/guido/python.tar.gz').stripext()
        returns path('/home/guido/python.tar').
        """
        # Only the *last* extension is removed; delegate to splitext().
        return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
    def splitall(self):
        """ Return a list of the path components in this path.
        The first item in the list will be a path. Its value will be
        either os.curdir, os.pardir, empty, or the root directory of
        this path (for example, ``/`` or ``C:\\``). The other items in
        the list will be strings.
        ``path.path.joinpath(*result)`` will yield the original path.
        """
        parts = []
        loc = self
        # Repeatedly strip the last component until splitpath() stops
        # making progress (loc == prev), which happens at the root or at
        # a bare drive/UNC prefix.
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            if loc == prev:
                break
            parts.append(child)
        parts.append(loc)
        # Components were collected leaf-first; restore root-first order.
        parts.reverse()
        return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
    def relpathto(self, dest):
        """ Return a relative path from self to dest.
        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()
        # Compare case-insensitively on case-preserving filesystems by
        # normcasing the origin once and each dest segment on the fly.
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()
        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)
# --- Listing, searching, walking, and matching
    def listdir(self, pattern=None):
        """ D.listdir() -> List of items in this directory.
        Use D.files() or D.dirs() instead if you want a listing
        of just files or just subdirectories.
        The elements of the list are path objects.
        With the optional 'pattern' argument, this only lists
        items whose names match the given pattern.
        """
        names = os.listdir(self)
        if pattern is not None:
            # Filter on bare names (not full paths) with shell-style globs.
            names = fnmatch.filter(names, pattern)
        # Re-join with self so callers get full paths, not bare names.
        return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
    def files(self, pattern=None):
        """ D.files() -> List of the files in this directory.
        The elements of the list are path objects.
        This does not walk into subdirectories (see :meth:`path.walkfiles`).
        With the optional ``pattern`` argument, this only lists files
        whose names match the given pattern. For example,
        ``d.files('*.pyc')``.
        """
        # Non-files (directories, broken symlinks, ...) are filtered out.
        return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.
        The iterator yields path objects naming each child item of
        this directory and its descendants. This requires that
        D.isdir().
        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.
        The ``errors`` keyword argument controls behavior when an
        error occurs. The default is ``strict``, which causes an
        exception. The other allowed values are 'warn', which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        # Fail fast on a typo instead of silently behaving like 'strict'.
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            # Could not read this directory at all; apply the errors policy.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            # The pattern filters only what is *yielded*; recursion below
            # still descends into non-matching directories.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                # stat() on the child failed (e.g. permission denied or a
                # dangling symlink); treat as non-directory unless strict.
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional ``pattern`` argument, this yields only
        directories whose names match the given pattern. For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in ``test``.
        The ``errors`` keyword argument controls behavior when an
        error occurs. The default is ``strict``, which causes an
        exception. The other allowed values are ``warn``, which
        reports the error via ``warnings.warn()``, and ``ignore``.
        """
        # Fail fast on a typo instead of silently behaving like 'strict'.
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            # Could not read this directory at all; apply the errors policy.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # The pattern filters only what is *yielded*; recursion still
            # descends into every subdirectory.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if path matches the given pattern.
        pattern - A filename pattern with wildcards,
            for example ``*.py``.
        """
        # Only the final component (self.name) is matched, not the full path.
        return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
    def open(self, mode='r'):
        """ Open this file. Return a file object.

        NOTE(review): uses the Python 2 ``file`` builtin; a Python 3 port
        would need the ``open`` builtin / ``io.open`` instead.
        """
        return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
                linesep=os.linesep, append=False):
    r""" Write the given lines of text to this file.
    By default this overwrites any existing file at this path.
    This puts a platform-specific newline sequence on every line.
    See 'linesep' below.
    lines - A list of strings.
    encoding - A Unicode encoding to use. This applies only if
        'lines' contains any Unicode strings.
    errors - How to handle errors in Unicode encoding. This
        also applies only to Unicode strings.
    linesep - The desired line-ending. This line-ending is
        applied to every line. If a line already has any
        standard line ending ('\r', '\n', '\r\n', u'\x85',
        u'\r\x85', u'\u2028'), that will be stripped off and
        this will be used instead. The default is os.linesep,
        which is platform-dependent ('\r\n' on Windows, '\n' on
        Unix, etc.) Specify None to write the lines as-is,
        like file.writelines().
    Use the keyword argument append=True to append lines to the
    file. The default is to overwrite the file. Warning:
    When you use this with Unicode data, if the encoding of the
    existing data in the file is different from the encoding
    you specify with the encoding= parameter, the result is
    mixed-encoding data, which can really confuse someone trying
    to read the file later.
    """
    # Binary mode so the linesep we splice in is written verbatim,
    # without the platform's text-mode newline translation.
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    f = self.open(mode)
    try:
        for line in lines:
            isUnicode = isinstance(line, unicode)
            if linesep is not None:
                # Strip off any existing line-end and add the
                # specified linesep string.
                if isUnicode:
                    # Unicode adds NEL-based endings (u'\x85',
                    # u'\r\x85' == u'\x0d\x85') and u'\u2028'.
                    if line[-2:] in (u'\r\n', u'\x0d\x85'):
                        line = line[:-2]
                    elif line[-1:] in (u'\r', u'\n',
                                       u'\x85', u'\u2028'):
                        line = line[:-1]
                else:
                    if line[-2:] == '\r\n':
                        line = line[:-2]
                    elif line[-1:] in ('\r', '\n'):
                        line = line[:-1]
                line += linesep
            if isUnicode:
                # Encode only Unicode lines; 8-bit strings are
                # written verbatim (same rule as write_text).
                if encoding is None:
                    encoding = sys.getdefaultencoding()
                line = line.encode(encoding, errors)
            f.write(line)
    finally:
        f.close()
def read_md5(self, hex=False):
    """ Compute the md5 hash of this file's content.

    hex - when true, return the digest as a hex string instead of
        the raw digest bytes.

    The entire file is read, in 8 KB chunks.
    """
    digest = hashlib.md5()
    stream = self.open('rb')
    try:
        chunk = stream.read(8192)
        while chunk:
            digest.update(chunk)
            chunk = stream.read(8192)
    finally:
        stream.close()
    if hex:
        return digest.hexdigest()
    return digest.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
# NOTE(review): @dry_guard was accidentally stacked twice here,
# double-wrapping the method in the dry-run guard; the duplicate
# copy of this class applies it exactly once, so do the same.
@dry_guard
def rmdir(self):
    """ Remove this directory; it must be empty (see os.rmdir). """
    os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
def install(self, to, chmod=0644):
    """
    Copy this file's data to 'to' and set the copy's mode to 'chmod'.

    to - destination path.
    chmod - permission bits applied to the destination
        (octal; default 0644).
    """
    self.copy(to)
    path(to).chmod(chmod)
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# --- contextmanagers
try:
from contextlib import contextmanager
@contextmanager
def as_working_dir(self):
"""
temporarily change into this directory
>>> with path('/').as_working_dir():
... assert path.cwd() == '/'
>>> assert path.cwd() != '/'
"""
current_dir = path(os.curdir).abspath()
os.chdir(self)
try:
yield
finally:
os.chdir(current_dir)
except ImportError:
pass
|
olt/scriptine | scriptine/_path.py | path.install | python | def install(self, to, chmod=0644):
self.copy(to)
path(to).chmod(chmod) | Copy data and set mode to 'chmod'. | train | https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L999-L1004 | null | class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __rdiv__(self, rel):
return self.__class__(os.path.join(rel, self))
__rtruediv__ = __rdiv__
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``/`` or ``C:\\``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see :meth:`path.walkdirs`).
With the optional ``pattern`` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see :meth:`path.walkfiles`).
With the optional ``pattern`` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are 'warn', which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional ``pattern`` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``test``.
The ``errors`` keyword argument controls behavior when an
error occurs. The default is ``strict``, which causes an
exception. The other allowed values are ``warn``, which
reports the error via ``warnings.warn()``, and ``ignore``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, ``pattern``, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if path matches the given pattern.
pattern - A filename pattern with wildcards,
for example ``*.py``.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
def _write_bytes(bytes, append):
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
dry("write_bytes %s '%r...' append=%r" % (self, bytes[:20], append),
_write_bytes, bytes, append)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
def isdir(self):
# isdir is a built-in on windows, need to wrap
return os.path.isdir(self)
isdir.__doc__ = os.path.isdir.__doc__
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
atime = os.path.getatime
mtime = os.path.getmtime
ctime = os.path.getctime
def newer(self, other):
return self.mtime > other.mtime
size = os.path.getsize
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
@dry_guard
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
utime.__doc__ = os.utime.__doc__
@dry_guard
def chmod(self, mode):
os.chmod(self, mode)
chmod.__doc__ = os.chmod.__doc__
if hasattr(os, 'chown'):
@dry_guard
def chown(self, uid, gid):
os.chown(self, uid, gid)
chown.__doc__ = os.chown.__doc__
@dry_guard
def rename(self, new):
os.rename(self, new)
rename.__doc__ = os.rename.__doc__
@dry_guard
def renames(self, new):
os.renames(self, new)
renames.__doc__ = os.renames.__doc__
# --- Create/delete operations on directories
@dry_guard
def mkdir(self, mode=0777):
os.mkdir(self, mode)
@dry_guard
def makedirs(self, mode=0777):
os.makedirs(self, mode)
@dry_guard
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode)
@dry_guard
def rmdir(self):
os.rmdir(self)
@dry_guard
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
@dry_guard
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
@dry_guard
def remove(self):
os.remove(self)
@dry_guard
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
@dry_guard
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
@dry_guard
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = dry_guard(shutil.copyfile)
copymode = dry_guard(shutil.copymode)
copystat = dry_guard(shutil.copystat)
copy = dry_guard(shutil.copy)
copy2 = dry_guard(shutil.copy2)
copytree = dry_guard(shutil.copytree)
if hasattr(shutil, 'move'):
move = dry_guard(shutil.move)
rmtree = dry_guard(shutil.rmtree)
# --- Convenience for scriptine
@dry_guard
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
    # --- contextmanagers
    try:
        # contextlib appeared in Python 2.5; on older interpreters the
        # import fails and this helper is simply not defined.
        from contextlib import contextmanager
        @contextmanager
        def as_working_dir(self):
            """
            Temporarily change into this directory.

            The previous working directory is restored on exit, even if
            the body of the with-block raises.

            >>> with path('/').as_working_dir():
            ...     assert path.cwd() == '/'
            >>> assert path.cwd() != '/'
            """
            # Remember where we were, as an absolute path, before moving.
            current_dir = path(os.curdir).abspath()
            os.chdir(self)
            try:
                yield
            finally:
                # Always return to the original directory.
                os.chdir(current_dir)
    except ImportError:  # pre-2.5 Python: silently omit the helper
        pass
|
astroduff/commah | examples.py | runcommand | python | def runcommand(cosmology='WMAP5'):
    # NOTE(review): the 'def' line above and the 'return' line below are
    # fused with dataset-record metadata (repo | path | name | language,
    # and docstring | split | url fields) -- a serialization artifact of
    # this dump, not part of the code.  The body below is intact and
    # demonstrates typical commah.run() queries against the returned
    # structured array.
    # Return the WMAP5 cosmology concentration predicted for
    # z=0 range of masses
    Mi = [1e8, 1e9, 1e10]
    zi = 0
    print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
    output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
    print(output['c'].flatten())
    # Return the WMAP5 cosmology concentration predicted for
    # z=0 range of masses AND cosmological parameters
    Mi = [1e8, 1e9, 1e10]
    zi = 0
    print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
    # retcosmo=True makes run() also return the cosmology parameter dict
    output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,
                               retcosmo=True)
    print(output['c'].flatten())
    print(cosmo)
    # Return the WMAP5 cosmology concentration predicted for MW
    # mass (2e12 Msol) across redshift
    Mi = 2e12
    z = [0, 0.5, 1, 1.5, 2, 2.5]
    output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)
    for zval in z:
        # Filter the structured output by its 'z' column
        print("M(z=0)=%s has c(z=%s)=%s"
              % (Mi, zval, output[output['z'] == zval]['c'].flatten()))
    # Return the WMAP5 cosmology concentration predicted for MW
    # mass (2e12 Msol) across redshift
    Mi = 2e12
    zi = [0, 0.5, 1, 1.5, 2, 2.5]
    output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
    for zval in zi:
        print("M(z=%s)=%s has concentration %s"
              % (zval, Mi, output[(output['zi'] == zval) &
                                  (output['z'] == zval)]['c'].flatten()))
    # Return the WMAP5 cosmology concentration and
    # rarity of high-z cluster
    Mi = 2e14
    zi = 6
    output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)
    print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi))
    print(output['c'].flatten())
    print("Mass variance sigma of haloes of mass %s at z=%s" % (Mi, zi))
    print(output['sig'].flatten())
    print("Fluctuation for haloes of mass %s at z=%s" % (Mi, zi))
    print(output['nu'].flatten())
    # Return the WMAP5 cosmology accretion rate prediction
    # for haloes at range of redshift and mass
    Mi = [1e8, 1e9, 1e10]
    zi = [0]
    z = [0, 0.5, 1, 1.5, 2, 2.5]
    output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)
    for Mval in Mi:
        print("dM/dt for halo of mass %s at z=%s across redshift %s is: "
              % (Mval, zi, z))
        print(output[output['Mi'] == Mval]['dMdt'].flatten())
    # Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8
    M = [1e8]
    z = [0, 0.5, 1, 1.5, 2, 2.5]
    print("Halo Mass History for z=0 mass of %s across z=%s" % (M, z))
    output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
    print(output['Mz'].flatten())
    # Return the WMAP5 cosmology formation redshifts for haloes at
    # range of redshift and mass
    M = [1e8, 1e9, 1e10]
    z = [0]
    print("Formation Redshifts for haloes of mass %s at z=%s" % (M, z))
    output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)
    for Mval in M:
        print(output[output['Mi'] == Mval]['zf'].flatten())
    return("Done") | Example interface commands | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/examples.py#L9-L90
"def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,\n filename=None, verbose=None, retcosmo=None):\n \"\"\" Run commah code on halo of mass 'Mi' at redshift 'zi' with\n accretion and profile history at higher redshifts 'z'\n This is based on Correa et al. (2015a,b,c)\n\n Parameters\n ----------\n cosmology : str or dict\n Can be named cosmology, default WMAP7 (aka DRAGONS), or\n DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15\n or dictionary similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n zi : float / numpy array, optional\n Redshift at which halo has mass 'Mi'. If float then all\n halo masses 'Mi' are assumed to be at this redshift.\n If array but Mi is float, then this halo mass is used across\n all starting redshifts. If both Mi and zi are arrays then they\n have to be the same size for one - to - one correspondence between\n halo mass and the redshift at which it has that mass. Default is 0.\n Mi : float / numpy array, optional\n Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'\n are solved for this halo mass. If array but zi is float, then this\n redshift is applied to all halo masses. If both Mi and zi are\n arrays then they have to be the same size for one - to - one\n correspondence between halo mass and the redshift at which it\n has that mass. Default is 1e12 Msol.\n z : float / numpy array, optional\n Redshift to solve commah code at. Must have zi<z else these steps\n are skipped. 
Default is False, meaning commah is solved at z=zi\n\n com : bool, optional\n If true then solve for concentration-mass,\n default is True.\n mah : bool, optional\n If true then solve for accretion rate and halo mass history,\n default is True.\n filename : bool / str, optional\n If str is passed this is used as a filename for output of commah\n verbose : bool, optional\n If true then give comments, default is None.\n retcosmo : bool, optional\n Return cosmological parameters used as a dict if retcosmo = True,\n default is None.\n\n Returns\n -------\n dataset : structured dataset\n dataset contains structured columns of size\n (size(Mi) > size(z)) by size(z)\n\n If mah = True and com = False then columns are\n ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)\n where 'zi' is the starting redshift, 'Mi' is halo mass at zi\n 'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]\n and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive\n at starting redshift 'zi'\n\n If mah = False and com = True then columns are\n ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)\n where 'zi' is the starting redshift, 'Mi' is halo mass at zi\n 'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo\n at the redshift 'z', 'sig' is the mass variance 'sigma',\n 'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',\n 'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'\n\n If mah = True and com = True then columns are:\n ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),\n ('c',float),('sig',float),('nu',float),('zf',float)\n\n file : structured dataset with name 'filename' if passed\n\n Raises\n ------\n Output -1\n If com = False and mah = False as user has to select something.\n Output -1\n If 'zi' and 'Mi' are arrays of unequal size. 
Impossible to match\n corresponding masses and redshifts of output.\n\n Examples\n --------\n Examples should be written in doctest format, and should illustrate how\n to use the function.\n\n >>> import examples\n >>> examples.runcommands() # A series of ways to query structured dataset\n >>> examples.plotcommands() # Examples to plot data\n\n \"\"\"\n\n # Check user choices...\n if not com and not mah:\n print(\"User has to choose com=True and / or mah=True \")\n return(-1)\n\n # Convert arrays / lists to np.array\n # and inflate redshift / mass axis\n # to match each other for later loop\n results = _checkinput(zi, Mi, z=z, verbose=verbose)\n\n # Return if results is -1\n if(results == -1):\n return(-1)\n # If not, unpack the returned iterable\n else:\n zi, Mi, z, lenz, lenm, lenzout = results\n # At this point we will have lenm objects to iterate over\n\n # Get the cosmological parameters for the given cosmology\n cosmo = getcosmo(cosmology)\n\n # Create output file if desired\n if filename:\n print(\"Output to file %r\" % (filename))\n fout = open(filename, 'wb')\n\n # Create the structured dataset\n try:\n if mah and com:\n if verbose:\n print(\"Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, \"\n \"zf\")\n if filename:\n fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z - \"\n \" Accretion - Final Halo - concentration - \"\n \" Mass - Peak - Formation z \"+'\\n')\n fout.write(\"# - mass - -\"\n \" rate - mass - - \"\n \" Variance - Height - \"+'\\n')\n fout.write(\"# - (M200) - - \"\n \" (dM/dt) - (M200) - - \"\n \" (sigma) - (nu) - \"+'\\n')\n fout.write(\"# - [Msol] - - \"\n \" [Msol/yr] - [Msol] - - \"\n \" - - \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float), ('dMdt', float),\n ('Mz', float), ('c', float), ('sig', float),\n ('nu', float), ('zf', float)])\n elif mah:\n if verbose:\n print(\"Output requested is zi, Mi, z, dMdt, Mz\")\n if filename:\n 
fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z -\"\n \" Accretion - Final Halo \"+'\\n')\n fout.write(\"# - mass - -\"\n \" rate - mass \"+'\\n')\n fout.write(\"# - (M200) - -\"\n \" (dm/dt) - (M200) \"+'\\n')\n fout.write(\"# - [Msol] - -\"\n \" [Msol/yr] - [Msol] \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float),\n ('dMdt', float), ('Mz', float)])\n else:\n if verbose:\n print(\"Output requested is zi, Mi, z, c, sig, nu, zf\")\n if filename:\n fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z - \"\n \" concentration - \"\n \" Mass - Peak - Formation z \"+'\\n')\n fout.write(\"# - mass - -\"\n \" -\"\n \" Variance - Height - \"+'\\n')\n fout.write(\"# - (M200) - - \"\n \" - \"\n \" (sigma) - (nu) - \"+'\\n')\n fout.write(\"# - [Msol] - - \"\n \" - \"\n \" - - \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float), ('c', float),\n ('sig', float), ('nu', float), ('zf', float)])\n\n # Now loop over the combination of initial redshift and halo mamss\n for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):\n if verbose:\n print(\"Output Halo of Mass Mi=%s at zi=%s\" % (Mval, zval))\n # For a given halo mass Mi at redshift zi need to know\n # output redshifts 'z'\n # Check that all requested redshifts are greater than\n # input redshift, except if z is False, in which case\n # only solve z at zi, i.e. 
remove a loop\n if z is False:\n ztemp = np.array(zval, ndmin=1, dtype=float)\n else:\n ztemp = np.array(z[z >= zval], dtype=float)\n\n # Loop over the output redshifts\n if ztemp.size:\n # Return accretion rates and halo mass progenitors at\n # redshifts 'z' for object of mass Mi at zi\n dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)\n if mah and com:\n # More expensive to return concentrations\n c, sig, nu, zf = COM(ztemp, Mz, **cosmo)\n # Save all arrays\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],\n c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])\n if filename:\n fout.write(\n \"{}, {}, {}, {}, {}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], dMdt[j_ind],\n Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],\n zf[j_ind]))\n elif mah:\n # Save only MAH arrays\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])\n if filename:\n fout.write(\"{}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], dMdt[j_ind],\n Mz[j_ind]))\n else:\n # Output only COM arrays\n c, sig, nu, zf = COM(ztemp, Mz, **cosmo)\n # For any halo mass Mi at redshift zi\n # solve for c, sig, nu and zf\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],\n nu[j_ind], zf[j_ind])\n if filename:\n fout.write(\"{}, {}, {}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],\n nu[j_ind], zf[j_ind]))\n\n # Make sure to close the file if it was opened\n finally:\n fout.close() if filename else None\n\n if retcosmo:\n return(dataset, cosmo)\n else:\n return(dataset)\n"
] | from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import commah
def plotcommand(cosmology='WMAP5', plotname=None):
    """ Example ways to interrogate the dataset and plot the commah output.

    Produces eight figures (c-M, c-z, zf-z, dM/dt-z, dM/dt-M, specific
    accretion rate, M(z)-z and M(z)/M0-z relations).

    Parameters
    ----------
    cosmology : str or dict, optional
        Named cosmology (default 'WMAP5') or a parameter dict, passed
        through to commah.run / commah.getcosmo.
    plotname : str, optional
        If given, each figure is saved to '<plotname><suffix>.png';
        otherwise each figure is shown interactively.

    Returns
    -------
    str
        "Done" on completion.
    """
    def _new_axes(xtitle, ytitle):
        # One labelled single-panel figure per relation.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlabel(xtitle)
        ax.set_ylabel(ytitle)
        return fig, ax

    def _style_legend(ax, loc):
        # Shared legend styling: fully transparent box, small label text,
        # thick legend sample lines.
        leg = ax.legend(loc=loc)
        leg.get_frame().set_alpha(0)
        leg.get_frame().set_edgecolor('white')
        for label in leg.get_texts():
            label.set_fontsize('small')  # the font size
        for label in leg.get_lines():
            label.set_linewidth(4)  # the legend line width

    def _output(fig, suffix):
        # Save to '<plotname><suffix>' when plotname is set, else display.
        if plotname:
            fig.tight_layout(pad=0.2)
            print("Plotting to '%s%s'" % (plotname, suffix))
            fig.savefig(plotname + suffix, dpi=fig.dpi*5)
        else:
            plt.show()

    # Plot the c-M relation as a function of redshift
    xarray = 10**(np.arange(1, 15, 0.2))
    zarray = np.arange(0, 5, 0.5)
    fig, ax = _new_axes(r"Halo Mass (M$_{sol}$)", r"Concentration")
    plt.ylim([2, 30])
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)
        # Access the concentration column of the structured output
        ax.plot(xarray, output['c'].flatten(), label="z="+str(zval),
                color=colors[zind])
        # Overplot the D08 predictions in black
        ax.plot(xarray, commah.commah.cduffy(zval, xarray), color="black")
    ax.set_xscale('log')
    ax.set_yscale('log')
    _style_legend(ax, 1)
    _output(fig, "_CM_relation.png")

    # Plot the c-z relation as a function of mass (so always Mz=M0)
    xarray = 10**(np.arange(0, 1, 0.05)) - 1
    zarray = 10**np.arange(6, 14, 2)
    masslabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
    fig, ax = _new_axes(r"Redshift", r"NFW Concentration")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
        ax.plot(xarray, output['c'].flatten(),
                label=masslabel+"{0:.1f}".format(np.log10(zval)),
                color=colors[zind],)
    _style_legend(ax, 1)
    _output(fig, "_Cz_relation.png")

    # Plot the zf-z relation for different masses (so always Mz=M0)
    xarray = 10**(np.arange(0, 1, 0.05)) - 1
    zarray = 10**np.arange(6, 14, 2)
    fig, ax = _new_axes(r"Redshift", r"Formation Redshift")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
        ax.plot(xarray, output['zf'].flatten(),
                label=masslabel+"{0:.1f}".format(np.log10(zval)),
                color=colors[zind],)
    _style_legend(ax, 2)
    _output(fig, "_zfz_relation.png")

    # Plot the dM/dt-z relation for different masses (so always Mz=M0)
    xarray = 10**(np.arange(0, 1, 0.05)) - 1
    zarray = 10**np.arange(10, 14, 0.5)
    fig, ax = _new_axes(r"log$_{10}$ (1+z)",
                        r"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    cosmo = commah.getcosmo(cosmology)
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,
                            com=False, mah=True)
        ax.plot(np.log10(xarray+1.), np.log10(output['dMdt'].flatten()),
                label=r"log$_{10}$ M$_z$(M$_{sol}$)=" +
                "{0:.1f}".format(np.log10(zval)),
                color=colors[zind],)
        # Plot the semi-analytic approximate formula from Correa et al 2015b
        semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\
            (-0.24 + 0.75 * (xarray + 1)) * np.sqrt(
            cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])
        ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),
                color='black')
    _style_legend(ax, 2)
    _output(fig, "_dMdtz_relation.png")

    # Plot the dMdt-M relation as a function of redshift
    xarray = 10**(np.arange(10, 14, 0.5))
    zarray = np.arange(0, 5, 0.5)
    fig, ax = _new_axes(r"Halo Mass M$_{sol}$",
                        r"Accretion Rate M$_{sol}$ yr$^{-1}$")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
                            com=False, mah=True)
        ax.plot(xarray, output['dMdt'].flatten(), label="z="+str(zval),
                color=colors[zind],)
    ax.set_xscale('log')
    ax.set_yscale('log')
    _style_legend(ax, 2)
    _output(fig, "_MAH_M_relation.png")

    # Plot the (dM/M)dt-M relation as a function of redshift
    xarray = 10**(np.arange(10, 14, 0.5))
    zarray = np.arange(0, 5, 0.5)
    fig, ax = _new_axes(r"Halo Mass M$_{sol}$",
                        r"Specific Accretion Rate yr$^{-1}$")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
                            mah=True, com=False)
        # Specific rate: accretion rate divided by halo mass
        ax.plot(xarray, output['dMdt'].flatten()/xarray,
                label="z="+str(zval),
                color=colors[zind],)
    ax.set_xscale('log')
    ax.set_yscale('log')
    _style_legend(ax, 1)
    _output(fig, "_specificMAH_M_relation.png")

    # Plot the Mz-z relation as a function of mass
    # (so mass is decreasing to zero as z-> inf)
    xarray = 10**(np.arange(0, 1, 0.05)) - 1
    zarray = 10**np.arange(10, 14, 0.5)
    masslabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
    fig, ax = _new_axes(r"Redshift", r"M(z) (M$_{sol}$)")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
        ax.plot(xarray, output['Mz'].flatten(),
                label=masslabel+"{0:.1f}".format(np.log10(zval)),
                color=colors[zind],)
    ax.set_yscale('log')
    _style_legend(ax, 1)
    _output(fig, "_Mzz_relation.png")

    # Plot the Mz/M0-z relation as a function of mass
    xarray = 10**(np.arange(0, 1, 0.02)) - 1
    zarray = 10**np.arange(10, 14, 0.5)
    fig, ax = _new_axes(r"Redshift", r"log$_{10}$ M(z)/M$_{0}$")
    colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
    for zind, zval in enumerate(zarray):
        output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
        ax.plot(xarray, np.log10(output['Mz'].flatten()/zval),
                label=masslabel+"{0:.1f}".format(np.log10(zval)),
                color=colors[zind],)
    _style_legend(ax, 3)
    _output(fig, "_MzM0z_relation.png")
    return("Done")
|
astroduff/commah | examples.py | plotcommand | python | def plotcommand(cosmology='WMAP5', plotname=None):
# Plot the c-M relation as a functon of redshift
xarray = 10**(np.arange(1, 15, 0.2))
yval = 'c'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass (M$_{sol}$)"
ytitle = r"Concentration"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
plt.ylim([2, 30])
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind])
# Overplot the D08 predictions in black
ax.plot(xarray, commah.commah.cduffy(zval, xarray), color="black")
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_CM_relation.png'" % (plotname))
fig.savefig(plotname+"_CM_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the c-z relation as a function of mass (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'c'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"NFW Concentration"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
# Access the column yval from the data file
yarray = output[yval].flatten()
# Plot each line in turn with different colours
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Cz_relation.png'" % (plotname))
fig.savefig(plotname+"_Cz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the zf-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'zf'
# Specify the mass range
zarray = 10**np.arange(6, 14, 2)
xtitle = r"Redshift"
ytitle = r"Formation Redshift"
linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_zfz_relation.png'" % (plotname))
fig.savefig(plotname+"_zfz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dM/dt-z relation for different masses (so always Mz=M0)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'dMdt'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"log$_{10}$ (1+z)"
ytitle = r"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = r"log$_{10}$ M$_z$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
cosmo = commah.getcosmo(cosmology)
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(np.log10(xarray+1.), np.log10(yarray),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
# Plot the semi-analytic approximate formula from Correa et al 2015b
semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\
(-0.24 + 0.75 * (xarray + 1)) * np.sqrt(
cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])
ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),
color='black')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_dMdtz_relation.png'" % (plotname))
fig.savefig(plotname+"_dMdtz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the dMdt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Accretion Rate M$_{sol}$ yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
com=False, mah=True)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=2)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_MAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the (dM/M)dt-M relation as a function of redshift
xarray = 10**(np.arange(10, 14, 0.5))
yval = 'dMdt'
# Specify the redshift range
zarray = np.arange(0, 5, 0.5)
xtitle = r"Halo Mass M$_{sol}$"
ytitle = r"Specific Accretion Rate yr$^{-1}$"
linelabel = "z="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,
mah=True, com=False)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray/xarray, label=linelabel+str(zval),
color=colors[zind],)
ax.set_xscale('log')
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_specificMAH_M_relation.png'" % (plotname))
fig.savefig(plotname+"_specificMAH_M_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz-z relation as a function of mass
# (so mass is decreasing to zero as z-> inf)
xarray = 10**(np.arange(0, 1, 0.05)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"M(z) (M$_{sol}$)"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, yarray,
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
ax.set_yscale('log')
leg = ax.legend(loc=1)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_Mzz_relation.png'" % (plotname))
fig.savefig(plotname+"_Mzz_relation.png", dpi=fig.dpi*5)
else:
plt.show()
# Plot the Mz/M0-z relation as a function of mass
xarray = 10**(np.arange(0, 1, 0.02)) - 1
yval = 'Mz'
# Specify the mass range
zarray = 10**np.arange(10, 14, 0.5)
xtitle = r"Redshift"
ytitle = r"log$_{10}$ M(z)/M$_{0}$"
linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)="
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
colors = cm.rainbow(np.linspace(0, 1, len(zarray)))
for zind, zval in enumerate(zarray):
output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)
yarray = output[yval].flatten()
# Plot each line in turn with different colour
ax.plot(xarray, np.log10(yarray/zval),
label=linelabel+"{0:.1f}".format(np.log10(zval)),
color=colors[zind],)
leg = ax.legend(loc=3)
# Make box totally transparent
leg.get_frame().set_alpha(0)
leg.get_frame().set_edgecolor('white')
for label in leg.get_texts():
label.set_fontsize('small') # the font size
for label in leg.get_lines():
label.set_linewidth(4) # the legend line width
if plotname:
fig.tight_layout(pad=0.2)
print("Plotting to '%s_MzM0z_relation.png'" % (plotname))
fig.savefig(plotname+"_MzM0z_relation.png", dpi=fig.dpi*5)
else:
plt.show()
return("Done") | Example ways to interrogate the dataset and plot the commah output | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/examples.py#L93-L466 | [
"def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,\n filename=None, verbose=None, retcosmo=None):\n \"\"\" Run commah code on halo of mass 'Mi' at redshift 'zi' with\n accretion and profile history at higher redshifts 'z'\n This is based on Correa et al. (2015a,b,c)\n\n Parameters\n ----------\n cosmology : str or dict\n Can be named cosmology, default WMAP7 (aka DRAGONS), or\n DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15\n or dictionary similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n zi : float / numpy array, optional\n Redshift at which halo has mass 'Mi'. If float then all\n halo masses 'Mi' are assumed to be at this redshift.\n If array but Mi is float, then this halo mass is used across\n all starting redshifts. If both Mi and zi are arrays then they\n have to be the same size for one - to - one correspondence between\n halo mass and the redshift at which it has that mass. Default is 0.\n Mi : float / numpy array, optional\n Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'\n are solved for this halo mass. If array but zi is float, then this\n redshift is applied to all halo masses. If both Mi and zi are\n arrays then they have to be the same size for one - to - one\n correspondence between halo mass and the redshift at which it\n has that mass. Default is 1e12 Msol.\n z : float / numpy array, optional\n Redshift to solve commah code at. Must have zi<z else these steps\n are skipped. 
Default is False, meaning commah is solved at z=zi\n\n com : bool, optional\n If true then solve for concentration-mass,\n default is True.\n mah : bool, optional\n If true then solve for accretion rate and halo mass history,\n default is True.\n filename : bool / str, optional\n If str is passed this is used as a filename for output of commah\n verbose : bool, optional\n If true then give comments, default is None.\n retcosmo : bool, optional\n Return cosmological parameters used as a dict if retcosmo = True,\n default is None.\n\n Returns\n -------\n dataset : structured dataset\n dataset contains structured columns of size\n (size(Mi) > size(z)) by size(z)\n\n If mah = True and com = False then columns are\n ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)\n where 'zi' is the starting redshift, 'Mi' is halo mass at zi\n 'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]\n and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive\n at starting redshift 'zi'\n\n If mah = False and com = True then columns are\n ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)\n where 'zi' is the starting redshift, 'Mi' is halo mass at zi\n 'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo\n at the redshift 'z', 'sig' is the mass variance 'sigma',\n 'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',\n 'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'\n\n If mah = True and com = True then columns are:\n ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),\n ('c',float),('sig',float),('nu',float),('zf',float)\n\n file : structured dataset with name 'filename' if passed\n\n Raises\n ------\n Output -1\n If com = False and mah = False as user has to select something.\n Output -1\n If 'zi' and 'Mi' are arrays of unequal size. 
Impossible to match\n corresponding masses and redshifts of output.\n\n Examples\n --------\n Examples should be written in doctest format, and should illustrate how\n to use the function.\n\n >>> import examples\n >>> examples.runcommands() # A series of ways to query structured dataset\n >>> examples.plotcommands() # Examples to plot data\n\n \"\"\"\n\n # Check user choices...\n if not com and not mah:\n print(\"User has to choose com=True and / or mah=True \")\n return(-1)\n\n # Convert arrays / lists to np.array\n # and inflate redshift / mass axis\n # to match each other for later loop\n results = _checkinput(zi, Mi, z=z, verbose=verbose)\n\n # Return if results is -1\n if(results == -1):\n return(-1)\n # If not, unpack the returned iterable\n else:\n zi, Mi, z, lenz, lenm, lenzout = results\n # At this point we will have lenm objects to iterate over\n\n # Get the cosmological parameters for the given cosmology\n cosmo = getcosmo(cosmology)\n\n # Create output file if desired\n if filename:\n print(\"Output to file %r\" % (filename))\n fout = open(filename, 'wb')\n\n # Create the structured dataset\n try:\n if mah and com:\n if verbose:\n print(\"Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, \"\n \"zf\")\n if filename:\n fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z - \"\n \" Accretion - Final Halo - concentration - \"\n \" Mass - Peak - Formation z \"+'\\n')\n fout.write(\"# - mass - -\"\n \" rate - mass - - \"\n \" Variance - Height - \"+'\\n')\n fout.write(\"# - (M200) - - \"\n \" (dM/dt) - (M200) - - \"\n \" (sigma) - (nu) - \"+'\\n')\n fout.write(\"# - [Msol] - - \"\n \" [Msol/yr] - [Msol] - - \"\n \" - - \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float), ('dMdt', float),\n ('Mz', float), ('c', float), ('sig', float),\n ('nu', float), ('zf', float)])\n elif mah:\n if verbose:\n print(\"Output requested is zi, Mi, z, dMdt, Mz\")\n if filename:\n 
fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z -\"\n \" Accretion - Final Halo \"+'\\n')\n fout.write(\"# - mass - -\"\n \" rate - mass \"+'\\n')\n fout.write(\"# - (M200) - -\"\n \" (dm/dt) - (M200) \"+'\\n')\n fout.write(\"# - [Msol] - -\"\n \" [Msol/yr] - [Msol] \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float),\n ('dMdt', float), ('Mz', float)])\n else:\n if verbose:\n print(\"Output requested is zi, Mi, z, c, sig, nu, zf\")\n if filename:\n fout.write(_getcosmoheader(cosmo)+'\\n')\n fout.write(\"# Initial z - Initial Halo - Output z - \"\n \" concentration - \"\n \" Mass - Peak - Formation z \"+'\\n')\n fout.write(\"# - mass - -\"\n \" -\"\n \" Variance - Height - \"+'\\n')\n fout.write(\"# - (M200) - - \"\n \" - \"\n \" (sigma) - (nu) - \"+'\\n')\n fout.write(\"# - [Msol] - - \"\n \" - \"\n \" - - \"+'\\n')\n dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),\n ('Mi', float), ('z', float), ('c', float),\n ('sig', float), ('nu', float), ('zf', float)])\n\n # Now loop over the combination of initial redshift and halo mamss\n for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):\n if verbose:\n print(\"Output Halo of Mass Mi=%s at zi=%s\" % (Mval, zval))\n # For a given halo mass Mi at redshift zi need to know\n # output redshifts 'z'\n # Check that all requested redshifts are greater than\n # input redshift, except if z is False, in which case\n # only solve z at zi, i.e. 
remove a loop\n if z is False:\n ztemp = np.array(zval, ndmin=1, dtype=float)\n else:\n ztemp = np.array(z[z >= zval], dtype=float)\n\n # Loop over the output redshifts\n if ztemp.size:\n # Return accretion rates and halo mass progenitors at\n # redshifts 'z' for object of mass Mi at zi\n dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)\n if mah and com:\n # More expensive to return concentrations\n c, sig, nu, zf = COM(ztemp, Mz, **cosmo)\n # Save all arrays\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],\n c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])\n if filename:\n fout.write(\n \"{}, {}, {}, {}, {}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], dMdt[j_ind],\n Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],\n zf[j_ind]))\n elif mah:\n # Save only MAH arrays\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])\n if filename:\n fout.write(\"{}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], dMdt[j_ind],\n Mz[j_ind]))\n else:\n # Output only COM arrays\n c, sig, nu, zf = COM(ztemp, Mz, **cosmo)\n # For any halo mass Mi at redshift zi\n # solve for c, sig, nu and zf\n for j_ind, j_val in enumerate(ztemp):\n dataset[i_ind, j_ind] =\\\n (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],\n nu[j_ind], zf[j_ind])\n if filename:\n fout.write(\"{}, {}, {}, {}, {}, {}, {} \\n\".format(\n zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],\n nu[j_ind], zf[j_ind]))\n\n # Make sure to close the file if it was opened\n finally:\n fout.close() if filename else None\n\n if retcosmo:\n return(dataset, cosmo)\n else:\n return(dataset)\n",
"def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)\n",
"def cduffy(z, M, vir='200crit', relaxed=True):\n \"\"\" NFW conc from Duffy 08 Table 1 for halo mass and redshift\"\"\"\n\n if(vir == '200crit'):\n if relaxed:\n params = [6.71, -0.091, -0.44]\n else:\n params = [5.71, -0.084, -0.47]\n elif(vir == 'tophat'):\n if relaxed:\n params = [9.23, -0.090, -0.69]\n else:\n params = [7.85, -0.081, -0.71]\n elif(vir == '200mean'):\n if relaxed:\n params = [11.93, -0.090, -0.99]\n else:\n params = [10.14, -0.081, -1.01]\n else:\n print(\"Didn't recognise the halo boundary definition provided %s\"\n % (vir))\n\n return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))\n"
] | from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import commah
def runcommand(cosmology='WMAP5'):
    """ Example interface commands """

    # Concentration predictions at z=0 for a range of halo masses
    masses = [1e8, 1e9, 1e10]
    redshift = 0
    print("Concentrations for haloes of mass %s at z=%s" % (masses, redshift))
    res = commah.run(cosmology=cosmology, zi=redshift, Mi=masses)
    print(res['c'].flatten())

    # Same query, but also retrieve the cosmological parameters used
    masses = [1e8, 1e9, 1e10]
    redshift = 0
    print("Concentrations for haloes of mass %s at z=%s" % (masses, redshift))
    res, cosmo = commah.run(cosmology=cosmology, zi=redshift, Mi=masses,
                            retcosmo=True)
    print(res['c'].flatten())
    print(cosmo)

    # Concentration history of a MW-mass (2e12 Msol) halo across redshift
    mass = 2e12
    outz = [0, 0.5, 1, 1.5, 2, 2.5]
    res = commah.run(cosmology=cosmology, zi=0, Mi=mass, z=outz)
    for zval in outz:
        print("M(z=0)=%s has c(z=%s)=%s"
              % (mass, zval, res[res['z'] == zval]['c'].flatten()))

    # Concentration of a MW-mass halo observed at each starting redshift
    mass = 2e12
    zvals = [0, 0.5, 1, 1.5, 2, 2.5]
    res = commah.run(cosmology=cosmology, zi=zvals, Mi=mass)
    for zval in zvals:
        print("M(z=%s)=%s has concentration %s"
              % (zval, mass, res[(res['zi'] == zval) &
                                 (res['z'] == zval)]['c'].flatten()))

    # Concentration and rarity (sigma, nu) of a massive high-z cluster
    mass = 2e14
    redshift = 6
    res = commah.run(cosmology=cosmology, zi=redshift, Mi=mass)
    print("Concentrations for haloes of mass %s at z=%s" % (mass, redshift))
    print(res['c'].flatten())
    print("Mass variance sigma of haloes of mass %s at z=%s" % (mass, redshift))
    print(res['sig'].flatten())
    print("Fluctuation for haloes of mass %s at z=%s" % (mass, redshift))
    print(res['nu'].flatten())

    # Accretion-rate predictions for several masses across redshift
    masses = [1e8, 1e9, 1e10]
    zstart = [0]
    outz = [0, 0.5, 1, 1.5, 2, 2.5]
    res = commah.run(cosmology=cosmology, zi=zstart, Mi=masses, z=outz)
    for mval in masses:
        print("dM/dt for halo of mass %s at z=%s across redshift %s is: "
              % (mval, zstart, outz))
        print(res[res['Mi'] == mval]['dMdt'].flatten())

    # Halo mass history for haloes with M(z=0) = 1e8 Msol
    masses = [1e8]
    outz = [0, 0.5, 1, 1.5, 2, 2.5]
    print("Halo Mass History for z=0 mass of %s across z=%s" % (masses, outz))
    res = commah.run(cosmology=cosmology, zi=0, Mi=masses, z=outz)
    print(res['Mz'].flatten())

    # Formation redshifts for a range of z=0 halo masses
    masses = [1e8, 1e9, 1e10]
    outz = [0]
    print("Formation Redshifts for haloes of mass %s at z=%s" % (masses, outz))
    res = commah.run(cosmology=cosmology, zi=0, Mi=masses, z=outz)
    for mval in masses:
        print(res[res['Mi'] == mval]['zf'].flatten())

    return("Done")
|
astroduff/commah | commah/commah.py | _izip | python | def _izip(*iterables):
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators)) | Iterate through multiple lists or arrays of equal size | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L17-L24 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function

import numpy as np
import scipy
# 'import scipy' alone does not guarantee the submodules are importable;
# scipy.integrate.quad (in _int_growth) and scipy.optimize.brentq (in COM)
# need the explicit imports below on modern SciPy.
import scipy.integrate
import scipy.optimize
import cosmolopy as cp

import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (e.g. 'WMAP5', 'planck13', case-insensitive) or
        a dict of cosmological parameters. A user-supplied dict is
        completed in place with the 'A_scaling' normalisation and any
        keys cosmolopy requires (set to zero, unused).

    Returns
    -------
    dict
        Parameter dict suitable for **cosmo passing to cosmolopy.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a dict nor a recognised name.
    """

    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}

    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})

        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # Previously this branch only printed a warning and fell through
        # with 'cosmo' unbound, crashing below with an UnboundLocalError.
        # Fail loudly with a clear message instead.
        raise ValueError("Unrecognised cosmology %r: pass a dict of "
                         "cosmological parameters or a known cosmology name"
                         % (cosmology,))

    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)

    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift

    Parameters
    ----------
    z : float / numpy array
        Redshift of halo
    M : float / numpy array
        Halo mass [Msol]
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'.
        Default is '200crit'.
    relaxed : bool, optional
        Use the relaxed-halo fit if True, the full sample fit otherwise.

    Returns
    -------
    float / numpy array
        Concentration c(M, z).

    Raises
    ------
    ValueError
        If 'vir' is not a recognised halo boundary definition.
    """
    # (A, B, C) fit parameters for c = A * (M / Mpivot)**B * (1+z)**C
    # with Mpivot = 2e12/0.72 Msol, keyed on boundary definition and on
    # whether the relaxed-halo subsample is requested.
    fits = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    try:
        params = fits[vir][bool(relaxed)]
    except KeyError:
        # Previously an unknown 'vir' only printed a warning and then
        # crashed with an UnboundLocalError on 'params'; raise instead.
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))

    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Cosmological parameter dictionary in cosmolopy format.

    Returns
    -------
    float
        Multiplicative correction to the WMAP5 'A' normalisation
        appropriate for this cosmology.
    """
    # Mass enclosed by a radius-8 sphere in this cosmology
    # (presumably the sigma_8 normalisation scale, 8 Mpc/h — TODO
    # confirm against cosmolopy's radius units)
    M8 = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale away from the WMAP5 reference values
    # (sigma_8 = 0.796, n = 0.963, M8 = 2.5e14 Msol)
    return((0.796 / cosmo['sigma_8']) *
           (M8 / 2.5e14) ** ((cosmo['n'] - 0.963) / 6))
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (e.g. 'WMAP5', 'planck13', case-insensitive),
        or a dict of cosmological parameters when 'newcosmo' is set.
    newcosmo : bool, optional
        If set, 'cosmology' is a parameter dict for an unlisted
        cosmology and the WMAP5 value is rescaled via _delta_sigma.
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit.

    Raises
    ------
    ValueError
        If 'cosmology' is not a recognised name (and newcosmo not set).
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}

    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        return(defaultcosmologies['wmap5'] * _delta_sigma(**cosmology))

    try:
        return(defaultcosmologies[cosmology.lower()])
    except KeyError:
        # Previously an unknown name only printed a warning and then
        # crashed with an UnboundLocalError at the return; raise instead.
        raise ValueError("Don't recognise cosmology %r for A_scaling"
                         % (cosmology,))
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    # 1 / E(z) for a flat matter + lambda universe
    inv_h = (cosmo['omega_M_0'] * (1 + z) ** 3 +
             cosmo['omega_lambda_0']) ** (-0.5)
    fz = (1 + z) * inv_h ** 3
    # Normalised growth factor at z; hoisted as it appears in both terms
    growth = growthfactor(z, norm=True, **cosmo)
    term1 = growth * (inv_h ** 2) * 1.5 * cosmo['omega_M_0'] * (1 + z) ** 2
    term2 = fz * growth / _int_growth(z, **cosmo)
    return(term1 - term2)
def growthfactor(z, norm=True, **cosmo):
    """ Return the linear growth factor at redshift z for a cosmology.

    Parameters
    ----------
    z : float or numpy array
        Redshift at which the growth factor is evaluated
    norm : boolean, optional
        Normalise to unity at z=0 if True (the default)
    cosmo : dict
        Cosmological parameter dictionary in cosmolopy format
        (requires 'omega_M_0' and 'omega_lambda_0').

    Returns
    -------
    float or numpy array
        Linear growth factor at 'z'.
    """
    # Dimensionless Hubble parameter E(z) for a flat universe
    Ez = np.sqrt(cosmo['omega_M_0'] * (1 + z) ** 3 + cosmo['omega_lambda_0'])
    growth = Ez * _int_growth(z, **cosmo)
    if norm:
        # Divide by the present-day value so D(0) = 1
        return(growth / _int_growth(0, **cosmo))
    return(growth)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Formation redshift for a halo of concentration 'c' at redshift 'z'.

    Rearranges eqn 18 from Correa et al (2015c).

    Parameters
    ----------
    c : float / numpy array
        Halo concentration
    z : float / numpy array
        Redshift at which the halo has concentration 'c'
    Ascaling : float, optional
        Cosmology-dependent density normalisation (see getAscaling).
        Default is 900.
    omega_M_0 : float, optional
        Matter density of the universe. Default is 0.25.
    omega_lambda_0 : float, optional
        Dark energy density of the universe. Default is 0.75.

    Returns
    -------
    float / numpy array
        Formation redshift zf.
    """
    # NFW mass-profile factors Y(1) and Y(c)
    y_one = np.log(2) - 0.5
    y_c = np.log(1 + c) - c / (1 + c)
    # Mean density within the scale radius (Eqn 14)
    rho_2 = 200 * (c ** 3) * y_one / y_c
    ratio = omega_lambda_0 / omega_M_0
    # Invert the density-redshift relation of Eqn 17
    return((((1 + z) ** 3 + ratio) *
            (rho_2 / Ascaling) - ratio) ** (1 / 3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    Parameters
    ----------
    zi : float
        Redshift at which the halo has mass 'Mi'
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Cosmological parameter dictionary in cosmolopy format.

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth indices of the mass history.
        At zi = 0 these reduce to alpha and beta of Correa et al 2015a.
    """
    # Empirical formation-redshift fit, Eqn 23 of Correa et al 2015a
    # (z_-2 in COM is the more physically motivated alternative)
    logM = np.log10(Mi)
    zf = -0.0064 * logM ** 2 + 0.0237 * logM + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf ** (-0.9476)
    # Comoving radii enclosing masses Mi and Mi/q  [Mpc]
    R_full = cp.perturbation.mass_to_radius(Mi, **cosmo)
    R_q = cp.perturbation.mass_to_radius(Mi / q, **cosmo)
    # Mass variance evaluated at z=0, a good approximation here
    sig, _err = cp.perturbation.sigma_r(R_full, 0, **cosmo)
    sigq, _errq = cp.perturbation.sigma_r(R_q, 0, **cosmo)
    f = (sigq ** 2 - sig ** 2) ** (-0.5)
    # Eqns 9 and 10 of Correa et al 2015c (generalised from the z=0
    # special case of 2015a): a_tilde is the power-law growth rate,
    # b_tilde the exponential growth rate
    a_tilde = (np.sqrt(2 / np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo) ** 2 + 1) * f
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Accretion rate and mass of a halo at redshift 'z' given it has
    mass 'Mi' at the lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift to evaluate at; note zi < z
    zi : float
        Redshift at which the halo mass is 'Mi'
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Cosmological parameter dictionary in cosmolopy format.

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr] and halo mass [Msol] at redshift 'z'.
    """
    # Growth indices fixed at the starting redshift
    # (Eqns 9 and 10 of Correa et al. 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)

    # Halo mass at z (Eqn 8 of Correa et al. 2015c)  [Msol]
    Mz = Mi * ((1 + z - zi) ** a_tilde) * np.exp(b_tilde * (z - zi))

    # Accretion rate at z (Eqn 11 of Correa et al. 2015c)  [Msol/yr]
    dMdt = (71.6 * (Mz / 1e12) * (cosmo['h'] / 0.7) *
            (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *
            np.sqrt(cosmo['omega_M_0'] * (1 + z) ** 3 +
                    cosmo['omega_lambda_0']))

    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Mass accretion history: acc_rate evaluated over redshift steps
    'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Output redshift grid; note zi < z always
    zi : float
        Redshift at which the halo mass is 'Mi'
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Cosmological parameter dictionary in cosmolopy format.

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays matching the size of 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at each redshift.
    """
    # Coerce the output grid to a 1D float array
    z = np.array(z, ndmin=1, dtype=float)

    # Solve the accretion rate and halo mass at every requested step
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    for idx, zval in enumerate(z):
        dMdt_array[idx], Mz_array[idx] = acc_rate(zval, zi, Mi, **cosmo)

    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also contain the 'A_scaling' key (see getAscaling).

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds to,
        the dimensionless fluctuation this represents and formation
        redshift. Entries are set to -1 where the solver failed.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)

    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)

    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)

        # Minimize equation to solve for 1 unknown, 'c'
        # (root of _minimize_c bracketed between c=2 and c=1000)
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))

        # NOTE(review): brentq above returns a root inside [2, 1000], so
        # this c ~ 0 failure guard looks unreachable — confirm whether a
        # different solver/bracket was intended here.
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])

            # Mass variance sigma at z=0 for the radius enclosing Mval
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Dimensionless fluctuation height; 1.686 is presumably the
            # spherical-collapse threshold delta_c — TODO confirm
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))

        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf

    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
filename=None, verbose=None, retcosmo=None):
""" Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data
"""
# Check user choices...
if not com and not mah:
print("User has to choose com=True and / or mah=True ")
return(-1)
# Convert arrays / lists to np.array
# and inflate redshift / mass axis
# to match each other for later loop
results = _checkinput(zi, Mi, z=z, verbose=verbose)
# Return if results is -1
if(results == -1):
return(-1)
# If not, unpack the returned iterable
else:
zi, Mi, z, lenz, lenm, lenzout = results
# At this point we will have lenm objects to iterate over
# Get the cosmological parameters for the given cosmology
cosmo = getcosmo(cosmology)
# Create output file if desired
if filename:
print("Output to file %r" % (filename))
fout = open(filename, 'wb')
# Create the structured dataset
try:
if mah and com:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
"zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" Accretion - Final Halo - concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" rate - mass - - "
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" (dM/dt) - (M200) - - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" [Msol/yr] - [Msol] - - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('dMdt', float),
('Mz', float), ('c', float), ('sig', float),
('nu', float), ('zf', float)])
elif mah:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z -"
" Accretion - Final Halo "+'\n')
fout.write("# - mass - -"
" rate - mass "+'\n')
fout.write("# - (M200) - -"
" (dm/dt) - (M200) "+'\n')
fout.write("# - [Msol] - -"
" [Msol/yr] - [Msol] "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float),
('dMdt', float), ('Mz', float)])
else:
if verbose:
print("Output requested is zi, Mi, z, c, sig, nu, zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" -"
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('c', float),
('sig', float), ('nu', float), ('zf', float)])
# Now loop over the combination of initial redshift and halo mamss
for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
if verbose:
print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
# For a given halo mass Mi at redshift zi need to know
# output redshifts 'z'
# Check that all requested redshifts are greater than
# input redshift, except if z is False, in which case
# only solve z at zi, i.e. remove a loop
if z is False:
ztemp = np.array(zval, ndmin=1, dtype=float)
else:
ztemp = np.array(z[z >= zval], dtype=float)
# Loop over the output redshifts
if ztemp.size:
# Return accretion rates and halo mass progenitors at
# redshifts 'z' for object of mass Mi at zi
dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
if mah and com:
# More expensive to return concentrations
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# Save all arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write(
"{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
zf[j_ind]))
elif mah:
# Save only MAH arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind]))
else:
# Output only COM arrays
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# For any halo mass Mi at redshift zi
# solve for c, sig, nu and zf
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind]))
# Make sure to close the file if it was opened
finally:
fout.close() if filename else None
if retcosmo:
return(dataset, cosmo)
else:
return(dataset)
|
astroduff/commah | commah/commah.py | _checkinput | python | def _checkinput(zi, Mi, z=False, verbose=None):
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout) | Check and convert any input scalar or array to numpy array | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L27-L67 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def getcosmo(cosmology):
""" Find cosmological parameters for named cosmo in cosmology.py list """
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
print("You haven't passed a dict of cosmological parameters ")
print("OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
""" NFW conc from Duffy 08 Table 1 for halo mass and redshift"""
if(vir == '200crit'):
if relaxed:
params = [6.71, -0.091, -0.44]
else:
params = [5.71, -0.084, -0.47]
elif(vir == 'tophat'):
if relaxed:
params = [9.23, -0.090, -0.69]
else:
params = [7.85, -0.081, -0.71]
elif(vir == '200mean'):
if relaxed:
params = [11.93, -0.090, -0.99]
else:
params = [10.14, -0.081, -1.01]
else:
print("Didn't recognise the halo boundary definition provided %s"
% (vir))
return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
""" Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
"""
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
""" Returns the normalisation constant between
Rho_-2 and Rho_mean(z_formation) for a given cosmology
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
newcosmo : str, optional
If cosmology is not from predefined list have to perturbation
A_scaling variable. Defaults to None.
Returns
-------
float
The scaled 'A' relation between rho_2 and rho_crit for the cosmology
"""
# Values from Correa 15c
defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
'wmap1_lss': 853, 'wmap3_mean': 850,
'wmap5_ml': 887, 'wmap5_lss': 887,
'wmap7_lss': 887,
'planck13': 880, 'planck15': 880}
if newcosmo:
# Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
else:
if cosmology.lower() in defaultcosmologies.keys():
A_scaling = defaultcosmologies[cosmology.lower()]
else:
print("Error, don't recognise your cosmology for A_scaling ")
print("You provided %s" % (cosmology))
return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
""" Returns derivative of the linear growth factor at z
for a given cosmology **cosmo """
inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
fz = (1 + z) * inv_h**3
deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\
1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)
return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
""" Returns linear growth factor at a given redshift, normalised to z=0
by default, for a given cosmology
Parameters
----------
z : float or numpy array
The redshift at which the growth factor should be calculated
norm : boolean, optional
If true then normalise the growth factor to z=0 case defaults True
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float or numpy array
The growth factor at a range of redshifts 'z'
Raises
------
"""
H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
cosmo['omega_lambda_0'])
growthval = H * _int_growth(z, **cosmo)
if norm:
growthval /= _int_growth(0, **cosmo)
return(growthval)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Rearrange eqn 18 from Correa et al (2015c) to return
formation redshift for a concentration at a given redshift
Parameters
----------
c : float / numpy array
Concentration of halo
z : float / numpy array
Redshift of halo with concentration c
Ascaling : float
Cosmological dependent scaling between densities, use function
getAscaling('WMAP5') if unsure. Default is 900.
omega_M_0 : float
Mass density of the universe. Default is 0.25
omega_lambda_0 : float
Dark Energy density of the universe. Default is 0.75
Returns
-------
zf : float / numpy array
Formation redshift for halo of concentration 'c' at redshift 'z'
"""
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
rho_2 = 200*(c**3)*Y1/Yc
zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
return(zf)
def calc_ab(zi, Mi, **cosmo):
""" Calculate growth rate indices a_tilde and b_tilde
Parameters
----------
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(a_tilde, b_tilde) : float
"""
# When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
# Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
# Arbitray formation redshift, z_-2 in COM is more physically motivated
zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
# Eqn 22 of Correa et al 2015a
q = 4.137 * zf**(-0.9476)
# Radius of a mass Mi
R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
# Radius of a mass Mi/q
Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
# Mass variance 'sigma' evaluate at z=0 to a good approximation
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
f = (sigq**2 - sig**2)**(-0.5)
# Eqn 9 and 10 from Correa et al 2015c
# (generalised to zi from Correa et al 2015a's z=0 special case)
# a_tilde is power law growth rate
a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
# b_tilde is exponential growth rate
b_tilde = -f
return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
""" Calculate accretion rate and mass history of a halo at any
redshift 'z' with mass 'Mi' at a lower redshift 'z'
Parameters
----------
z : float
Redshift to solve acc_rate / mass history. Note zi<z
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Find parameters a_tilde and b_tilde for initial redshift
# use Eqn 9 and 10 of Correa et al. (2015c)
a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
# Halo mass at z, in Msol
# use Eqn 8 in Correa et al. (2015c)
Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
# Accretion rate at z, Msol yr^-1
# use Eqn 11 from Correa et al. (2015c)
dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
(-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
""" Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
""" Calculate concentration for halo of mass 'M' at redshift 'z'
Parameters
----------
z : float / numpy array
Redshift to find concentration of halo
M : float / numpy array
Halo mass at redshift 'z'. Must be same size as 'z'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(c_array, sig_array, nu_array, zf_array) : float / numpy arrays
of equivalent size to 'z' and 'M'. Variables are
Concentration, Mass Variance 'sigma' this corresponds too,
the dimnesionless fluctuation this represents and formation redshift
"""
# Check that z and M are arrays
z = np.array(z, ndmin=1, dtype=float)
M = np.array(M, ndmin=1, dtype=float)
# Create array
c_array = np.empty_like(z)
sig_array = np.empty_like(z)
nu_array = np.empty_like(z)
zf_array = np.empty_like(z)
for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
# Evaluate the indices at each redshift and mass combination
# that you want a concentration for, different to MAH which
# uses one a_tilde and b_tilde at the starting redshift only
a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
# Minimize equation to solve for 1 unknown, 'c'
c = scipy.optimize.brentq(_minimize_c, 2, 1000,
args=(zval, a_tilde, b_tilde,
cosmo['A_scaling'], cosmo['omega_M_0'],
cosmo['omega_lambda_0']))
if np.isclose(c, 0):
print("Error solving for concentration with given redshift and "
"(probably) too small a mass")
c = -1
sig = -1
nu = -1
zf = -1
else:
# Calculate formation redshift for this concentration,
# redshift at which the scale radius = virial radius: z_-2
zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
omega_M_0=cosmo['omega_M_0'],
omega_lambda_0=cosmo['omega_lambda_0'])
R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
c_array[i_ind] = c
sig_array[i_ind] = sig
nu_array[i_ind] = nu
zf_array[i_ind] = zf
return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
filename=None, verbose=None, retcosmo=None):
""" Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data
"""
# Check user choices...
if not com and not mah:
print("User has to choose com=True and / or mah=True ")
return(-1)
# Convert arrays / lists to np.array
# and inflate redshift / mass axis
# to match each other for later loop
results = _checkinput(zi, Mi, z=z, verbose=verbose)
# Return if results is -1
if(results == -1):
return(-1)
# If not, unpack the returned iterable
else:
zi, Mi, z, lenz, lenm, lenzout = results
# At this point we will have lenm objects to iterate over
# Get the cosmological parameters for the given cosmology
cosmo = getcosmo(cosmology)
# Create output file if desired
if filename:
print("Output to file %r" % (filename))
fout = open(filename, 'wb')
# Create the structured dataset
try:
if mah and com:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
"zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" Accretion - Final Halo - concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" rate - mass - - "
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" (dM/dt) - (M200) - - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" [Msol/yr] - [Msol] - - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('dMdt', float),
('Mz', float), ('c', float), ('sig', float),
('nu', float), ('zf', float)])
elif mah:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z -"
" Accretion - Final Halo "+'\n')
fout.write("# - mass - -"
" rate - mass "+'\n')
fout.write("# - (M200) - -"
" (dm/dt) - (M200) "+'\n')
fout.write("# - [Msol] - -"
" [Msol/yr] - [Msol] "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float),
('dMdt', float), ('Mz', float)])
else:
if verbose:
print("Output requested is zi, Mi, z, c, sig, nu, zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" -"
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('c', float),
('sig', float), ('nu', float), ('zf', float)])
# Now loop over the combination of initial redshift and halo mamss
for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
if verbose:
print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
# For a given halo mass Mi at redshift zi need to know
# output redshifts 'z'
# Check that all requested redshifts are greater than
# input redshift, except if z is False, in which case
# only solve z at zi, i.e. remove a loop
if z is False:
ztemp = np.array(zval, ndmin=1, dtype=float)
else:
ztemp = np.array(z[z >= zval], dtype=float)
# Loop over the output redshifts
if ztemp.size:
# Return accretion rates and halo mass progenitors at
# redshifts 'z' for object of mass Mi at zi
dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
if mah and com:
# More expensive to return concentrations
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# Save all arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write(
"{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
zf[j_ind]))
elif mah:
# Save only MAH arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind]))
else:
# Output only COM arrays
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# For any halo mass Mi at redshift zi
# solve for c, sig, nu and zf
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind]))
# Make sure to close the file if it was opened
finally:
fout.close() if filename else None
if retcosmo:
return(dataset, cosmo)
else:
return(dataset)
|
astroduff/commah | commah/commah.py | getcosmo | python | def getcosmo(cosmology):
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
print("You haven't passed a dict of cosmological parameters ")
print("OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo) | Find cosmological parameters for named cosmo in cosmology.py list | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L70-L108 | [
"def DRAGONS(flat=False, extras=True):\n \"\"\"DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from\n Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2292\n omega_b_0 = 0.0458\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.725,\n 'h': 0.702,\n 'n': 0.963,\n 'sigma_8': 0.816,\n 'tau': 0.088,\n 'z_reion': 10.6,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP1_Mill(flat=False, extras=True):\n \"\"\"WMAP1 Millennium cosmology\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.206\n omega_b_0 = 0.044\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.75,\n 'h': 0.73,\n 'n': 1.0,\n 'sigma_8': 0.9,\n 'tau': 0.148,\n 'z_reion': 17.,\n 't_0': 13.7,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP3_ML(flat=False, extras=True):\n \"\"\"WMAP3 Maximum Liklihood from Spergel et al. (2007) ApJS 170 377-408\n (arXiv:astro-ph/0603449)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.1959\n omega_b_0 = 0.0411\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.763,\n 'h': 0.732,\n 'n': 0.954,\n 'sigma_8': 0.756,\n 'tau': 0.091,\n 'z_reion': 11.3,\n 't_0': 13.73,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP5_mean(flat=False, extras=True):\n \"\"\"WMAP5 parameters (using WMAP data alone) from Komatsu et\n al. (2009ApJS..180..330K).\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n Notes\n -----\n\n Values taken from \"WMAP 5 Year Mean\" of Table 1 of the paper.\n\n \"\"\"\n omega_c_0 = 0.214\n omega_b_0 = 0.044\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.742,\n 'h': 0.719,\n 'n': 0.963,\n 'sigma_8': 0.796,\n 'tau': 0.087,\n 'z_reion': 11.0,\n 't_0': 13.69\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP7_ML(flat=False, extras=True):\n \"\"\"WMAP7 ML parameters from Komatsu et al. (2011) ApJS 192 18K\n (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2175\n omega_b_0 = 0.0445\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.738,\n 'h': 0.714,\n 'n': 0.969,\n 'sigma_8': 0.803,\n 'tau': 0.086,\n 'z_reion': 10.3,\n 't_0': 13.71,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP9_ML(flat=False, extras=True):\n \"\"\"WMAP Maximum Likelihood from Hinshaw et al. (2013) ApJS 208 19\n (arxiv:1212.5226v3)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.235\n omega_b_0 = 0.0465\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.7185,\n 'h': 0.693,\n 'n': 0.971,\n 'sigma_8': 0.820,\n 'tau': 0.0851,\n 'z_reion': 10.36,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP1_2dF_mean(flat=False, extras=True):\n \"\"\"WMAP1 with 2dF and ACBAR results from\n Spergel et al. (2003) ApJS 148 175S (arXiv:astro-ph/0302209)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.206\n omega_b_0 = 0.044\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.75,\n 'h': 0.73,\n 'n': 0.97,\n 'sigma_8': 0.9,\n 'tau': 0.148,\n 'z_reion': 17.,\n 't_0': 13.7,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP3_mean(flat=False, extras=True):\n \"\"\"WMAP3 mean fit from Spergel et al. (2007) ApJS 170 377-408\n (arXiv:astro-ph/0603449)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.196\n omega_b_0 = 0.041\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.763,\n 'h': 0.73,\n 'n': 0.954,\n 'sigma_8': 0.756,\n 'tau': 0.091,\n 'z_reion': 11.3,\n 't_0': 13.73,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP5_ML(flat=False, extras=True):\n \"\"\"WMAP5 parameters (using WMAP data alone) from Komatsu et\n al. (2009ApJS..180..330K).\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n Notes\n -----\n\n Values taken from \"WMAP 5 Year ML\" column of Table 1 of the paper.\n\n \"\"\"\n omega_c_0 = 0.206\n omega_b_0 = 0.043\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.751,\n 'h': 0.724,\n 'n': 0.961,\n 'sigma_8': 0.787,\n 'tau': 0.089,\n 'z_reion': 11.2,\n 't_0': 13.69\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP5_BAO_SN_mean(flat=False, extras=True):\n \"\"\"WMAP5 + BAO + SN parameters from Komatsu et al. (2009ApJS..180..330K).\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n Notes\n -----\n\n From the abstract of the paper:\n\n The six parameters and the corresponding 68% uncertainties,\n derived from the WMAP data combined with the distance\n measurements from the Type Ia supernovae (SN) and the Baryon\n Acoustic Oscillations (BAO) in the distribution of galaxies,\n are:\n\n Omega_B h^2 = 0.02267+0.00058-0.00059,\n Omega_c h^2 = 0.1131 +/- 0.0034,\n Omega_Lambda = 0.726 +/- 0.015,\n n_s = 0.960 +/- 0.013,\n tau = 0.084 +/- 0.016, and\n Delata^2 R = (2.445 +/- 0.096) * 10^-9 at k = 0.002 Mpc^-1.\n\n From these, we derive\n\n sigma_8 = 0.812 +/- 0.026,\n H0 = 70.5 +/- 1.3 km s^-11 Mpc^-1,\n Omega_b = 0.0456 +/- 0.0015,\n Omega_c = 0.228 +/- 0.013,\n Omega_m h^2 = 0.1358 + 0.0037 - 0.0036,\n zreion = 10.9 +/- 1.4, and\n t0 = 13.72 +/- 0.12 Gyr\n\n \"\"\"\n omega_c_0 = 0.2284\n omega_b_0 = 0.0456\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.726,\n 'h': 0.706,\n 'n': 0.960,\n 'sigma_8': 0.812,\n 'tau': 0.084,\n 'z_reion': 10.9,\n 't_0': 13.72\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def WMAP7_BAO_H0_mean(flat=False, extras=True):\n \"\"\"WMAP7 + BAO + H_0 parameters from Komatsu et al. (2011) ApJS 192 18K\n (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2264 # 0.228\n omega_b_0 = 0.0456 # 0.0456\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.728, # 0.726,\n 'h': 0.704, # 0.706,\n 'n': 0.963, # 0.960,\n 'sigma_8': 0.809, # 0.812,\n 'tau': 0.087, # 0.084,\n 'z_reion': 10.4, # 10.9,\n 't_0': 13.75, # 13.72\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def Planck_2013(flat=False, extras=True):\n \"\"\"Planck 2013 XVI: Scalar Perturbations only (Maximum Likelihood)\n from Ade et al. (2013) A&A 571 16 (arxiv:1303.5076)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.267\n omega_b_0 = 0.05\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.683,\n 'h': 0.671,\n 'n': 0.9624,\n 'sigma_8': 0.82344,\n 'tau': 0.0925,\n 'z_reion': 11.35,\n 't_0': 13.82,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def Planck_2015(flat=False, extras=True):\n \"\"\"Planck 2015 XII: Cosmological parameters Table 4\n column Planck TT, TE, EE + lowP + lensing + ext\n from Ade et al. (2015) A&A in press (arxiv:1502.01589v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_b_0 = 0.02230/(0.6774**2)\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': 0.3089,\n 'omega_lambda_0': 0.6911,\n 'h': 0.6774,\n 'n': 0.9667,\n 'sigma_8': 0.8159,\n 'tau': 0.066,\n 'z_reion': 8.8,\n 't_0': 13.799,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo\n",
"def getAscaling(cosmology, newcosmo=None):\n \"\"\" Returns the normalisation constant between\n Rho_-2 and Rho_mean(z_formation) for a given cosmology\n\n Parameters\n ----------\n cosmology : str or dict\n Can be named cosmology, default WMAP7 (aka DRAGONS), or\n DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15\n or dictionary similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n newcosmo : str, optional\n If cosmology is not from predefined list have to perturbation\n A_scaling variable. Defaults to None.\n\n Returns\n -------\n float\n The scaled 'A' relation between rho_2 and rho_crit for the cosmology\n\n \"\"\"\n # Values from Correa 15c\n defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,\n 'wmap5': 887, 'wmap7': 887, 'wmap9': 950,\n 'wmap1_lss': 853, 'wmap3_mean': 850,\n 'wmap5_ml': 887, 'wmap5_lss': 887,\n 'wmap7_lss': 887,\n 'planck13': 880, 'planck15': 880}\n\n if newcosmo:\n # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1\n A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)\n else:\n if cosmology.lower() in defaultcosmologies.keys():\n A_scaling = defaultcosmologies[cosmology.lower()]\n else:\n print(\"Error, don't recognise your cosmology for A_scaling \")\n print(\"You provided %s\" % (cosmology))\n\n return(A_scaling)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy
import scipy.integrate
import scipy.optimize
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW concentration from the Duffy et al. (2008) Table 1
    power-law fits for a halo of mass M at redshift z.

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo
    M : float / numpy array
        Halo mass [Msol]; the fit pivot mass is 2e12/0.72 Msol
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'.
        Default is '200crit'.
    relaxed : bool, optional
        If True use the relaxed-halo sample fits, otherwise the full
        sample fits. Default is True.

    Returns
    -------
    float / numpy array
        Concentration c = A * (M/Mpivot)**B * (1+z)**C

    Raises
    ------
    ValueError
        If `vir` is not a recognised boundary definition. (The
        original code only printed a warning and then crashed with an
        UnboundLocalError on the undefined fit parameters.)
    """
    # (A, B, C) fit parameters keyed by boundary definition and sample
    fits = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    try:
        params = fits[vir][bool(relaxed)]
    except KeyError:
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality Ascaling of
    the rho_crit - rho_2 relation for a user-supplied cosmology
    (Correa et al 2015c).

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters; requires at least
        'sigma_8', 'n' and whatever cosmolopy needs for the mass
        within an 8 Mpc/h sphere.

    Returns
    -------
    float
        Multiplicative perturbation to the WMAP5 'A' normalisation
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    mass_8 = cp.perturbation.radius_to_mass(8, **cosmo)
    # Eqn C1 of Correa et al (2014b): scale relative to the WMAP5
    # reference values (sigma_8 = 0.796, n = 0.963, M8 = 2.5e14 Msol)
    return((0.796/cosmo['sigma_8']) *
           (mass_8/2.5e14)**((cosmo['n']-0.963)/6))
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9, Planck13, Planck15
        and the _lss/_mean/_ml variants, case-insensitive), or a dict
        of cosmological parameters when `newcosmo` is set.
    newcosmo : bool, optional
        If truthy, treat `cosmology` as a parameter dict and perturb
        the WMAP5 normalisation for it (Correa et al 2014b eqn C1).
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit

    Raises
    ------
    ValueError
        If `cosmology` is not a recognised named cosmology. (The
        original code printed a warning and then crashed with an
        UnboundLocalError on the undefined A_scaling.)
    """
    # Calibrated values from Correa et al (2015c)
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        return(defaultcosmologies['wmap5'] * _delta_sigma(**cosmology))
    if cosmology.lower() in defaultcosmologies:
        return(defaultcosmologies[cosmology.lower()])
    raise ValueError("Error, don't recognise your cosmology for A_scaling, "
                     "you provided %s" % (cosmology))
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Derivative of the (normalised) linear growth factor at
    redshift z for the cosmology **cosmo """
    # 1/E(z): inverse dimensionless Hubble parameter
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    gz = growthfactor(z, norm=True, **cosmo)
    # Product rule applied to D(z) = H(z) * int_growth(z)
    return(gz * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -
           fz * gz/_int_growth(z, **cosmo))
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift z for a given cosmology,
    normalised to unity at z=0 by default.

    Parameters
    ----------
    z : float or numpy array
        Redshift at which the growth factor is evaluated
    norm : boolean, optional
        If True (default) normalise so that the z=0 value is 1
    cosmo : dict
        Cosmological parameters; requires 'omega_M_0' and
        'omega_lambda_0'

    Returns
    -------
    float or numpy array
        The growth factor at redshift(s) 'z'
    """
    # D(z) is proportional to E(z) times the growth integral
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    growth = hubble * _int_growth(z, **cosmo)
    # Divide out the z=0 value when a normalised factor is requested
    return(growth/_int_growth(0, **cosmo) if norm else growth)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Invert eqn 18 of Correa et al (2015c): the formation redshift
    of a halo with concentration c observed at redshift z.

    Parameters
    ----------
    c : float / numpy array
        Concentration of the halo
    z : float / numpy array
        Redshift at which the halo has concentration c
    Ascaling : float, optional
        Cosmology-dependent scaling between densities; use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float, optional
        Mass density of the universe. Default is 0.25.
    omega_lambda_0 : float, optional
        Dark energy density of the universe. Default is 0.75.

    Returns
    -------
    zf : float / numpy array
        Formation redshift z_-2 of the halo
    """
    # NFW mass profile terms Y(1) and Y(c)
    y_one = np.log(2) - 0.5
    y_c = np.log(1 + c) - c/(1 + c)
    # Mean density within the scale radius (eqn 14)
    rho_2 = 200 * (c**3) * y_one/y_c
    ratio = omega_lambda_0/omega_M_0
    # Redshift at which the mean universe density matched rho_2/A
    return((((1 + z)**3 + ratio) * (rho_2/Ascaling) - ratio)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate the growth rate indices a_tilde and b_tilde used by
    the analytic halo mass history (Correa et al 2015a,c).

    Parameters
    ----------
    zi : float
        Redshift at which the halo has mass Mi
    Mi : float
        Halo mass [Msol] at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth-rate indices of eqn 8
        (M(z) = Mi (1+z-zi)^a_tilde exp(b_tilde (z-zi)))
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift; z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a: mass ratio between zi and zf
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluated at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Accretion rate and mass of a halo at redshift 'z', given its
    mass 'Mi' at the lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history at. Note zi < z
    zi : float
        Redshift at which the halo has mass Mi
    Mi : float
        Halo mass [Msol] at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr] and halo mass [Msol] at redshift 'z'
    """
    # Growth-rate indices for this starting mass and redshift
    # (eqns 9 and 10 of Correa et al 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Halo mass at z [Msol], eqn 8 of Correa et al (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Accretion rate at z [Msol/yr], eqn 11 of Correa et al (2015c)
    ez = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])
    dMdt = (71.6 * (Mz/1e12) * (cosmo['h']/0.7) *
            (-a_tilde/(1 + z - zi) - b_tilde) * (1 + z) * ez)
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Mass accretion history: evaluate acc_rate over the redshift
    steps 'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over. Note zi < z always
    zi : float
        Redshift at which the halo has mass Mi
    Mi : float
        Halo mass [Msol] at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters

    Returns
    -------
    (dMdt, Mz) : numpy arrays of the same size as 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at each z
    """
    # Ensure that z is a 1D numpy array
    z = np.array(z, ndmin=1, dtype=float)
    # Solve the rate and mass at every redshift step, then stack
    results = [acc_rate(zval, zi, Mi, **cosmo) for zval in z]
    dMdt_array = np.array([rate for rate, mass in results], dtype=float)
    Mz_array = np.array([mass for rate, mass in results], dtype=float)
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate the concentration for a halo of mass 'M' at
    redshift 'z' by root-finding eqn 18 of Correa et al (2015c).

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass [Msol] at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also carry the 'A_scaling' key (see getAscaling).

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : numpy arrays
        of equivalent size to 'z' and 'M': concentration, the mass
        variance 'sigma' it corresponds to, the dimensionless peak
        height nu, and the formation redshift z_-2. All four entries
        are set to -1 for a halo where the root-find failed.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Output arrays, filled element by element in the loop below
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c', bracketed
        # between concentrations of 2 and 1000
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        if np.isclose(c, 0):
            # Root-find failure sentinel: flag all outputs as -1
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # Open in text mode: every write below passes str, so binary
        # mode ('wb') raises TypeError on Python 3
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        fout.close() if filename else None
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | _getcosmoheader | python | def _getcosmoheader(cosmo):
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader) | Output the cosmology to a string for writing to file | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L111-L119 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
    """ Check and convert any input scalar or array to numpy array

    Broadcasts 'zi' against 'Mi' so callers can iterate over matched
    (redshift, mass) pairs, and converts the output redshifts 'z'.

    Parameters
    ----------
    zi : float / array_like
        Starting redshift(s).
    Mi : float / array_like
        Halo mass(es) at redshift 'zi'. If both 'zi' and 'Mi' have more
        than one element they must be the same length.
    z : float / array_like or False, optional
        Output redshift(s); False means "solve at zi only".
    verbose : bool, optional
        If true, print how the inputs were interpreted.

    Returns
    -------
    (zi, Mi, z, lenz, lenm, lenzout) on success, or -1 when 'zi' and
    'Mi' are both multi-element but of different lengths.
    """
    # How many halo redshifts provided?
    zi = np.array(zi, ndmin=1, dtype=float)
    # How many halo masses provided?
    Mi = np.array(Mi, ndmin=1, dtype=float)
    # Check the input sizes for zi and Mi make sense, if not then exit unless
    # one axis is length one, then replicate values to the size of the other
    if (zi.size > 1) and (Mi.size > 1):
        if(zi.size != Mi.size):
            print("Error ambiguous request")
            print("Need individual redshifts for all haloes provided ")
            print("Or have all haloes at same redshift ")
            return(-1)
    elif (zi.size == 1) and (Mi.size > 1):
        if verbose:
            print("Assume zi is the same for all Mi halo masses provided")
        # Replicate redshift for all halo masses
        zi = np.ones_like(Mi)*zi[0]
    elif (Mi.size == 1) and (zi.size > 1):
        if verbose:
            print("Assume Mi halo masses are the same for all zi provided")
        # Replicate halo mass for all starting redshifts
        Mi = np.ones_like(zi)*Mi[0]
    else:
        if verbose:
            print("A single Mi and zi provided")
    # Very simple test for size / type of incoming array
    # just in case numpy / list given
    if z is False:
        # No output redshifts requested: solve at zi only (single step);
        # 'z' is passed through unchanged as False
        lenzout = 1
    else:
        # If something was passed, convert to 1D NumPy array
        z = np.array(z, ndmin=1, dtype=float)
        lenzout = z.size
    return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (e.g. 'WMAP5', 'Planck13', case-insensitive) or
        a dict of cosmological parameters in cosmolopy format.

    Returns
    -------
    dict
        Parameter dict suitable for passing as **cosmo to the cosmolopy
        routines, including the 'A_scaling' normalisation.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a dict nor a recognised named cosmology.
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Perturb the WMAP5 best-fit normalisation for this cosmology
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))
        # Fail fast: previously execution fell through and crashed below
        # with a NameError on the never-assigned 'cosmo'
        raise ValueError("Unrecognised cosmology %r" % (cosmology,))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo.
    M : float / numpy array
        Halo mass; the fit's pivot mass is 2e12/0.72 (presumably
        Msol/h as in Duffy et al. 2008 -- confirm against the paper).
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'.
    relaxed : bool, optional
        Use the relaxed-halo fit if True, the full-sample fit otherwise.

    Returns
    -------
    float / numpy array
        NFW concentration c(M, z) = A * (M/Mpivot)**B * (1+z)**C.

    Raises
    ------
    ValueError
        If 'vir' is not a recognised boundary definition (previously this
        fell through to a NameError on the unbound 'params').
    """
    if(vir == '200crit'):
        if relaxed:
            params = [6.71, -0.091, -0.44]
        else:
            params = [5.71, -0.084, -0.47]
    elif(vir == 'tophat'):
        if relaxed:
            params = [9.23, -0.090, -0.69]
        else:
            params = [7.85, -0.081, -0.71]
    elif(vir == '200mean'):
        if relaxed:
            params = [11.93, -0.090, -0.99]
        else:
            params = [10.14, -0.081, -1.01]
    else:
        print("Didn't recognise the halo boundary definition provided %s"
              % (vir))
        raise ValueError("Unknown halo boundary definition %r" % (vir,))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality Ascaling for the
    rho_crit - rho_2 relation for an unknown cosmology (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Cosmological parameters in cosmolopy format; requires at least
        'sigma_8' and 'n' plus whatever radius_to_mass needs.

    Returns
    -------
    float
        Multiplicative correction applied to the WMAP5 'A' normalisation.
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    mass_8mpc = cp.perturbation.radius_to_mass(8, **cosmo)
    # Ratio of the reference sigma_8 to this cosmology's value
    sigma_term = 0.796 / cosmo['sigma_8']
    # Weak tilt correction around the reference spectral index n = 0.963
    tilt_term = (mass_8mpc / 2.5e14) ** ((cosmo['n'] - 0.963) / 6)
    return sigma_term * tilt_term
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (case-insensitive), or -- when newcosmo is set --
        a dict of cosmological parameters to perturb the WMAP5 value with.
    newcosmo : str, optional
        If cosmology is not from the predefined list, perturb the
        A_scaling variable from the WMAP5 baseline. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit for the cosmology

    Raises
    ------
    ValueError
        If 'cosmology' is not a recognised name and newcosmo is not set
        (previously this fell through to a NameError on 'A_scaling').
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            print("Error, don't recognise your cosmology for A_scaling ")
            print("You provided %s" % (cosmology))
            raise ValueError("Unknown cosmology %r for A_scaling"
                             % (cosmology,))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Return the derivative of the normalised linear growth factor at
    redshift z for the cosmology given by **cosmo """
    e_squared = cosmo['omega_M_0'] * (1 + z)**3 + cosmo['omega_lambda_0']
    inv_h = e_squared**(-0.5)
    # Integrand of the growth integral evaluated at z
    fz = (1 + z) * inv_h**3
    # growthfactor is pure, so evaluate it once instead of twice
    growth = growthfactor(z, norm=True, **cosmo)
    term1 = growth * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2
    term2 = fz * growth / _int_growth(z, **cosmo)
    return term1 - term2
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift z, normalised to unity at z=0
    when norm is True, for the cosmology **cosmo

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0 case defaults True
    cosmo : dict
        Dictionary of cosmological parameters; requires at least
        'omega_M_0' and 'omega_lambda_0'.

    Returns
    -------
    float or numpy array
        The growth factor at redshift(s) 'z'
    """
    # Dimensionless Hubble parameter E(z) for a flat universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    unnormed = hubble * _int_growth(z, **cosmo)
    if not norm:
        return unnormed
    return unnormed / _int_growth(0, **cosmo)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Formation redshift z_-2 for a halo of concentration 'c' at
    redshift 'z' (eqn 18 of Correa et al 2015c rearranged)

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities; use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    mu_one = np.log(2) - 0.5
    mu_c = np.log(1 + c) - c/(1 + c)
    # Mean density within the scale radius, in units of rho_crit
    rho_2 = 200 * (c**3) * mu_one / mu_c
    ratio = omega_lambda_0 / omega_M_0
    return (((1 + z)**3 + ratio) * (rho_2/Ascaling) - ratio)**(1/3) - 1
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde
    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and (negative) exponential mass-growth indices of
        eqns 9 and 10 in Correa et al. (2015c)
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters in cosmolopy format
        (requires 'h', 'omega_M_0', 'omega_lambda_0' plus whatever
        calc_ab needs).

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Growth-rate indices at the starting redshift
    # (Eqn 9 and 10 of Correa et al. 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    dz = z - zi
    # Halo mass at z, in Msol (Eqn 8 in Correa et al. 2015c)
    Mz = Mi * (1 + dz)**a_tilde * np.exp(b_tilde * dz)
    # Accretion rate at z, Msol yr^-1 (Eqn 11 from Correa et al. 2015c)
    hubble_term = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 +
                          cosmo['omega_lambda_0'])
    dMdt = (71.6 * (Mz/1e12) * (cosmo['h']/0.7) *
            (-a_tilde/(1 + dz) - b_tilde) * (1 + z) * hubble_term)
    return dMdt, Mz
def MAH(z, zi, Mi, **cosmo):
    """ Calculate mass accretion history by evaluating acc_rate at every
    redshift step in 'z' for a halo of mass 'Mi' at redshift 'zi'

    Parameters
    ----------
    z : float / numpy array
        Redshift to output MAH over. Note zi<z always
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters in cosmolopy format.

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    # Solve the accretion rate and halo mass at each redshift step
    pairs = [acc_rate(zval, zi, Mi, **cosmo) for zval in z]
    dMdt_array = np.array([rate for rate, _mass in pairs], dtype=float)
    Mz_array = np.array([mass for _rate, mass in pairs], dtype=float)
    return dMdt_array, Mz_array
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, the Mass Variance 'sigma' this corresponds to,
        the dimensionless fluctuation this represents and formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # (brentq brackets the root of _minimize_c between c=2 and c=1000;
        # it raises ValueError if the endpoints do not straddle a sign change)
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        # NOTE(review): brentq returns a root inside [2, 1000], so c can
        # never be close to 0 here; this failure branch looks unreachable
        # as written -- confirm the intended failure signal
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # Open in text mode: every write below passes str, so binary
        # mode ('wb') raises TypeError on Python 3
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        fout.close() if filename else None
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | cduffy | python | def cduffy(z, M, vir='200crit', relaxed=True):
if(vir == '200crit'):
if relaxed:
params = [6.71, -0.091, -0.44]
else:
params = [5.71, -0.084, -0.47]
elif(vir == 'tophat'):
if relaxed:
params = [9.23, -0.090, -0.69]
else:
params = [7.85, -0.081, -0.71]
elif(vir == '200mean'):
if relaxed:
params = [11.93, -0.090, -0.99]
else:
params = [10.14, -0.081, -1.01]
else:
print("Didn't recognise the halo boundary definition provided %s"
% (vir))
return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2])) | NFW conc from Duffy 08 Table 1 for halo mass and redshift | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L122-L144 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list.

    Parameters
    ----------
    cosmology : str or dict
        Either the name of a precomputed cosmology (e.g. 'WMAP5',
        'Planck13') or a dict of cosmological parameters.

    Returns
    -------
    dict
        Parameter dict (including 'A_scaling') suitable for passing as
        **cosmo to the cosmolopy routines.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a dict nor a recognised name.
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))
        # Fail fast: execution previously fell through to the
        # set_omega_k_0 call below and crashed on the unbound 'cosmo'
        raise ValueError("Unrecognised cosmology %s" % (cosmology))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    float
        The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
    Raises
    ------
    """
    # Mass enclosed within a radius-8 sphere for this cosmology
    # (presumably 8 Mpc/h, the sigma_8 convention -- confirm cosmolopy units)
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the WMAP5 reference values used for the
    # precomputed A_scaling fits (sigma_8 = 0.796, n = 0.963)
    perturbed_A = (0.796/cosmo['sigma_8']) * \
        (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
    return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9 variants, Planck13/15)
        or, when 'newcosmo' is truthy, a dict of cosmological parameters.
    newcosmo : bool, optional
        If truthy, 'cosmology' is a parameter dict for an unlisted
        cosmology and the WMAP5 value is perturbed via _delta_sigma().
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit for the cosmology

    Raises
    ------
    ValueError
        If a named cosmology is not in the precomputed list.
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            print("Error, don't recognise your cosmology for A_scaling ")
            print("You provided %s" % (cosmology))
            # Fail fast: falling through here used to raise an
            # UnboundLocalError on the return statement below
            raise ValueError("Unrecognised cosmology %s for A_scaling"
                             % (cosmology))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Derivative with respect to redshift of the normalised linear
    growth factor at z for the cosmology **cosmo """
    # 1/E(z) for a flat universe
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    growth = growthfactor(z, norm=True, **cosmo)
    # Product-rule pieces of d/dz [ E(z) * integral(z..200) ]
    term_a = growth * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2
    term_b = fz * growth / _int_growth(z, **cosmo)
    return(term_a - term_b)
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift z for the cosmology **cosmo.

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which to evaluate the growth factor
    norm : boolean, optional
        Normalise so that the z=0 growth factor equals 1 (default True)
    cosmo : dict
        Cosmological parameter dictionary; requires at least
        'omega_M_0' and 'omega_lambda_0'

    Returns
    -------
    float or numpy array
        The growth factor at each requested redshift
    """
    # Dimensionless Hubble rate E(z) for a flat universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    unnormed = hubble * _int_growth(z, **cosmo)
    if not norm:
        return(unnormed)
    # Normalise against the z=0 value
    return(unnormed / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Invert eqn 18 of Correa et al (2015c): formation redshift z_-2
    for a halo of concentration 'c' observed at redshift 'z'.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities; obtain via
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Matter density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile factors Y(1) and Y(c)
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    # Mean density inside the scale radius in units of the critical density
    rho_2 = 200*(c**3)*Y1/Yc
    # Redshift at which the mean universe density equalled rho_2/Ascaling
    zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    return(zf)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde
    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (a_tilde, b_tilde) : float
        a_tilde is the power-law growth index, b_tilde the exponential
        growth index of the mass-history fit (Eqn 8, Correa et al 2015c)
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitray formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    # f combines the two mass variances (Eqn 20, Correa et al 2015a)
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'z'
    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Find parameters a_tilde and b_tilde for initial redshift
    # use Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Halo mass at z, in Msol
    # use Eqn 8 in Correa et al. (2015c):
    # M(z) = Mi (1 + z - zi)^a_tilde exp(b_tilde (z - zi))
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Accretion rate at z, Msol yr^-1
    # use Eqn 11 from Correa et al. (2015c); the E(z) factor converts the
    # redshift derivative into a time derivative
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Mass accretion history: evaluate acc_rate() at every output
    redshift in 'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (as elsewhere in this module)

    Returns
    -------
    (dMdt, Mz) : numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at each redshift
    """
    # Ensure that z is a 1D NumPy array of floats
    z = np.array(z, ndmin=1, dtype=float)
    # Pre-allocate the output arrays, then fill them redshift by redshift
    rates = np.empty(z.size)
    masses = np.empty(z.size)
    for idx, zval in enumerate(z):
        rates[idx], masses[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(rates, masses)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimnesionless fluctuation this represents and formation redshift.
        All four entries are set to -1 for a (z, M) pair where the
        concentration solve fails.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # (root of _minimize_c bracketed in the physical range [2, 1000])
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                  cosmo['A_scaling'], cosmo['omega_M_0'],
                                  cosmo['omega_lambda_0']))
        # NOTE(review): brentq brackets c in [2, 1000], so a near-zero c
        # looks unreachable here -- presumably a legacy failure sentinel
        # from an earlier solver; confirm before relying on the -1 branch
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            # Mass variance and peak height for this halo mass at z=0
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one-to-one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. Broadcasting rules as for 'zi'.
        Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        Structured columns of size (size(Mi) > size(z)) by size(z).
        mah only: ('zi','Mi','z','dMdt','Mz');
        com only: ('zi','Mi','z','c','sig','nu','zf');
        both: ('zi','Mi','z','dMdt','Mz','c','sig','nu','zf').
        'zi' is the starting redshift, 'Mi' the halo mass at zi, 'z' the
        output redshift (NB z>zi), 'dMdt' the accretion rate [Msol/yr],
        'Mz' the halo mass at 'z', 'c' the NFW concentration, 'sig' the
        mass variance, 'nu' the peak height and 'zf' the formation redshift.
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.

    Examples
    --------
    >>> import examples
    >>> examples.runcommands()  # A series of ways to query structured dataset
    >>> examples.plotcommands()  # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: must be text mode -- this function writes str objects,
        # which raise TypeError on a file opened in binary ('wb') mode
        # under Python 3
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | _delta_sigma | python | def _delta_sigma(**cosmo):
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A) | Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------ | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L147-L172 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list.

    Parameters
    ----------
    cosmology : str or dict
        Either the name of a precomputed cosmology (e.g. 'WMAP5',
        'Planck13') or a dict of cosmological parameters.

    Returns
    -------
    dict
        Parameter dict (including 'A_scaling') suitable for passing as
        **cosmo to the cosmolopy routines.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a dict nor a recognised name.
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))
        # Fail fast: execution previously fell through to the
        # set_omega_k_0 call below and crashed on the unbound 'cosmo'
        raise ValueError("Unrecognised cosmology %s" % (cosmology))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo
    M : float / numpy array
        Halo mass [Msol] in the boundary definition given by 'vir'
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-sample fit if True, the full-sample fit otherwise

    Returns
    -------
    float / numpy array
        NFW concentration c(M, z)

    Raises
    ------
    ValueError
        If 'vir' is not one of the recognised boundary definitions
    """
    # (A, B, C) fit parameters from Duffy et al. (2008) Table 1:
    # c = A * (M / Mpivot)^B * (1+z)^C, keyed by boundary then relaxed flag
    fits = {'200crit': {True: [6.71, -0.091, -0.44],
                        False: [5.71, -0.084, -0.47]},
            'tophat': {True: [9.23, -0.090, -0.69],
                       False: [7.85, -0.081, -0.71]},
            '200mean': {True: [11.93, -0.090, -0.99],
                        False: [10.14, -0.081, -1.01]}}
    if vir not in fits:
        print("Didn't recognise the halo boundary definition provided %s"
              % (vir))
        # Fail fast: the original fell through to the return statement
        # and crashed there on the unbound 'params' variable
        raise ValueError("Unrecognised halo boundary definition %s" % (vir))
    params = fits[vir][bool(relaxed)]
    # Pivot mass 2e12/h Msol with h = 0.72
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9 variants, Planck13/15)
        or, when 'newcosmo' is truthy, a dict of cosmological parameters.
    newcosmo : bool, optional
        If truthy, 'cosmology' is a parameter dict for an unlisted
        cosmology and the WMAP5 value is perturbed via _delta_sigma().
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit for the cosmology

    Raises
    ------
    ValueError
        If a named cosmology is not in the precomputed list.
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            print("Error, don't recognise your cosmology for A_scaling ")
            print("You provided %s" % (cosmology))
            # Fail fast: falling through here used to raise an
            # UnboundLocalError on the return statement below
            raise ValueError("Unrecognised cosmology %s for A_scaling"
                             % (cosmology))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Derivative with respect to redshift of the normalised linear
    growth factor at z for the cosmology **cosmo """
    # 1/E(z) for a flat universe
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    growth = growthfactor(z, norm=True, **cosmo)
    # Product-rule pieces of d/dz [ E(z) * integral(z..200) ]
    term_a = growth * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2
    term_b = fz * growth / _int_growth(z, **cosmo)
    return(term_a - term_b)
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor at redshift z for the cosmology **cosmo.

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which to evaluate the growth factor
    norm : boolean, optional
        Normalise so that the z=0 growth factor equals 1 (default True)
    cosmo : dict
        Cosmological parameter dictionary; requires at least
        'omega_M_0' and 'omega_lambda_0'

    Returns
    -------
    float or numpy array
        The growth factor at each requested redshift
    """
    # Dimensionless Hubble rate E(z) for a flat universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    unnormed = hubble * _int_growth(z, **cosmo)
    if not norm:
        return(unnormed)
    # Normalise against the z=0 value
    return(unnormed / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    formation redshift for a concentration at a given redshift
    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmological dependent scaling between densities, use function
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75
    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # Y(u) = ln(1+u) - u/(1+u): Y1 = Y(1), Yc = Y(c)
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    # Mean density within the scale radius, in units of rho_crit(z) (eqn 14)
    rho_2 = 200*(c**3)*Y1/Yc
    # Invert rho_2 = Ascaling * rho_crit(zf) for the formation redshift
    zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    return(zf)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde
    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (a_tilde, b_tilde) : float
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a: progenitor mass ratio q
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation
    # (errors from sigma_r are discarded)
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    # f = 1 / sqrt(sigma(M/q)^2 - sigma(M)^2); sigq > sig for Mi/q < Mi
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'
    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Find parameters a_tilde and b_tilde for initial redshift
    # use Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Halo mass at z, in Msol
    # use Eqn 8 in Correa et al. (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Accretion rate at z, Msol yr^-1
    # use Eqn 11 from Correa et al. (2015c); last factor is E(z)=H(z)/H0
    # NOTE(review): all operations here are elementwise numpy ops, so 'z'
    # appears to also accept arrays — confirm against MAH's per-element use.
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate mass accretion history by looping function acc_rate
    over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
    Parameters
    ----------
    z : float / numpy array
        Redshift to output MAH over. Note zi<z always
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    # Create a full array (float64, matching z)
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    # NOTE(review): each acc_rate call re-derives a_tilde/b_tilde from the
    # same (zi, Mi); acc_rate's formulas look vectorisable over z — confirm.
    for i_ind, zval in enumerate(z):
        # Solve the accretion rate and halo mass at each redshift step
        dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
        dMdt_array[i_ind] = dMdt
        Mz_array[i_ind] = Mz
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimensionless fluctuation this represents and formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # (root-find _minimize_c, bracketed to concentrations in [2, 1000])
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                  cosmo['A_scaling'], cosmo['omega_M_0'],
                                  cosmo['omega_lambda_0']))
        # NOTE(review): brentq is bracketed to [2, 1000], so a root near 0
        # appears unreachable and this guard looks dead — confirm; brentq
        # raises ValueError instead when no sign change exists in bracket.
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            # -1 sentinels flag the failure in all returned arrays
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1 (tuple == -1 is simply False on success)
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: open in text mode. Every write below passes a str; the
        # previous binary mode ('wb') raised TypeError under Python 3.
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | getAscaling | python | def getAscaling(cosmology, newcosmo=None):
# Values from Correa 15c
defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
'wmap1_lss': 853, 'wmap3_mean': 850,
'wmap5_ml': 887, 'wmap5_lss': 887,
'wmap7_lss': 887,
'planck13': 880, 'planck15': 880}
if newcosmo:
# Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
else:
if cosmology.lower() in defaultcosmologies.keys():
A_scaling = defaultcosmologies[cosmology.lower()]
else:
print("Error, don't recognise your cosmology for A_scaling ")
print("You provided %s" % (cosmology))
return(A_scaling) | Returns the normalisation constant between
Rho_-2 and Rho_mean(z_formation) for a given cosmology
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
newcosmo : str, optional
If cosmology is not from predefined list have to perturbation
A_scaling variable. Defaults to None.
Returns
-------
float
The scaled 'A' relation between rho_2 and rho_crit for the cosmology | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L175-L216 | [
"def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (case-insensitive, e.g. 'WMAP5', 'planck13') or a
        dict of cosmological parameters in the cosmolopy format.

    Returns
    -------
    dict
        Cosmology dict augmented with 'A_scaling' (and omega_k_0 = 0),
        usable as **cosmo with the cosmolopy routines.

    Raises
    ------
    ValueError
        If a string is passed that is not a recognised named cosmology.
        (Previously this path printed a warning and then crashed with a
        NameError because 'cosmo' was never assigned.)
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Perturb the A_scaling normalisation for this new cosmology
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # Fail loudly instead of falling through with 'cosmo' unbound
        raise ValueError("You haven't passed a dict of cosmological "
                         "parameters OR a recognised cosmology, "
                         "you gave %s" % (cosmology,))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW concentration from Duffy et al. (2008) Table 1 fits.

    Implements c = A * (M / Mpivot)**B * (1+z)**C with
    Mpivot = 2e12/0.72 (i.e. 2e12 Msol/h with h = 0.72 — presumably M is
    therefore in Msol; confirm against callers).

    Parameters
    ----------
    z : float / numpy array
        Redshift
    M : float / numpy array
        Halo mass
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-halo fit if True, else the full-sample fit

    Returns
    -------
    float / numpy array
        NFW concentration

    Raises
    ------
    ValueError
        If `vir` is not a recognised boundary definition.  (Previously
        this printed a warning and then crashed with a NameError on the
        unassigned 'params'.)
    """
    # (A, B, C) from Duffy et al. 2008, Table 1, keyed by boundary
    # definition then by relaxed (True) / full sample (False)
    fits = {
        '200crit': {True: (6.71, -0.091, -0.44),
                    False: (5.71, -0.084, -0.47)},
        'tophat': {True: (9.23, -0.090, -0.69),
                   False: (7.85, -0.081, -0.71)},
        '200mean': {True: (11.93, -0.090, -0.99),
                    False: (10.14, -0.081, -1.01)},
    }
    try:
        A, B, C = fits[vir][bool(relaxed)]
    except KeyError:
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir,))
    return(A * ((M/(2e12/0.72))**B) * ((1+z)**C))
def _delta_sigma(**cosmo):
    """ Perturb the best-fit constant of proportionality 'Ascaling' of the
    rho_crit - rho_2 relation for a non-tabulated cosmology
    (Correa et al. 2015c).

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters in the cosmolopy format;
        must include at least 'sigma_8' and 'n'.

    Returns
    -------
    float
        Multiplicative correction to the WMAP5 'A' normalisation for
        this cosmology.
    """
    # Mass within an 8 Mpc sphere for this cosmology
    M8 = cp.perturbation.radius_to_mass(8, **cosmo)
    # Scale by sigma_8 and spectral-index offsets from the reference fit
    return((0.796/cosmo['sigma_8']) *
           (M8/2.5e14)**((cosmo['n']-0.963)/6))
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Return the derivative of the normalised linear growth factor at z
    for a given cosmology **cosmo.

    Product rule on D(z) = E(z) * I(z) with I the growth integral:
    the first term differentiates E(z), the second the integral bound.
    """
    # 1/E(z) with E(z)^2 = Om (1+z)^3 + Ol
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    # Hoist the growth-factor evaluation: the original computed
    # growthfactor(z, norm=True, **cosmo) twice with identical arguments.
    growth = growthfactor(z, norm=True, **cosmo)
    deriv_g = growth*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * growth/_int_growth(z, **cosmo)
    return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
    """ Linear growth factor D(z) for a flat matter + Lambda cosmology.

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which to evaluate the growth factor
    norm : boolean, optional
        When True (default) the result is normalised so D(z=0) = 1
    cosmo : dict
        Cosmological parameters in the cosmolopy format; must provide
        'omega_M_0' and 'omega_lambda_0'.

    Returns
    -------
    float or numpy array
        Growth factor at redshift(s) 'z'
    """
    # Dimensionless Hubble parameter E(z) = H(z)/H0
    Ez = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                 cosmo['omega_lambda_0'])
    # D(z) is proportional to E(z) times the growth integral from z to 200
    growth = Ez * _int_growth(z, **cosmo)
    if not norm:
        return(growth)
    # Normalise to the z=0 value
    return(growth / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Formation redshift z_-2 for a halo of concentration 'c' at
    redshift 'z' (eqn 18 of Correa et al. 2015c, rearranged).

    Parameters
    ----------
    c : float / numpy array
        Halo concentration
    z : float / numpy array
        Redshift at which the halo has concentration 'c'
    Ascaling : float
        Cosmology-dependent scaling between densities; obtain via
        getAscaling('WMAP5') if unsure. Default 900.
    omega_M_0 : float
        Matter density parameter. Default 0.25.
    omega_lambda_0 : float
        Dark-energy density parameter. Default 0.75.

    Returns
    -------
    float / numpy array
        Formation redshift of the halo.
    """
    # NFW mass factor Y(u) = ln(1+u) - u/(1+u), at u=1 and u=c
    y_unit = np.log(2) - 0.5
    y_conc = np.log(1+c) - c/(1+c)
    # Mean density inside the scale radius, in units of rho_crit(z)
    rho_2 = 200*(c**3)*y_unit/y_conc
    # Invert rho_2 = Ascaling * rho_crit(zf) for the formation epoch
    olm = omega_lambda_0/omega_M_0
    return((((1+z)**3 + olm) * (rho_2/Ascaling) - olm)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Growth-rate indices (a_tilde, b_tilde) for a halo of mass 'Mi'
    at redshift 'zi'.

    These are the power-law and exponential indices of the mass-history
    fit M(z) = Mi (1+z-zi)^a_tilde exp(b_tilde (z-zi)) — eqns 9 and 10
    of Correa et al. (2015c), generalised from the z=0 special case of
    Correa et al. (2015a), where they are called alpha and beta.

    Returns
    -------
    (a_tilde, b_tilde) : float
    """
    # Formation-redshift proxy, eqn 23 of Correa et al. 2015a
    logMi = np.log10(Mi)
    zf = -0.0064 * logMi**2 + 0.0237 * logMi + 1.8837
    # Progenitor mass ratio q, eqn 22 of Correa et al. 2015a
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radii of Mi and Mi/q  [Mpc]
    rad_full = cp.perturbation.mass_to_radius(Mi, **cosmo)
    rad_sub = cp.perturbation.mass_to_radius(Mi/q, **cosmo)
    # Mass variance sigma evaluated at z=0 (good approximation);
    # quadrature errors from sigma_r are discarded
    sig, err_sig = cp.perturbation.sigma_r(rad_full, 0, **cosmo)
    sigq, err_sigq = cp.perturbation.sigma_r(rad_sub, 0, **cosmo)
    f = (sigq**2 - sig**2)**(-0.5)
    # Power-law growth index (eqn 9 of Correa et al. 2015c)
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # Exponential growth index (eqn 10)
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Accretion rate and progenitor mass at redshift 'z' for a halo
    that has mass 'Mi' at the lower redshift 'zi' (note zi < z).

    Parameters
    ----------
    z : float
        Redshift at which to evaluate the history
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Cosmological parameters in the cosmolopy format.

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr] and halo mass [Msol] at redshift 'z'.
    """
    # Growth indices at the starting redshift (eqns 9 & 10, Correa+ 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Progenitor mass at z (eqn 8, Correa+ 2015c), in Msol
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Accretion rate at z (eqn 11, Correa+ 2015c), in Msol/yr;
    # the trailing sqrt is the dimensionless Hubble parameter E(z)
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Mass accretion history: apply acc_rate at each requested output
    redshift for a halo of mass 'Mi' at redshift 'zi' (zi < z always).

    Parameters
    ----------
    z : float / numpy array
        Output redshift(s)
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Cosmological parameters in the cosmolopy format.

    Returns
    -------
    (dMdt, Mz) : numpy arrays, same size as 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at each redshift.
    """
    # Coerce z to a 1D float array
    z = np.array(z, ndmin=1, dtype=float)
    # Pre-allocate float outputs matching z
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    # Solve the rate and progenitor mass one output redshift at a time
    for idx in range(z.size):
        rate, mass = acc_rate(z[idx], zi, Mi, **cosmo)
        dMdt_array[idx] = rate
        Mz_array[idx] = mass
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'
    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimensionless fluctuation this represents and formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # (root-find _minimize_c over concentrations bracketed in [2, 1000])
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                  cosmo['A_scaling'], cosmo['omega_M_0'],
                                  cosmo['omega_lambda_0']))
        # NOTE(review): with the [2, 1000] bracket a root near zero seems
        # unreachable, making this guard look dead — confirm; a failed
        # bracket makes brentq raise ValueError before reaching here.
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            # -1 sentinels flag the failed solve in every returned array
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one-to-one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one-to-one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured ndarray of shape (size(Mi), size(z))
        If mah and not com, columns are
        ('zi', 'Mi', 'z', 'dMdt', 'Mz'); if com and not mah they are
        ('zi', 'Mi', 'z', 'c', 'sig', 'nu', 'zf'); if both are
        requested, the union of the two sets.
        'zi' is the starting redshift, 'Mi' the halo mass at 'zi',
        'z' the output redshift (z >= zi), 'dMdt' the accretion rate
        [Msol/yr], 'Mz' the halo mass at 'z', 'c' the NFW
        concentration, 'sig' the mass variance, 'nu' the peak height
        and 'zf' the formation redshift.
    cosmo : dict
        Returned as a second element only when retcosmo is True.

    Raises
    ------
    Returns the scalar -1 if both com and mah are False (user must
    request at least one output) or if 'zi' and 'Mi' are arrays of
    unequal size (no one-to-one pairing possible).
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array and inflate the redshift /
    # mass axes to match each other for the later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # _checkinput signals inconsistent zi / Mi input with the scalar -1
    if(results == -1):
        return(-1)
    zi, Mi, z, lenz, lenm, lenzout = results
    # At this point there are lenm (zi, Mi) combinations to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    fout = None
    if filename:
        print("Output to file %r" % (filename))
        # BUG FIX: open in text mode ('w'), not binary ('wb'); every
        # write below passes a str, which raises TypeError on a binary
        # stream under Python 3
        fout = open(filename, 'w')
    try:
        # Create the structured dataset (and write the file header)
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass.
        # Use the builtin zip: the module's hand-rolled _izip mis-handles
        # iterator exhaustion under Python 3.
        for i_ind, (zval, Mval) in enumerate(zip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # Output redshifts for this halo: every requested z >= zi,
            # or just zi itself when z was left as False
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for an object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if fout:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if fout:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if fout:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    finally:
        # Always release the file handle, even if an error occurred
        if fout is not None:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | _int_growth | python | def _int_growth(z, **cosmo):
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y) | Returns integral of the linear growth factor from z=200 to z=z | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L219-L235 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Accepts either a user-supplied dict of cosmological parameters or
    one of the recognised cosmology names below; raises ValueError for
    an unrecognised name.
    """
    # Map of recognised cosmology names to parameter dictionaries
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Unknown cosmology: perturb the best-fit WMAP5 scaling
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # BUG FIX: previously this only printed a warning and then hit
        # a NameError on the undefined 'cosmo'; fail loudly instead
        raise ValueError("Unrecognised cosmology %s; pass a dict of "
                         "cosmological parameters or a known name" %
                         (cosmology,))
    # Flat cosmology assumed: cosmolopy needs omega_k_0 set explicitly
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo
    M : float / numpy array
        Halo mass [Msol]
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-sample fit (default) or the full sample

    Returns
    -------
    float / numpy array
        NFW concentration c(M, z)

    Raises
    ------
    ValueError
        If 'vir' is not one of the recognised definitions.
    """
    # (A, B, C) power-law fit parameters from Duffy et al. (2008)
    # Table 1, keyed on boundary definition and sample selection
    fits = {'200crit': {True: (6.71, -0.091, -0.44),
                        False: (5.71, -0.084, -0.47)},
            'tophat': {True: (9.23, -0.090, -0.69),
                       False: (7.85, -0.081, -0.71)},
            '200mean': {True: (11.93, -0.090, -0.99),
                        False: (10.14, -0.081, -1.01)}}
    try:
        A, B, C = fits[vir][bool(relaxed)]
    except KeyError:
        # BUG FIX: previously only printed a warning and then crashed
        # with NameError on the undefined 'params'; raise instead
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir,))
    # c = A * (M / M_pivot)^B * (1+z)^C with M_pivot = 2e12/h (h = 0.72)
    return(A * ((M/(2e12/0.72))**B) * ((1+z)**C))
def _delta_sigma(**cosmo):
    """Perturb the best-fit constant of proportionality 'A' of the
    rho_crit - rho_2 relation for an unknown cosmology
    (Correa et al 2015c).

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format),
        e.g. {'h': 0.702, 'n': 0.963, 'omega_M_0': 0.275, ...}

    Returns
    -------
    float
        Multiplicative correction to the WMAP5 'A' normalisation.
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the WMAP5 best fit (sigma_8=0.796, n=0.963)
    scaling = (0.796/cosmo['sigma_8']) * \
        (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
    return(scaling)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9, Planck13/15) or, when
        newcosmo is truthy, a dict of cosmological parameters.
    newcosmo : str, optional
        If cosmology is not from the predefined list, perturb the
        WMAP5 A_scaling instead. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit.

    Raises
    ------
    ValueError
        If a named cosmology is not recognised.
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        return(defaultcosmologies['wmap5'] * _delta_sigma(**cosmology))
    key = cosmology.lower()
    if key not in defaultcosmologies:
        # BUG FIX: previously only printed a warning and then crashed
        # with NameError on the undefined 'A_scaling'; raise instead
        raise ValueError("Error, don't recognise your cosmology for "
                         "A_scaling, you provided %s" % (cosmology,))
    return(defaultcosmologies[key])
def _deriv_growth(z, **cosmo):
    """Derivative with respect to z of the normalised linear growth
    factor for the given cosmology **cosmo."""
    # 1/E(z), the reciprocal dimensionless Hubble parameter
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    gz = growthfactor(z, norm=True, **cosmo)
    # Product rule applied to D(z) = E(z) * growth integral
    return(gz * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -
           fz * gz / _int_growth(z, **cosmo))
def growthfactor(z, norm=True, **cosmo):
    """Linear growth factor at redshift z for the given cosmology.

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise to the z=0 value, defaults True
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    float or numpy array
        The growth factor at each requested redshift
    """
    # Dimensionless Hubble parameter E(z) for a flat universe
    ez = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 + cosmo['omega_lambda_0'])
    unnormed = ez * _int_growth(z, **cosmo)
    if not norm:
        return(unnormed)
    # Normalise against the present-day value
    return(unnormed / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """Formation redshift of a halo of concentration 'c' at redshift
    'z', from eqn 18 of Correa et al (2015c) rearranged.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities; use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for the given halo(es)
    """
    y_unit = np.log(2) - 0.5
    y_conc = np.log(1 + c) - c/(1 + c)
    # Mean density within the scale radius in units of critical
    rho_2 = 200*(c**3)*y_unit/y_conc
    ratio = omega_lambda_0/omega_M_0
    return((((1 + z)**3 + ratio) * (rho_2/Ascaling) - ratio)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    These are the power-law and exponential growth indices of eqns 9
    and 10 of Correa et al (2015c), generalised from the z=0 special
    case of Correa et al (2015a).

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi' (assumed in Msol - TODO confirm
        against callers)
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law index a_tilde and exponential index b_tilde (always
        negative of the same f factor).
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift; z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """Accretion rate and mass of a halo at redshift 'z', given it has
    mass 'Mi' at the lower redshift 'zi' (note zi < z).

    Parameters
    ----------
    z : float
        Redshift at which to evaluate the accretion rate / mass
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr] and halo mass [Msol] at redshift 'z'
    """
    # Growth indices a_tilde / b_tilde at the starting redshift,
    # Eqns 9 and 10 of Correa et al. (2015c)
    alpha, beta = calc_ab(zi, Mi, **cosmo)
    dz = z - zi
    # Halo mass at z in Msol, Eqn 8 of Correa et al. (2015c)
    Mz = Mi * ((1 + dz)**alpha) * (np.exp(beta * dz))
    # Dimensionless Hubble parameter E(z)
    Ez = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])
    # Accretion rate at z in Msol/yr, Eqn 11 of Correa et al. (2015c)
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-alpha / (1 + dz) - beta) * (1 + z) * Ez
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """Mass accretion history: loop acc_rate over the redshift steps
    'z' for a halo of mass 'Mi' at redshift 'zi' (zi < z always).

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    (dMdt, Mz) : numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at each redshift
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    # Solve the accretion rate and progenitor mass at every output step
    for idx, zval in enumerate(z):
        dMdt_array[idx], Mz_array[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also contain the 'A_scaling' key (added by getcosmo).

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds to,
        the dimensionless fluctuation (peak height) this represents
        and the formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    # NOTE(review): _izip is the module's hand-rolled izip; under
    # Python 3 it mis-handles iterator exhaustion - confirm callers
    # only reach this with Python 2 semantics or fix _izip.
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # (root of _minimize_c bracketed between c=2 and c=1000)
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        # NOTE(review): brentq returns a root inside [2, 1000], so
        # this c ~ 0 failure branch appears unreachable - confirm
        # whether a brentq convergence error was intended instead.
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            # Comoving radius enclosing mass Mval, then the mass
            # variance sigma(R) at z=0 and the peak height nu
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one-to-one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one-to-one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured ndarray of shape (size(Mi), size(z))
        If mah and not com, columns are
        ('zi', 'Mi', 'z', 'dMdt', 'Mz'); if com and not mah they are
        ('zi', 'Mi', 'z', 'c', 'sig', 'nu', 'zf'); if both are
        requested, the union of the two sets.
        'zi' is the starting redshift, 'Mi' the halo mass at 'zi',
        'z' the output redshift (z >= zi), 'dMdt' the accretion rate
        [Msol/yr], 'Mz' the halo mass at 'z', 'c' the NFW
        concentration, 'sig' the mass variance, 'nu' the peak height
        and 'zf' the formation redshift.
    cosmo : dict
        Returned as a second element only when retcosmo is True.

    Raises
    ------
    Returns the scalar -1 if both com and mah are False (user must
    request at least one output) or if 'zi' and 'Mi' are arrays of
    unequal size (no one-to-one pairing possible).
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array and inflate the redshift /
    # mass axes to match each other for the later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # _checkinput signals inconsistent zi / Mi input with the scalar -1
    if(results == -1):
        return(-1)
    zi, Mi, z, lenz, lenm, lenzout = results
    # At this point there are lenm (zi, Mi) combinations to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    fout = None
    if filename:
        print("Output to file %r" % (filename))
        # BUG FIX: open in text mode ('w'), not binary ('wb'); every
        # write below passes a str, which raises TypeError on a binary
        # stream under Python 3
        fout = open(filename, 'w')
    try:
        # Create the structured dataset (and write the file header)
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if fout:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo mass.
        # Use the builtin zip: the module's hand-rolled _izip mis-handles
        # iterator exhaustion under Python 3.
        for i_ind, (zval, Mval) in enumerate(zip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # Output redshifts for this halo: every requested z >= zi,
            # or just zi itself when z was left as False
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for an object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if fout:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if fout:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if fout:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    finally:
        # Always release the file handle, even if an error occurred
        if fout is not None:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
def _deriv_growth(z, **cosmo):
    """Derivative with respect to z of the normalised linear growth
    factor for the given cosmology **cosmo."""
    # 1/E(z), the reciprocal dimensionless Hubble parameter
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    gz = growthfactor(z, norm=True, **cosmo)
    # Product rule applied to D(z) = E(z) * growth integral
    return(gz * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -
           fz * gz / _int_growth(z, **cosmo))
for a given cosmology **cosmo | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L238-L249 | [
"def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)\n",
"def growthfactor(z, norm=True, **cosmo):\n \"\"\" Returns linear growth factor at a given redshift, normalised to z=0\n by default, for a given cosmology\n\n Parameters\n ----------\n\n z : float or numpy array\n The redshift at which the growth factor should be calculated\n norm : boolean, optional\n If true then normalise the growth factor to z=0 case defaults True\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float or numpy array\n The growth factor at a range of redshifts 'z'\n\n Raises\n ------\n\n \"\"\"\n H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +\n cosmo['omega_lambda_0'])\n growthval = H * _int_growth(z, **cosmo)\n if norm:\n growthval /= _int_growth(0, **cosmo)\n\n return(growthval)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
    """ Iterate through multiple lists or arrays of equal size.

    Yields tuples pairing the i-th element of every iterable, stopping at
    the shortest input (same contract as the built-in ``zip``).

    _izip('ABCD', 'xy') --> ('A', 'x') ('B', 'y')
    """
    # The original Python 2 recipe (``iterators = map(iter, ...)`` plus a
    # ``while iterators`` loop) is broken on Python 3: ``map`` is lazy, so
    # the iterator sequence was consumed after the first pass and the loop
    # then yielded empty tuples forever. Delegating to ``zip`` is correct
    # on both versions and preserves the lazy-iteration behaviour.
    return iter(zip(*iterables))
def _checkinput(zi, Mi, z=False, verbose=None):
    """ Check and convert any input scalar or array to numpy array.

    Broadcasts `zi` against `Mi` when exactly one of them is scalar, and
    flags an error when both are arrays of mismatched size.

    Returns
    -------
    (zi, Mi, z, lenz, lenm, lenzout) on success, where zi/Mi are 1D float
    arrays of equal length, or the integer -1 on ambiguous input.
    """
    # How many halo redshifts provided?
    zi = np.array(zi, ndmin=1, dtype=float)
    # How many halo masses provided?
    Mi = np.array(Mi, ndmin=1, dtype=float)
    # Check the input sizes for zi and Mi make sense, if not then exit unless
    # one axis is length one, then replicate values to the size of the other
    if (zi.size > 1) and (Mi.size > 1):
        if(zi.size != Mi.size):
            print("Error ambiguous request")
            print("Need individual redshifts for all haloes provided ")
            print("Or have all haloes at same redshift ")
            return(-1)
    elif (zi.size == 1) and (Mi.size > 1):
        if verbose:
            print("Assume zi is the same for all Mi halo masses provided")
        # Replicate redshift for all halo masses
        zi = np.ones_like(Mi)*zi[0]
    elif (Mi.size == 1) and (zi.size > 1):
        if verbose:
            print("Assume Mi halo masses are the same for all zi provided")
        # Replicate halo mass for all redshifts
        Mi = np.ones_like(zi)*Mi[0]
    else:
        if verbose:
            print("A single Mi and zi provided")
    # Very simple test for size / type of incoming array
    # just in case numpy / list given
    if z is False:
        # No output redshifts requested: caller solves at z = zi only
        lenzout = 1
    else:
        # If something was passed, convert to 1D NumPy array
        z = np.array(z, ndmin=1, dtype=float)
        lenzout = z.size
    return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (case-insensitive, e.g. 'WMAP5', 'planck13') or a
        dict of cosmological parameters. A user dict is completed with the
        'A_scaling' normalisation and zeroed placeholders for any parameter
        cosmolopy expects but the user did not supply.

    Returns
    -------
    dict
        Cosmology dict suitable for passing as **cosmo to cosmolopy routines.

    Raises
    ------
    ValueError
        If `cosmology` is neither a dict nor a recognised cosmology name.
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Perturb the reference WMAP5 normalisation for this cosmology
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # Fail loudly: the original fell through after printing a warning
        # and then crashed with an UnboundLocalError on 'cosmo'.
        raise ValueError("You haven't passed a dict of cosmological "
                         "parameters OR a recognised cosmology, "
                         "you gave %s" % (cosmology))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
    """ Output the cosmology to a string for writing to file """
    template = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
                "sigma8:{3:.3f}, ns:{4:.2f}")
    # Field order: matter density, dark-energy density, Hubble parameter,
    # power-spectrum normalisation, spectral index.
    return(template.format(cosmo['omega_M_0'], cosmo['omega_lambda_0'],
                           cosmo['h'], cosmo['sigma_8'], cosmo['n']))
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo
    M : float / numpy array
        Halo mass; the fit pivots around 2e12/0.72 (Duffy et al. 2008)
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-halo fit rather than the full-sample fit

    Returns
    -------
    Concentration c = A * (M / M_pivot)**B * (1 + z)**C

    Raises
    ------
    ValueError
        If `vir` is not a recognised halo boundary definition.
    """
    # (A, B, C) fit parameters from Duffy et al. 2008, Table 1,
    # keyed by boundary definition then by relaxed/full sample.
    fits = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    try:
        params = fits[vir][bool(relaxed)]
    except KeyError:
        # Original code printed a warning and then crashed with a
        # NameError on 'params'; raise a clear error instead.
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float
        The perturbed 'A' relation between rho_2 and rho_crit for the
        cosmology; multiplied against the reference WMAP5 value by the
        caller (getAscaling with newcosmo=True).
    """
    # Mass enclosed by a radius-8 sphere in this cosmology
    # (assumes cosmolopy's radius units, i.e. the sigma_8 pivot scale
    # 8 Mpc/h -- TODO confirm)
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the reference fit values sigma_8 = 0.796,
    # n = 0.963; weak power-law dependence on the spectral index.
    perturbed_A = (0.796/cosmo['sigma_8']) * \
        (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
    return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    newcosmo : bool, optional
        If truthy, `cosmology` is a parameter dict and the WMAP5 value
        is perturbed for it via _delta_sigma. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit for the cosmology

    Raises
    ------
    ValueError
        If `cosmology` is a name not in the predefined list.
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            # Original code printed a warning and then crashed with an
            # UnboundLocalError on 'A_scaling'; raise a clear error instead.
            raise ValueError("Error, don't recognise your cosmology for "
                             "A_scaling, you provided %s" % (cosmology))
    return(A_scaling)
def _int_growth(z, **cosmo):
    """ Returns integral of the linear growth factor from z=200 to z=z """
    zmax = 200

    # The fixed upper limit caps how early a redshift may be requested;
    # accept either a scalar or a sequence of redshifts for the check.
    zvals = z if hasattr(z, "__len__") else [z]
    for zval in zvals:
        assert(zval < zmax)

    om_m = cosmo['omega_M_0']
    om_l = cosmo['omega_lambda_0']

    def integrand(zp):
        # (1 + z) / E(z)^3 with E(z) the Hubble rate in units of H0
        return (1 + zp)/(om_m*(1 + zp)**3 + om_l)**(1.5)

    result, _err = scipy.integrate.quad(integrand, z, zmax)
    return(result)
def growthfactor(z, norm=True, **cosmo):
    """ Returns linear growth factor at a given redshift, normalised to z=0
    by default, for a given cosmology.

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0 case defaults True
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float or numpy array
        The growth factor at a range of redshifts 'z'
    """
    # D(z) proportional to E(z) * integral_z^zmax (1+z')/E(z')^3 dz',
    # with E(z) the dimensionless Hubble rate for a flat cosmology.
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    d_z = hubble * _int_growth(z, **cosmo)
    if not norm:
        return(d_z)
    # Normalise so that D(0) = 1
    return(d_z / _int_growth(0, **cosmo))
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
                Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
    for 1 unknown, i.e. concentration, returned by a minimisation call.

    Returns LHS - RHS of the matched equations; the root in 'c' is the
    concentration consistent with the given growth indices.
    """
    # NFW mass-profile factor mu(x) = ln(1+x) - x/(1+x), at x=1 and x=c
    mu1 = np.log(2) - 0.5
    muc = np.log(1 + c) - c/(1 + c)

    # Fn 1 (LHS of Eqn 18)
    lhs = mu1/muc

    # Eqn 14 - mean inner density in units of the critical density
    rho_2 = 200 * c**3 * mu1/muc

    # Eqn 17 rearranged: formation redshift at which the universe
    # had mean density rho_2 / Ascaling
    ratio = omega_lambda_0/omega_M_0
    zf = (((1 + z)**3 + ratio) * (rho_2/Ascaling) - ratio)**(1/3) - 1

    # RHS of Eqn 19: halo mass growth between zf and z
    rhs = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)

    # Zero at the correct concentration
    return(lhs - rhs)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    formation redshift for a concentration at a given redshift.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmological dependent scaling between densities, use function
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile factor mu(x) = ln(1+x) - x/(1+x), at x=1 and x=c
    mu1 = np.log(2) - 0.5
    muc = np.log(1 + c) - c/(1 + c)
    # Eqn 14: mean density within the scale radius (units of rho_crit)
    rho_2 = 200*(c**3)*mu1/muc
    # Invert Eqn 17/18 for the redshift at which the background
    # density matched rho_2 / Ascaling
    ratio = omega_lambda_0/omega_M_0
    return((((1 + z)**3 + ratio) * (rho_2/Ascaling) - ratio)**(1/3) - 1)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde.

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth indices of the mass history
        M(z) = Mi * (1 + z - zi)**a_tilde * exp(b_tilde * (z - zi))
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a: mass ratio between Mi and its progenitor
    q = 4.137 * zf**(-0.9476)
    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluate at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    # f encodes the variance difference between halo and progenitor scales
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Find parameters a_tilde and b_tilde for initial redshift
    # use Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)

    # Halo mass at z, in Msol
    # use Eqn 8 in Correa et al. (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))

    # Accretion rate at z, Msol yr^-1
    # use Eqn 11 from Correa et al. (2015c); the bracket is dlnM/dz and
    # the square root is the dimensionless Hubble rate E(z)
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])

    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate mass accretion history by looping function acc_rate
    over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift to output MAH over. Note zi<z always
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)

    # Preallocate output arrays matching the redshift grid
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)

    for i_ind, zval in enumerate(z):
        # Solve the accretion rate and halo mass at each redshift step
        dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)

        dMdt_array[i_ind] = dMdt
        Mz_array[i_ind] = Mz

    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'.

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, Mass Variance 'sigma' this corresponds too,
        the dimensionless fluctuation this represents and formation redshift
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)

    # Preallocate one output slot per (z, M) pair
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)

    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)

        # Minimize equation to solve for 1 unknown, 'c'; root is
        # bracketed inside [2, 1000]
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))

        # NOTE(review): brentq constrains c to [2, 1000], so this
        # near-zero guard looks unreachable -- confirm intent
        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])

            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)

            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf

    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'

    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.

    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.

    >>> import examples
    >>> examples.runcommands()  # A series of ways to query structured dataset
    >>> examples.plotcommands()  # Examples to plot data

    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)

    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1 (ambiguous zi / Mi combination)
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results

    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)

    # Create output file if desired.
    # Text mode ('w'): the original opened in 'wb' and then wrote str,
    # which raises TypeError on Python 3.
    if filename:
        print("Output to file %r" % (filename))
        fout = open(filename, 'w')

    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])

        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)

            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)

                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        fout.close() if filename else None

    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | growthfactor | python | def growthfactor(z, norm=True, **cosmo):
H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
cosmo['omega_lambda_0'])
growthval = H * _int_growth(z, **cosmo)
if norm:
growthval /= _int_growth(0, **cosmo)
return(growthval) | Returns linear growth factor at a given redshift, normalised to z=0
by default, for a given cosmology
Parameters
----------
z : float or numpy array
The redshift at which the growth factor should be calculated
norm : boolean, optional
If true then normalise the growth factor to z=0 case defaults True
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float or numpy array
The growth factor at a range of redshifts 'z'
Raises
------ | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L252-L284 | [
"def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)\n"
] | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
    """ Iterate through multiple lists or arrays of equal size.

    Yields tuples pairing the i-th element of every iterable, stopping at
    the shortest input (same contract as the built-in ``zip``).

    _izip('ABCD', 'xy') --> ('A', 'x') ('B', 'y')
    """
    # The original Python 2 recipe (``iterators = map(iter, ...)`` plus a
    # ``while iterators`` loop) is broken on Python 3: ``map`` is lazy, so
    # the iterator sequence was consumed after the first pass and the loop
    # then yielded empty tuples forever. Delegating to ``zip`` is correct
    # on both versions and preserves the lazy-iteration behaviour.
    return iter(zip(*iterables))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
""" Find cosmological parameters for named cosmo in cosmology.py list """
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
print("You haven't passed a dict of cosmological parameters ")
print("OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshift of the halo
    M : float / numpy array
        Halo mass; the fit pivots around 2e12/0.72 (Duffy et al. 2008)
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-halo fit rather than the full-sample fit

    Returns
    -------
    Concentration c = A * (M / M_pivot)**B * (1 + z)**C

    Raises
    ------
    ValueError
        If `vir` is not a recognised halo boundary definition.
    """
    # (A, B, C) fit parameters from Duffy et al. 2008, Table 1,
    # keyed by boundary definition then by relaxed/full sample.
    fits = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    try:
        params = fits[vir][bool(relaxed)]
    except KeyError:
        # Original code printed a warning and then crashed with a
        # NameError on 'params'; raise a clear error instead.
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
""" Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
"""
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A)
def getAscaling(cosmology, newcosmo=None):
""" Returns the normalisation constant between
Rho_-2 and Rho_mean(z_formation) for a given cosmology
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
newcosmo : str, optional
If cosmology is not from predefined list have to perturbation
A_scaling variable. Defaults to None.
Returns
-------
float
The scaled 'A' relation between rho_2 and rho_crit for the cosmology
"""
# Values from Correa 15c
defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
'wmap1_lss': 853, 'wmap3_mean': 850,
'wmap5_ml': 887, 'wmap5_lss': 887,
'wmap7_lss': 887,
'planck13': 880, 'planck15': 880}
if newcosmo:
# Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
else:
if cosmology.lower() in defaultcosmologies.keys():
A_scaling = defaultcosmologies[cosmology.lower()]
else:
print("Error, don't recognise your cosmology for A_scaling ")
print("You provided %s" % (cosmology))
return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo.

    With D(z) = E(z) * I(z) / I(0), where E is the dimensionless Hubble
    rate and I = _int_growth, the two terms below are E'(z)*I(z)/I(0)
    and E(z)*I'(z)/I(0) respectively (I'(z) = -(1+z)/E(z)^3 since z is
    the lower integration limit).
    """
    # 1/E(z) for a flat matter + lambda cosmology
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    # (1+z)/E(z)^3 = -dI/dz
    fz = (1 + z) * inv_h**3
    # D * E'/E  -  D * (1+z)/(E^2 I)  ==  dD/dz
    deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)
    return(deriv_g)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to give the formation
    redshift of a halo with concentration 'c' at redshift 'z'.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmology-dependent scaling between densities; use
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile terms at x=1 and x=c
    Y1 = np.log(2) - 0.5
    Yc = np.log(1 + c) - c / (1 + c)
    # Eqn 14: mean density inside the scale radius
    rho_2 = 200 * (c**3) * Y1 / Yc
    ratio = omega_lambda_0 / omega_M_0
    # Invert rho_2 = Ascaling * rho_mean(zf) for the formation redshift
    zf = (((1 + z)**3 + ratio) * (rho_2 / Ascaling) - ratio)**(1 / 3) - 1
    return(zf)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde.

    These are the power-law and exponential indices of the halo mass
    history M(z) = Mi * (1+z-zi)**a_tilde * exp(b_tilde*(z-zi)),
    Eqns 9 and 10 of Correa et al. (2015c).

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth rate indices.
    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift; z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
    # Eqn 22 of Correa et al 2015a: halo-to-progenitor mass ratio
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Lagrangian radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]
    # Mass variance 'sigma' evaluated at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # [Mpc]
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # [Mpc]
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    # b_tilde is exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history at. Note zi<z
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Find parameters a_tilde and b_tilde for the initial redshift
    # using Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Halo mass at z, in Msol: Eqn 8 in Correa et al. (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
    # Accretion rate at z, Msol yr^-1: Eqn 11 from Correa et al. (2015c);
    # the final sqrt term is the dimensionless Hubble parameter E(z)
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate the mass accretion history of a halo of mass 'Mi'
    at redshift 'zi' by applying acc_rate at every output redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshifts to output the MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Dictionary of cosmological parameters (see acc_rate).

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Promote the requested redshifts to a 1D float array
    zsteps = np.array(z, ndmin=1, dtype=float)
    # Solve the accretion rate and halo mass at every redshift step
    rates = []
    masses = []
    for zval in zsteps:
        rate, mass = acc_rate(zval, zi, Mi, **cosmo)
        rates.append(rate)
        masses.append(mass)
    return(np.array(rates), np.array(masses))
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'.

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M': concentration, the mass
        variance 'sigma' it corresponds to, the dimensionless peak
        height, and the formation redshift. All four are set to -1
        for a (z, M) pair whose concentration could not be solved.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create output arrays, one entry per (z, M) pair
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c', in [2, 1000]
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        if np.isclose(c, 0):
            # Root-finder failed; flag every output with -1
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Dimensionless peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'.
    This is based on Correa et al. (2015a,b,c).

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one-to-one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one-to-one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi.
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah.
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        Structured array of size (size(Mi) > size(z)) by size(z) whose
        columns depend on the mah / com flags:
        mah only: ('zi','Mi','z','dMdt','Mz');
        com only: ('zi','Mi','z','c','sig','nu','zf');
        both:     ('zi','Mi','z','dMdt','Mz','c','sig','nu','zf').
        Here 'zi' is the starting redshift, 'Mi' the halo mass at zi,
        'z' the output redshift (NB z>zi), 'dMdt' the accretion rate
        [Msol/yr], 'Mz' the halo mass at 'z', 'c' the NFW concentration,
        'sig' the mass variance, 'nu' the peak height and 'zf' the
        formation redshift.
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)

    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1 (ambiguous input)
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results

    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)

    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUGFIX: open in text mode ('w'), not binary ('wb') -- every
        # write below passes a str, which raises TypeError on a binary
        # file object under Python 3
        fout = open(filename, 'w')

    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])

        # Now loop over the combination of initial redshift and halo mass
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'. Check that all requested redshifts
            # are greater than the input redshift, except if z is False,
            # in which case only solve at z=zi (i.e. remove a loop)
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)

            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()

    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
astroduff/commah | commah/commah.py | _minimize_c | python | def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2) | Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L287-L312 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (e.g. 'WMAP5', 'planck13') or a dict of
        cosmological parameters.

    Returns
    -------
    dict
        Cosmology dict, with 'A_scaling' and the omega_k_0 entry added,
        suitable for passing to cosmolopy routines as **cosmo.

    Raises
    ------
    ValueError
        If ``cosmology`` is neither a parameter dict nor a recognised name.
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Perturb the WMAP5 A_scaling to this user cosmology
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        print("You haven't passed a dict of cosmological parameters ")
        print("OR a recognised cosmology, you gave %s" % (cosmology))
        # BUGFIX: previously this fell through with 'cosmo' unbound and
        # crashed with a NameError below; fail explicitly instead.
        raise ValueError("Unrecognised cosmology: %s" % (cosmology,))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshift of halo
    M : float / numpy array
        Halo mass [Msol]; the fit pivot mass is 2e12/0.72 Msol
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        If True use the relaxed-halo fit, otherwise the full-sample fit

    Returns
    -------
    float / numpy array
        NFW concentration c(M, z) = A * (M/Mpivot)**B * (1+z)**C

    Raises
    ------
    ValueError
        If `vir` is not a recognised boundary definition.
    """
    # (A, B, C) fit parameters from Duffy et al. 2008, Table 1,
    # keyed by boundary definition then by relaxed (True) / full (False)
    fits = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    if vir not in fits:
        print("Didn't recognise the halo boundary definition provided %s"
              % (vir))
        # BUGFIX: previously this path crashed with a NameError because
        # 'params' was never bound; raise a clear error instead.
        raise ValueError("Unknown halo boundary definition: %s" % (vir,))
    params = fits[vir][bool(relaxed)]
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c).

    Parameters
    ----------
    cosmo : dict
        Cosmological parameters; requires at least 'sigma_8' and 'n'
        plus whatever cosmolopy's radius_to_mass needs.

    Returns
    -------
    float
        The perturbation factor applied to the WMAP5 'A' scaling.
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the WMAP5 best-fit values
    # (sigma_8 = 0.796, n_s = 0.963, M8 = 2.5e14 Msol)
    sigma_term = 0.796 / cosmo['sigma_8']
    tilt_term = (M8_cosmo / 2.5e14)**((cosmo['n'] - 0.963) / 6)
    return(sigma_term * tilt_term)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology.

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9, Planck13, Planck15 and
        variants) when newcosmo is falsy, otherwise a dict of
        cosmological parameters to perturb the WMAP5 scaling with.
    newcosmo : str, optional
        If truthy, `cosmology` is treated as a parameter dict and the
        scaling is perturbed from the WMAP5 value. Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit.

    Raises
    ------
    ValueError
        If a named cosmology is not in the predefined list.
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        if cosmology.lower() in defaultcosmologies.keys():
            A_scaling = defaultcosmologies[cosmology.lower()]
        else:
            print("Error, don't recognise your cosmology for A_scaling ")
            print("You provided %s" % (cosmology))
            # BUGFIX: previously fell through with A_scaling unbound and
            # raised a NameError; raise a clear error instead.
            raise ValueError("Unknown cosmology for A_scaling: %s"
                             % (cosmology,))
    return(A_scaling)
def _int_growth(z, **cosmo):
    """ Returns integral of the linear growth factor from z=200 to z=z.

    Parameters
    ----------
    z : float or array_like
        Lower limit of the integral; every value must be below z=200.
    cosmo : dict
        Cosmological parameters; requires 'omega_M_0' and 'omega_lambda_0'.

    Returns
    -------
    float
        Value of the quadrature integral (the error estimate is discarded).
    """
    # z=200 approximates "infinite" redshift for the growth integral
    zmax = 200
    if hasattr(z, "__len__"):
        for zval in z:
            assert(zval < zmax)
    else:
        assert(z < zmax)
    # Integrand is (1+z)/E(z)^3 with E(z)^2 = Om*(1+z)^3 + Ol (flat universe)
    y, yerr = scipy.integrate.quad(
        lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
                           cosmo['omega_lambda_0'])**(1.5),
        z, zmax)
    return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    # 1/E(z) where E(z)^2 = Om*(1+z)^3 + Ol (flat universe)
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    # fz = (1+z)/E(z)^3, the integrand of _int_growth evaluated at z
    fz = (1 + z) * inv_h**3
    # Product rule on D(z) = E(z) * integral: derivative of E times the
    # integral, minus E times the integrand over the integral
    deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)
    return(deriv_g)
def growthfactor(z, norm=True, **cosmo):
    """ Return the linear growth factor at redshift 'z', by default
    normalised to the z=0 value.

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0; defaults True
    cosmo : dict
        Cosmological parameters; requires 'omega_M_0' and
        'omega_lambda_0'.

    Returns
    -------
    float or numpy array
        The growth factor at redshift(s) 'z'
    """
    # Dimensionless Hubble parameter E(z) for a flat universe
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    unnormalised = hubble * _int_growth(z, **cosmo)
    if not norm:
        return(unnormalised)
    return(unnormalised / _int_growth(0, **cosmo))
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    formation redshift for a concentration at a given redshift.

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmological dependent scaling between densities, use function
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'
    """
    # NFW mass-profile terms Y(x) = ln(1+x) - x/(1+x) at x=1 and x=c
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    # Eqn 14: mean density within the scale radius, in units of rho_crit
    rho_2 = 200*(c**3)*Y1/Yc
    # Invert rho_2 = Ascaling * rho_mean(zf) for the formation redshift
    zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    return(zf)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate the growth rate indices a_tilde and b_tilde of the
    halo mass history (Eqns 9 and 10 of Correa et al. 2015c).

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Cosmological parameters as accepted by cosmolopy.

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth rate indices.
    """
    # Empirical formation redshift fit, Eqn 23 of Correa et al 2015a;
    # when zi = 0, a_tilde becomes alpha and b_tilde becomes beta.
    # (z_-2 in COM is the more physically motivated choice.)
    logM = np.log10(Mi)
    zf = -0.0064 * logM**2 + 0.0237 * logM + 1.8837
    # Mass ratio between halo and progenitor, Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radii of masses Mi and Mi/q [Mpc]
    radius_full = cp.perturbation.mass_to_radius(Mi, **cosmo)
    radius_prog = cp.perturbation.mass_to_radius(Mi/q, **cosmo)
    # Mass variance 'sigma' evaluated at z=0 is a good approximation
    sig, err_sig = cp.perturbation.sigma_r(radius_full, 0, **cosmo)
    sigq, err_sigq = cp.perturbation.sigma_r(radius_prog, 0, **cosmo)
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqns 9 and 10 of Correa et al 2015c, generalised to redshift zi
    # from the z=0 special case of Correa et al 2015a
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate the accretion rate and mass of a halo at redshift 'z',
    given it has mass 'Mi' at the lower redshift 'zi'.

    Parameters
    ----------
    z : float
        Redshift to solve the accretion rate / mass history at (zi < z)
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Cosmological parameters; requires 'h', 'omega_M_0' and
        'omega_lambda_0' plus whatever calc_ab needs.

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Growth indices fixed at the starting redshift (Eqns 9-10,
    # Correa et al. 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    # Halo mass at z via Eqn 8 of Correa et al. (2015c), in Msol
    dz = z - zi
    Mz = Mi * ((1 + dz)**a_tilde) * (np.exp(b_tilde * dz))
    # Accretion rate at z via Eqn 11 of Correa et al. (2015c), Msol/yr;
    # Ez is the dimensionless Hubble parameter for a flat universe
    Ez = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + dz) - b_tilde) * (1 + z) * Ez
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate mass accretion history by looping function acc_rate
    over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift to output MAH over. Note zi<z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi' [Msol]
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    # Pre-allocate outputs, one entry per requested redshift
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    for i_ind, zval in enumerate(z):
        # Solve the accretion rate and halo mass at each redshift step
        dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
        dMdt_array[i_ind] = dMdt
        Mz_array[i_ind] = Mz
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate the NFW concentration (and derived quantities) for a
    halo of mass 'M' at redshift 'z'.

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Cosmological parameters; requires 'A_scaling', 'omega_M_0' and
        'omega_lambda_0' plus whatever calc_ab needs.

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M': concentration, the mass
        variance 'sigma' it corresponds to, the dimensionless peak
        height, and the formation redshift. A pair that fails to solve
        is flagged with -1 in all four outputs.
    """
    # Promote inputs to 1D float arrays of matching length
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Output arrays, filled per (z, M) pair
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Growth indices for THIS (z, M) pair -- unlike MAH, which keeps
        # one (a_tilde, b_tilde) fixed at the starting redshift
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Root-find Eqns 17 & 18 of Correa et al. (2015c) for the single
        # unknown concentration in [2, 1000]
        conc = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                     args=(zval, a_tilde, b_tilde,
                                           cosmo['A_scaling'],
                                           cosmo['omega_M_0'],
                                           cosmo['omega_lambda_0']))
        if np.isclose(conc, 0):
            # Solver failure: flag all outputs for this pair
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            conc, sig, nu, zf = -1, -1, -1, -1
        else:
            # Formation redshift z_-2: when the scale radius equalled
            # the virial radius
            zf = formationz(conc, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = conc
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
filename=None, verbose=None, retcosmo=None):
""" Run commah code on halo of mass 'Mi' at redshift 'zi' with
accretion and profile history at higher redshifts 'z'
This is based on Correa et al. (2015a,b,c)
Parameters
----------
cosmology : str or dict
Can be named cosmology, default WMAP7 (aka DRAGONS), or
DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
or dictionary similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
zi : float / numpy array, optional
Redshift at which halo has mass 'Mi'. If float then all
halo masses 'Mi' are assumed to be at this redshift.
If array but Mi is float, then this halo mass is used across
all starting redshifts. If both Mi and zi are arrays then they
have to be the same size for one - to - one correspondence between
halo mass and the redshift at which it has that mass. Default is 0.
Mi : float / numpy array, optional
Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
are solved for this halo mass. If array but zi is float, then this
redshift is applied to all halo masses. If both Mi and zi are
arrays then they have to be the same size for one - to - one
correspondence between halo mass and the redshift at which it
has that mass. Default is 1e12 Msol.
z : float / numpy array, optional
Redshift to solve commah code at. Must have zi<z else these steps
are skipped. Default is False, meaning commah is solved at z=zi
com : bool, optional
If true then solve for concentration-mass,
default is True.
mah : bool, optional
If true then solve for accretion rate and halo mass history,
default is True.
filename : bool / str, optional
If str is passed this is used as a filename for output of commah
verbose : bool, optional
If true then give comments, default is None.
retcosmo : bool, optional
Return cosmological parameters used as a dict if retcosmo = True,
default is None.
Returns
-------
dataset : structured dataset
dataset contains structured columns of size
(size(Mi) > size(z)) by size(z)
If mah = True and com = False then columns are
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
at starting redshift 'zi'
If mah = False and com = True then columns are
('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
where 'zi' is the starting redshift, 'Mi' is halo mass at zi
'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
at the redshift 'z', 'sig' is the mass variance 'sigma',
'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
If mah = True and com = True then columns are:
('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
('c',float),('sig',float),('nu',float),('zf',float)
file : structured dataset with name 'filename' if passed
Raises
------
Output -1
If com = False and mah = False as user has to select something.
Output -1
If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
corresponding masses and redshifts of output.
Examples
--------
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> import examples
>>> examples.runcommands() # A series of ways to query structured dataset
>>> examples.plotcommands() # Examples to plot data
"""
# Check user choices...
if not com and not mah:
print("User has to choose com=True and / or mah=True ")
return(-1)
# Convert arrays / lists to np.array
# and inflate redshift / mass axis
# to match each other for later loop
results = _checkinput(zi, Mi, z=z, verbose=verbose)
# Return if results is -1
if(results == -1):
return(-1)
# If not, unpack the returned iterable
else:
zi, Mi, z, lenz, lenm, lenzout = results
# At this point we will have lenm objects to iterate over
# Get the cosmological parameters for the given cosmology
cosmo = getcosmo(cosmology)
# Create output file if desired
if filename:
print("Output to file %r" % (filename))
fout = open(filename, 'wb')
# Create the structured dataset
try:
if mah and com:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
"zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" Accretion - Final Halo - concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" rate - mass - - "
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" (dM/dt) - (M200) - - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" [Msol/yr] - [Msol] - - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('dMdt', float),
('Mz', float), ('c', float), ('sig', float),
('nu', float), ('zf', float)])
elif mah:
if verbose:
print("Output requested is zi, Mi, z, dMdt, Mz")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z -"
" Accretion - Final Halo "+'\n')
fout.write("# - mass - -"
" rate - mass "+'\n')
fout.write("# - (M200) - -"
" (dm/dt) - (M200) "+'\n')
fout.write("# - [Msol] - -"
" [Msol/yr] - [Msol] "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float),
('dMdt', float), ('Mz', float)])
else:
if verbose:
print("Output requested is zi, Mi, z, c, sig, nu, zf")
if filename:
fout.write(_getcosmoheader(cosmo)+'\n')
fout.write("# Initial z - Initial Halo - Output z - "
" concentration - "
" Mass - Peak - Formation z "+'\n')
fout.write("# - mass - -"
" -"
" Variance - Height - "+'\n')
fout.write("# - (M200) - - "
" - "
" (sigma) - (nu) - "+'\n')
fout.write("# - [Msol] - - "
" - "
" - - "+'\n')
dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
('Mi', float), ('z', float), ('c', float),
('sig', float), ('nu', float), ('zf', float)])
# Now loop over the combination of initial redshift and halo mamss
for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
if verbose:
print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
# For a given halo mass Mi at redshift zi need to know
# output redshifts 'z'
# Check that all requested redshifts are greater than
# input redshift, except if z is False, in which case
# only solve z at zi, i.e. remove a loop
if z is False:
ztemp = np.array(zval, ndmin=1, dtype=float)
else:
ztemp = np.array(z[z >= zval], dtype=float)
# Loop over the output redshifts
if ztemp.size:
# Return accretion rates and halo mass progenitors at
# redshifts 'z' for object of mass Mi at zi
dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
if mah and com:
# More expensive to return concentrations
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# Save all arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
if filename:
fout.write(
"{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
zf[j_ind]))
elif mah:
# Save only MAH arrays
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], dMdt[j_ind],
Mz[j_ind]))
else:
# Output only COM arrays
c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
# For any halo mass Mi at redshift zi
# solve for c, sig, nu and zf
for j_ind, j_val in enumerate(ztemp):
dataset[i_ind, j_ind] =\
(zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind])
if filename:
fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
nu[j_ind], zf[j_ind]))
# Make sure to close the file if it was opened
finally:
fout.close() if filename else None
if retcosmo:
return(dataset, cosmo)
else:
return(dataset)
|
astroduff/commah | commah/commah.py | formationz | python | def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
rho_2 = 200*(c**3)*Y1/Yc
zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
return(zf) | Rearrange eqn 18 from Correa et al (2015c) to return
formation redshift for a concentration at a given redshift
Parameters
----------
c : float / numpy array
Concentration of halo
z : float / numpy array
Redshift of halo with concentration c
Ascaling : float
Cosmological dependent scaling between densities, use function
getAscaling('WMAP5') if unsure. Default is 900.
omega_M_0 : float
Mass density of the universe. Default is 0.25
omega_lambda_0 : float
Dark Energy density of the universe. Default is 0.75
Returns
-------
zf : float / numpy array
Formation redshift for halo of concentration 'c' at redshift 'z' | train | https://github.com/astroduff/commah/blob/3ec70338c5123a053c79ddcf2cb3beac26bc9137/commah/commah.py#L315-L346 | null | #!/usr/bin/env ipython
# -*- coding: utf-8 -*-
"""Routine for creating Mass Accretion Histories and NFW profiles."""
from __future__ import absolute_import, division, print_function
import scipy
import numpy as np
import cosmolopy as cp
import commah.cosmology_list as cg
__author__ = 'Camila Correa and Alan Duffy'
__email__ = 'mail@alanrduffy.com'
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators))
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout)
def getcosmo(cosmology):
    """ Find cosmological parameters for named cosmo in cosmology.py list

    Parameters
    ----------
    cosmology : str or dict
        Either a recognised cosmology name (e.g. 'WMAP5', 'planck13') or a
        dictionary of cosmological parameters supplied by the user.

    Returns
    -------
    cosmo : dict
        Parameter dictionary suitable for passing as **cosmo to the
        cosmolopy routines; always contains an 'A_scaling' entry and
        omega_k_0 set for a flat universe.

    Raises
    ------
    ValueError
        If 'cosmology' is neither a parameter dict nor a recognised name.
        (Previously this case only printed a warning and then crashed with
        a NameError because 'cosmo' was never assigned.)
    """
    defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
                          'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
                          'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
                          'wmap1_lss': cg.WMAP1_2dF_mean(),
                          'wmap3_mean': cg.WMAP3_mean(),
                          'wmap5_ml': cg.WMAP5_ML(),
                          'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
                          'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
                          'planck13': cg.Planck_2013(),
                          'planck15': cg.Planck_2015()}
    if isinstance(cosmology, dict):
        # User providing their own variables
        cosmo = cosmology
        if 'A_scaling' not in cosmology.keys():
            # Unknown cosmology: perturb the WMAP5 scaling constant
            A_scaling = getAscaling(cosmology, newcosmo=True)
            cosmo.update({'A_scaling': A_scaling})
        # Add extra variables by hand that cosmolopy requires
        # note that they aren't used (set to zero)
        for paramnames in cg.WMAP5_mean().keys():
            if paramnames not in cosmology.keys():
                cosmo.update({paramnames: 0})
    elif cosmology.lower() in defaultcosmologies.keys():
        # Load by name of cosmology instead
        cosmo = defaultcosmologies[cosmology.lower()]
        A_scaling = getAscaling(cosmology)
        cosmo.update({'A_scaling': A_scaling})
    else:
        # Fail fast with a clear message instead of the old print + NameError
        raise ValueError("Unrecognised cosmology %r: pass a parameter dict "
                         "or one of %s"
                         % (cosmology, sorted(defaultcosmologies)))
    # No idea why this has to be done by hand but should be O_k = 0
    cosmo = cp.distance.set_omega_k_0(cosmo)
    # Use the cosmology as **cosmo passed to cosmolopy routines
    return(cosmo)
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader)
def cduffy(z, M, vir='200crit', relaxed=True):
    """ NFW conc from Duffy 08 Table 1 for halo mass and redshift.

    Parameters
    ----------
    z : float / numpy array
        Redshift of halo
    M : float / numpy array
        Halo mass; the fit pivot mass is 2e12/0.72 (as in the code below)
    vir : str, optional
        Halo boundary definition: '200crit', 'tophat' or '200mean'
    relaxed : bool, optional
        Use the relaxed-halo fit (True) or the full-sample fit (False)

    Returns
    -------
    float / numpy array
        Concentration c = A * (M/Mpivot)**B * (1+z)**C

    Raises
    ------
    ValueError
        If 'vir' is not a recognised boundary definition.  (Previously an
        unknown 'vir' printed a warning and then crashed with a NameError
        because 'params' was never assigned.)
    """
    # (A, B, C) fit parameters, keyed by boundary definition and
    # relaxed / full sample
    fit_params = {
        '200crit': {True: [6.71, -0.091, -0.44],
                    False: [5.71, -0.084, -0.47]},
        'tophat': {True: [9.23, -0.090, -0.69],
                   False: [7.85, -0.081, -0.71]},
        '200mean': {True: [11.93, -0.090, -0.99],
                    False: [10.14, -0.081, -1.01]},
    }
    try:
        params = fit_params[vir][bool(relaxed)]
    except KeyError:
        raise ValueError("Didn't recognise the halo boundary definition "
                         "provided %s" % (vir))
    return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
def _delta_sigma(**cosmo):
    """ Perturb best-fit constant of proportionality Ascaling for
    rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)

    Parameters
    ----------
    cosmo : dict
        Dictionary of cosmological parameters (requires at least
        'sigma_8' and 'n', plus whatever cosmolopy needs for
        radius_to_mass).

    Returns
    -------
    float
        The perturbed 'A' relation between rho_2 and rho_crit for the
        cosmology.
    """
    # Mass enclosed in an 8 Mpc/h sphere for this cosmology
    mass_8 = cp.perturbation.radius_to_mass(8, **cosmo)
    # Rescale relative to the reference sigma_8 = 0.796 and n = 0.963
    sigma8_ratio = 0.796 / cosmo['sigma_8']
    tilt = (cosmo['n'] - 0.963) / 6
    return(sigma8_ratio * (mass_8 / 2.5e14)**tilt)
def getAscaling(cosmology, newcosmo=None):
    """ Returns the normalisation constant between
    Rho_-2 and Rho_mean(z_formation) for a given cosmology

    Parameters
    ----------
    cosmology : str or dict
        Named cosmology (DRAGONS, WMAP1/3/5/7/9, Planck13/15 variants),
        or a parameter dict when 'newcosmo' is set.
    newcosmo : str, optional
        If truthy, 'cosmology' is a parameter dict for an unlisted
        cosmology and the WMAP5 scaling is perturbed via _delta_sigma.
        Defaults to None.

    Returns
    -------
    float
        The scaled 'A' relation between rho_2 and rho_crit for the cosmology

    Raises
    ------
    ValueError
        If a named cosmology is not recognised.  (Previously this case
        printed a warning and then crashed with a NameError because
        'A_scaling' was never assigned.)
    """
    # Values from Correa 15c
    defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,
                          'wmap5': 887, 'wmap7': 887, 'wmap9': 950,
                          'wmap1_lss': 853, 'wmap3_mean': 850,
                          'wmap5_ml': 887, 'wmap5_lss': 887,
                          'wmap7_lss': 887,
                          'planck13': 880, 'planck15': 880}
    if newcosmo:
        # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1
        A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)
    else:
        try:
            A_scaling = defaultcosmologies[cosmology.lower()]
        except KeyError:
            raise ValueError("Don't recognise cosmology %r for A_scaling; "
                             "known names: %s"
                             % (cosmology, sorted(defaultcosmologies)))
    return(A_scaling)
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y)
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """
    # 1/E(z) for flat LCDM
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    gz = growthfactor(z, norm=True, **cosmo)
    # Product-rule pieces: d/dz of E(z) * integral term
    term1 = gz * (inv_h**2) * 1.5 * cosmo['omega_M_0'] * (1 + z)**2
    term2 = fz * gz / _int_growth(z, **cosmo)
    return(term1 - term2)
def growthfactor(z, norm=True, **cosmo):
    """ Returns linear growth factor at a given redshift, normalised to z=0
    by default, for a given cosmology

    Parameters
    ----------
    z : float or numpy array
        Redshift(s) at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0; defaults to True
    cosmo : dict
        Cosmological parameters (requires 'omega_M_0' and 'omega_lambda_0')

    Returns
    -------
    float or numpy array
        The growth factor at redshift(s) 'z'
    """
    # Dimensionless Hubble parameter E(z) for flat LCDM
    hubble = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                     cosmo['omega_lambda_0'])
    growthval = hubble * _int_growth(z, **cosmo)
    if norm:
        return(growthval / _int_growth(0, **cosmo))
    return(growthval)
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
""" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
for 1 unknown, i.e. concentration, returned by a minimisation call """
# Fn 1 (LHS of Eqn 18)
Y1 = np.log(2) - 0.5
Yc = np.log(1+c) - c/(1+c)
f1 = Y1/Yc
# Fn 2 (RHS of Eqn 18)
# Eqn 14 - Define the mean inner density
rho_2 = 200 * c**3 * Y1 / Yc
# Eqn 17 rearranged to solve for Formation Redshift
# essentially when universe had rho_2 density
zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
(rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
# RHS of Eqn 19
f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
# LHS - RHS should be zero for the correct concentration
return(f1-f2)
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    (a_tilde, b_tilde) : float
        Power-law and exponential growth-rate indices
    """
    # When zi = 0, a_tilde becomes alpha and b_tilde becomes beta
    # Eqn 23 of Correa et al 2015a: empirical formation-redshift fit
    logM = np.log10(Mi)
    zf = -0.0064 * logM**2 + 0.0237 * logM + 1.8837
    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)
    # Lagrangian radii of masses Mi and Mi/q [Mpc]
    radius_m = cp.perturbation.mass_to_radius(Mi, **cosmo)
    radius_q = cp.perturbation.mass_to_radius(Mi/q, **cosmo)
    # Mass variance 'sigma', evaluated at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(radius_m, 0, **cosmo)
    sigq, err_sigq = cp.perturbation.sigma_r(radius_q, 0, **cosmo)
    f = (sigq**2 - sig**2)**(-0.5)
    # Eqn 9 and 10 from Correa et al 2015c (generalised to zi from the
    # z=0 special case of Correa et al 2015a)
    gz = growthfactor(zi, norm=True, **cosmo)
    # a_tilde is the power-law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               gz**2 + 1)*f
    # b_tilde is the exponential growth rate
    b_tilde = -f
    return(a_tilde, b_tilde)
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history at. Note zi < z
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
    """
    # Growth-rate indices at the starting redshift
    # (Eqn 9 and 10 of Correa et al. 2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
    dz = z - zi
    # Eqn 8 in Correa et al. (2015c): halo mass at z, in Msol
    Mz = Mi * ((1 + dz)**a_tilde) * np.exp(b_tilde * dz)
    # Eqn 11 from Correa et al. (2015c): accretion rate at z, Msol yr^-1
    Ez = np.sqrt(cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde/(1 + dz) - b_tilde) * (1 + z) * Ez
    return(dMdt, Mz)
def MAH(z, zi, Mi, **cosmo):
    """ Calculate mass accretion history by looping function acc_rate
    over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output MAH over. Note zi < z always
    zi : float
        Starting redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters (cosmolopy format)

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
        Accretion rate [Msol/yr], halo mass [Msol] at each redshift in 'z'
    """
    # Ensure that z is a 1D NumPy array
    z = np.array(z, ndmin=1, dtype=float)
    dMdt_array = np.empty_like(z)
    Mz_array = np.empty_like(z)
    # Solve accretion rate and progenitor mass at every output redshift
    for idx, zval in enumerate(z):
        dMdt_array[idx], Mz_array[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return(dMdt_array, Mz_array)
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Solves eqns 17 and 18 of Correa et al. (2015c) for the NFW
    concentration by root-finding, one (z, M) pair at a time.

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
        Must also contain 'A_scaling' (see getAscaling).

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, mass variance 'sigma' this corresponds to,
        the dimensionless fluctuation (peak height) this represents,
        and the formation redshift. Entries are set to -1 when the
        root-finder flags an unsolvable combination.
    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)
    # Create array
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)
    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
        # Minimize equation to solve for 1 unknown, 'c'
        # NOTE(review): brentq brackets the root in c = [2, 1000] and will
        # raise ValueError if the trial function does not change sign on
        # that interval -- TODO confirm callers accept that failure mode.
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))
        if np.isclose(c, 0):
            # Sentinel path: flag the failed solve with -1 in every output
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            # (formationz is defined elsewhere in this module)
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])
            # Lagrangian radius of the halo mass [Mpc]
            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
            # Mass variance sigma evaluated at z=0
            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            # Peak height nu = delta_c / (sigma * D(z))
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf
    return(c_array, sig_array, nu_array, zf_array)
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)

    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass, default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.

    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),
        ('nu',float),('zf',float)
        If mah = True and com = True then columns are the union of both.
    file : structured dataset with name 'filename' if passed

    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # Open in TEXT mode: every write below passes a str, which raises
        # TypeError on Python 3 if the file is opened binary ('wb', as the
        # previous version did).
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo masses
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {} , {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.