repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
vmagamedov/grpclib | grpclib/server.py | Stream.send_initial_metadata | python | async def send_initial_metadata(self, *, metadata=None):
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
headers = [
(':status', '200'),
('content-type', self._content_type),
]
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_initial_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers)
self._send_initial_metadata_done = True | Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
:param metadata: custom initial metadata, dict or list of pairs | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L99-L126 | [
"def encode_metadata(metadata):\n if hasattr(metadata, 'items'):\n metadata = metadata.items()\n result = []\n for key, value in metadata:\n if key in _SPECIAL or key.startswith('grpc-') or not _KEY_RE.match(key):\n raise ValueError('Invalid metadata key: {!r}'.format(key))\n if key.endswith('-bin'):\n if not isinstance(value, bytes):\n raise TypeError('Invalid metadata value type, bytes expected: '\n '{!r}'.format(value))\n result.append((key, b64encode(value).rstrip(b'=').decode('ascii')))\n else:\n if not isinstance(value, str):\n raise TypeError('Invalid metadata value type, str expected: '\n '{!r}'.format(value))\n if not _VALUE_RE.match(value):\n raise ValueError('Invalid metadata value: {!r}'.format(value))\n result.append((key, value))\n return result\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call – HTTP/2 request/stream, and everything you
need to communicate with client in order to handle this request.
As you can see, every method handler accepts single positional argument -
stream:
.. code-block:: python3
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, recv_type, send_type,
*, codec, dispatch: _DispatchServerEvents, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._recv_type = recv_type
self._send_type = send_type
self._codec = codec
self._dispatch = dispatch
#: :py:class:`~grpclib.metadata.Deadline` of the current request
self.deadline = deadline
#: Invocation metadata, received with headers from the client.
#: Represented as a multi-dict object.
self.metadata = None
@property
def _content_type(self):
return GRPC_CONTENT_TYPE + '+' + self._codec.__content_subtype__
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
message = await recv_message(self._stream, self._codec, self._recv_type)
message, = await self._dispatch.recv_message(message)
return message
async def send_message(self, message, **kwargs):
"""Coroutine to send message to the client.
If server sends UNARY response, then you should call this coroutine only
once. If server sends STREAM response, then you can call this coroutine
as many times as you need.
:param message: message object
"""
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata()
async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None, metadata=None):
"""Coroutine to send trailers with trailing metadata to the client.
This coroutine allows sending trailers-only responses, in case of some
failure conditions during handling current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
:param metadata: custom trailing metadata, dict or list of pairs
"""
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if (
not self._cardinality.server_streaming
and not self._send_message_count
and status is Status.OK
):
raise ProtocolError('Unary response with OK status requires '
'a single message to be sent')
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message',
encode_grpc_message(status_message)))
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_trailing_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
|
vmagamedov/grpclib | grpclib/server.py | Stream.send_message | python | async def send_message(self, message, **kwargs):
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata() | Coroutine to send message to the client.
If server sends UNARY response, then you should call this coroutine only
once. If server sends STREAM response, then you can call this coroutine
as many times as you need.
:param message: message object | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L128-L158 | [
"async def send_message(stream, codec, message, message_type, *, end=False):\n reply_bin = codec.encode(message, message_type)\n reply_data = (struct.pack('?', False)\n + struct.pack('>I', len(reply_bin))\n + reply_bin)\n await stream.send_data(reply_data, end_stream=end)\n",
"async def send_initial_metadata(self, *, metadata=None):\n \"\"\"Coroutine to send headers with initial metadata to the client.\n\n In gRPC you can send initial metadata as soon as possible, because\n gRPC doesn't use `:status` pseudo header to indicate success or failure\n of the current request. gRPC uses trailers for this purpose, and\n trailers are sent during :py:meth:`send_trailing_metadata` call, which\n should be called in the end.\n\n .. note:: This coroutine will be called implicitly during first\n :py:meth:`send_message` coroutine call, if not called before\n explicitly.\n\n :param metadata: custom initial metadata, dict or list of pairs\n \"\"\"\n if self._send_initial_metadata_done:\n raise ProtocolError('Initial metadata was already sent')\n\n headers = [\n (':status', '200'),\n ('content-type', self._content_type),\n ]\n metadata = MultiDict(metadata or ())\n metadata, = await self._dispatch.send_initial_metadata(metadata)\n headers.extend(encode_metadata(metadata))\n\n await self._stream.send_headers(headers)\n self._send_initial_metadata_done = True\n",
"async def send_trailing_metadata(self, *, status=Status.OK,\n status_message=None, metadata=None):\n \"\"\"Coroutine to send trailers with trailing metadata to the client.\n\n This coroutine allows sending trailers-only responses, in case of some\n failure conditions during handling current request, i.e. when\n ``status is not OK``.\n\n .. note:: This coroutine will be called implicitly at exit from\n request handler, with appropriate status code, if not called\n explicitly during handler execution.\n\n :param status: resulting status of this coroutine call\n :param status_message: description for a status\n :param metadata: custom trailing metadata, dict or list of pairs\n \"\"\"\n if self._send_trailing_metadata_done:\n raise ProtocolError('Trailing metadata was already sent')\n\n if (\n not self._cardinality.server_streaming\n and not self._send_message_count\n and status is Status.OK\n ):\n raise ProtocolError('Unary response with OK status requires '\n 'a single message to be sent')\n\n if self._send_initial_metadata_done:\n headers = []\n else:\n # trailers-only response\n headers = [(':status', '200')]\n\n headers.append(('grpc-status', str(status.value)))\n if status_message is not None:\n headers.append(('grpc-message',\n encode_grpc_message(status_message)))\n\n metadata = MultiDict(metadata or ())\n metadata, = await self._dispatch.send_trailing_metadata(metadata)\n headers.extend(encode_metadata(metadata))\n\n await self._stream.send_headers(headers, end_stream=True)\n self._send_trailing_metadata_done = True\n\n if status != Status.OK and self._stream.closable:\n self._stream.reset_nowait()\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call – HTTP/2 request/stream, and everything you
need to communicate with client in order to handle this request.
As you can see, every method handler accepts single positional argument -
stream:
.. code-block:: python3
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, recv_type, send_type,
*, codec, dispatch: _DispatchServerEvents, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._recv_type = recv_type
self._send_type = send_type
self._codec = codec
self._dispatch = dispatch
#: :py:class:`~grpclib.metadata.Deadline` of the current request
self.deadline = deadline
#: Invocation metadata, received with headers from the client.
#: Represented as a multi-dict object.
self.metadata = None
@property
def _content_type(self):
return GRPC_CONTENT_TYPE + '+' + self._codec.__content_subtype__
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
message = await recv_message(self._stream, self._codec, self._recv_type)
message, = await self._dispatch.recv_message(message)
return message
async def send_initial_metadata(self, *, metadata=None):
"""Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
:param metadata: custom initial metadata, dict or list of pairs
"""
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
headers = [
(':status', '200'),
('content-type', self._content_type),
]
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_initial_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers)
self._send_initial_metadata_done = True
async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None, metadata=None):
"""Coroutine to send trailers with trailing metadata to the client.
This coroutine allows sending trailers-only responses, in case of some
failure conditions during handling current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
:param metadata: custom trailing metadata, dict or list of pairs
"""
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if (
not self._cardinality.server_streaming
and not self._send_message_count
and status is Status.OK
):
raise ProtocolError('Unary response with OK status requires '
'a single message to be sent')
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message',
encode_grpc_message(status_message)))
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_trailing_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
|
vmagamedov/grpclib | grpclib/server.py | Stream.send_trailing_metadata | python | async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None, metadata=None):
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if (
not self._cardinality.server_streaming
and not self._send_message_count
and status is Status.OK
):
raise ProtocolError('Unary response with OK status requires '
'a single message to be sent')
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message',
encode_grpc_message(status_message)))
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_trailing_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait() | Coroutine to send trailers with trailing metadata to the client.
This coroutine allows sending trailers-only responses, in case of some
failure conditions during handling current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
:param metadata: custom trailing metadata, dict or list of pairs | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L160-L206 | [
"def encode_metadata(metadata):\n if hasattr(metadata, 'items'):\n metadata = metadata.items()\n result = []\n for key, value in metadata:\n if key in _SPECIAL or key.startswith('grpc-') or not _KEY_RE.match(key):\n raise ValueError('Invalid metadata key: {!r}'.format(key))\n if key.endswith('-bin'):\n if not isinstance(value, bytes):\n raise TypeError('Invalid metadata value type, bytes expected: '\n '{!r}'.format(value))\n result.append((key, b64encode(value).rstrip(b'=').decode('ascii')))\n else:\n if not isinstance(value, str):\n raise TypeError('Invalid metadata value type, str expected: '\n '{!r}'.format(value))\n if not _VALUE_RE.match(value):\n raise ValueError('Invalid metadata value: {!r}'.format(value))\n result.append((key, value))\n return result\n",
"def encode_grpc_message(message: str) -> str:\n return quote(message, safe=_UNQUOTED, encoding='utf-8')\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call – HTTP/2 request/stream, and everything you
need to communicate with client in order to handle this request.
As you can see, every method handler accepts single positional argument -
stream:
.. code-block:: python3
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, recv_type, send_type,
*, codec, dispatch: _DispatchServerEvents, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._recv_type = recv_type
self._send_type = send_type
self._codec = codec
self._dispatch = dispatch
#: :py:class:`~grpclib.metadata.Deadline` of the current request
self.deadline = deadline
#: Invocation metadata, received with headers from the client.
#: Represented as a multi-dict object.
self.metadata = None
@property
def _content_type(self):
return GRPC_CONTENT_TYPE + '+' + self._codec.__content_subtype__
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
message = await recv_message(self._stream, self._codec, self._recv_type)
message, = await self._dispatch.recv_message(message)
return message
async def send_initial_metadata(self, *, metadata=None):
"""Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
:param metadata: custom initial metadata, dict or list of pairs
"""
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
headers = [
(':status', '200'),
('content-type', self._content_type),
]
metadata = MultiDict(metadata or ())
metadata, = await self._dispatch.send_initial_metadata(metadata)
headers.extend(encode_metadata(metadata))
await self._stream.send_headers(headers)
self._send_initial_metadata_done = True
async def send_message(self, message, **kwargs):
"""Coroutine to send message to the client.
If server sends UNARY response, then you should call this coroutine only
once. If server sends STREAM response, then you can call this coroutine
as many times as you need.
:param message: message object
"""
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
|
vmagamedov/grpclib | grpclib/server.py | Server.start | python | async def start(self, host=None, port=None, *, path=None,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
if path is not None and (host is not None or port is not None):
raise ValueError("The 'path' parameter can not be used with the "
"'host' or 'port' parameters.")
if self._server is not None:
raise RuntimeError('Server is already started')
if path is not None:
self._server = await self._loop.create_unix_server(
self._protocol_factory, path, sock=sock, backlog=backlog,
ssl=ssl
)
else:
self._server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
) | Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param path: UNIX domain socket path. If specified, host and port should
be omitted (must be None).
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L473-L529 | null | class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python3
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='ascii',
)
self._server = None
self._handlers = set()
self.__dispatch__ = _DispatchServerEvents()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, self.__dispatch__,
loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
def close(self):
"""Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly.
"""
if self._server is None:
raise RuntimeError('Server is not started')
self._server.close()
for handler in self._handlers:
handler.close()
async def wait_closed(self):
"""Coroutine to wait until all existing request handlers will exit
properly.
"""
if self._server is None:
raise RuntimeError('Server is not started')
await self._server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop)
|
vmagamedov/grpclib | grpclib/server.py | Server.close | python | def close(self):
if self._server is None:
raise RuntimeError('Server is not started')
self._server.close()
for handler in self._handlers:
handler.close() | Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L531-L540 | null | class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python3
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='ascii',
)
self._server = None
self._handlers = set()
self.__dispatch__ = _DispatchServerEvents()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, self.__dispatch__,
loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
async def start(self, host=None, port=None, *, path=None,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param path: UNIX domain socket path. If specified, host and port should
be omitted (must be None).
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created.
"""
if path is not None and (host is not None or port is not None):
raise ValueError("The 'path' parameter can not be used with the "
"'host' or 'port' parameters.")
if self._server is not None:
raise RuntimeError('Server is already started')
if path is not None:
self._server = await self._loop.create_unix_server(
self._protocol_factory, path, sock=sock, backlog=backlog,
ssl=ssl
)
else:
self._server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
)
async def wait_closed(self):
"""Coroutine to wait until all existing request handlers will exit
properly.
"""
if self._server is None:
raise RuntimeError('Server is not started')
await self._server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop)
|
vmagamedov/grpclib | grpclib/server.py | Server.wait_closed | python | async def wait_closed(self):
if self._server is None:
raise RuntimeError('Server is not started')
await self._server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop) | Coroutine to wait until all existing request handlers will exit
properly. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L542-L551 | null | class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python3
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='ascii',
)
self._server = None
self._handlers = set()
self.__dispatch__ = _DispatchServerEvents()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, self.__dispatch__,
loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
async def start(self, host=None, port=None, *, path=None,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param path: UNIX domain socket path. If specified, host and port should
be omitted (must be None).
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created.
"""
if path is not None and (host is not None or port is not None):
raise ValueError("The 'path' parameter can not be used with the "
"'host' or 'port' parameters.")
if self._server is not None:
raise RuntimeError('Server is already started')
if path is not None:
self._server = await self._loop.create_unix_server(
self._protocol_factory, path, sock=sock, backlog=backlog,
ssl=ssl
)
else:
self._server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
)
def close(self):
"""Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly.
"""
if self._server is None:
raise RuntimeError('Server is not started')
self._server.close()
for handler in self._handlers:
handler.close()
|
vmagamedov/grpclib | grpclib/client.py | Stream.send_request | python | async def send_request(self):
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True | Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L120-L165 | [
"def encode_timeout(timeout: float) -> str:\n if timeout > 10:\n return '{}S'.format(int(timeout))\n elif timeout > 0.01:\n return '{}m'.format(int(timeout * 10 ** 3))\n elif timeout > 0.00001:\n return '{}u'.format(int(timeout * 10 ** 6))\n else:\n return '{}n'.format(int(timeout * 10 ** 9))\n",
"def encode_metadata(metadata):\n if hasattr(metadata, 'items'):\n metadata = metadata.items()\n result = []\n for key, value in metadata:\n if key in _SPECIAL or key.startswith('grpc-') or not _KEY_RE.match(key):\n raise ValueError('Invalid metadata key: {!r}'.format(key))\n if key.endswith('-bin'):\n if not isinstance(value, bytes):\n raise TypeError('Invalid metadata value type, bytes expected: '\n '{!r}'.format(value))\n result.append((key, b64encode(value).rstrip(b'=').decode('ascii')))\n else:\n if not isinstance(value, str):\n raise TypeError('Invalid metadata value type, str expected: '\n '{!r}'.format(value))\n if not _VALUE_RE.match(value):\n raise ValueError('Invalid metadata value: {!r}'.format(value))\n result.append((key, value))\n return result\n",
"async def send_request(self, metadata, *, method_name, deadline,\n content_type):\n return await self.__dispatch__(SendRequest(\n metadata=metadata,\n method_name=method_name,\n deadline=deadline,\n content_type=content_type,\n ))\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.send_message | python | async def send_message(self, message, *, end=False):
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True | Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L167-L199 | [
"async def send_message(stream, codec, message, message_type, *, end=False):\n reply_bin = codec.encode(message, message_type)\n reply_data = (struct.pack('?', False)\n + struct.pack('>I', len(reply_bin))\n + reply_bin)\n await stream.send_data(reply_data, end_stream=end)\n",
"async def send_request(self):\n \"\"\"Coroutine to send request headers with metadata to the server.\n\n New HTTP/2 stream will be created during this coroutine call.\n\n .. note:: This coroutine will be called implicitly during first\n :py:meth:`send_message` coroutine call, if not called before\n explicitly.\n \"\"\"\n if self._send_request_done:\n raise ProtocolError('Request is already sent')\n\n with self._wrapper:\n protocol = await self._channel.__connect__()\n stream = protocol.processor.connection\\\n .create_stream(wrapper=self._wrapper)\n\n headers = [\n (':method', 'POST'),\n (':scheme', self._channel._scheme),\n (':path', self._method_name),\n (':authority', self._channel._authority),\n ]\n if self._deadline is not None:\n timeout = self._deadline.time_remaining()\n headers.append(('grpc-timeout', encode_timeout(timeout)))\n content_type = (GRPC_CONTENT_TYPE\n + '+' + self._codec.__content_subtype__)\n headers.extend((\n ('te', 'trailers'),\n ('content-type', content_type),\n ('user-agent', USER_AGENT),\n ))\n metadata, = await self._dispatch.send_request(\n self._metadata,\n method_name=self._method_name,\n deadline=self._deadline,\n content_type=content_type,\n )\n headers.extend(encode_metadata(metadata))\n release_stream = await stream.send_request(\n headers, _processor=protocol.processor,\n )\n self._stream = stream\n self._release_stream = release_stream\n self._send_request_done = True\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.end | python | async def end(self):
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True | Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L201-L222 | null | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.recv_initial_metadata | python | async def recv_initial_metadata(self):
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise | Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L252-L312 | [
"def decode_metadata(headers):\n metadata = MultiDict()\n for key, value in headers:\n if key.startswith((':', 'grpc-')) or key in _SPECIAL:\n continue\n elif key.endswith('-bin'):\n metadata.add(key, b64decode(value.encode('ascii')\n + (b'=' * (len(value) % 4))))\n else:\n metadata.add(key, value)\n return metadata\n",
"def _raise_for_status(self, headers_map):\n status = headers_map[':status']\n if status is not None and status != _H2_OK:\n grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)\n raise GRPCError(grpc_status,\n 'Received :status = {!r}'.format(status))\n",
"def _raise_for_grpc_status(self, headers_map, *, optional=False):\n grpc_status = headers_map.get('grpc-status')\n if grpc_status is None:\n if optional:\n return\n else:\n raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')\n\n try:\n grpc_status_enum = Status(int(grpc_status))\n except ValueError:\n raise GRPCError(Status.UNKNOWN,\n 'Invalid grpc-status: {!r}'\n .format(grpc_status))\n else:\n if grpc_status_enum is not Status.OK:\n status_message = headers_map.get('grpc-message')\n if status_message is not None:\n status_message = decode_grpc_message(status_message)\n raise GRPCError(grpc_status_enum, status_message)\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.recv_message | python | async def recv_message(self):
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message | Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L314-L348 | [
"async def recv_message(stream, codec, message_type):\n meta = await stream.recv_data(5)\n if not meta:\n return\n\n compressed_flag = struct.unpack('?', meta[:1])[0]\n if compressed_flag:\n raise NotImplementedError('Compression not implemented')\n\n message_len = struct.unpack('>I', meta[1:])[0]\n message_bin = await stream.recv_data(message_len)\n assert len(message_bin) == message_len, \\\n '{} != {}'.format(len(message_bin), message_len)\n message = codec.decode(message_bin, message_type)\n return message\n",
"async def recv_initial_metadata(self):\n \"\"\"Coroutine to wait for headers with initial metadata from the server.\n\n .. note:: This coroutine will be called implicitly during first\n :py:meth:`recv_message` coroutine call, if not called before\n explicitly.\n\n May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned\n non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only\n response.\n\n When this coroutine finishes, you can access received initial metadata\n by using :py:attr:`initial_metadata` attribute.\n \"\"\"\n if not self._send_request_done:\n raise ProtocolError('Request was not sent yet')\n\n if self._recv_initial_metadata_done:\n raise ProtocolError('Initial metadata was already received')\n\n try:\n with self._wrapper:\n headers = await self._stream.recv_headers()\n self._recv_initial_metadata_done = True\n\n metadata = decode_metadata(headers)\n metadata, = await self._dispatch.recv_initial_metadata(metadata)\n self.initial_metadata = metadata\n\n headers_map = dict(headers)\n self._raise_for_status(headers_map)\n self._raise_for_grpc_status(headers_map, optional=True)\n\n content_type = headers_map.get('content-type')\n if content_type is None:\n raise GRPCError(Status.UNKNOWN,\n 'Missing content-type header')\n\n base_content_type, _, sub_type = content_type.partition('+')\n sub_type = sub_type or ProtoCodec.__content_subtype__\n if (\n base_content_type != GRPC_CONTENT_TYPE\n or sub_type != self._codec.__content_subtype__\n ):\n raise GRPCError(Status.UNKNOWN,\n 'Invalid content-type: {!r}'\n .format(content_type))\n except StreamTerminatedError:\n # Server can send RST_STREAM frame right after sending trailers-only\n # response, so we have to check received headers and probably raise\n # more descriptive error\n headers = self._stream.recv_headers_nowait()\n if headers is None:\n raise\n else:\n headers_map = dict(headers)\n self._raise_for_status(headers_map)\n self._raise_for_grpc_status(headers_map, 
optional=True)\n # If there are no errors in the headers, just reraise original\n # StreamTerminatedError\n raise\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.recv_trailing_metadata | python | async def recv_trailing_metadata(self):
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers)) | Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L350-L384 | [
"def decode_metadata(headers):\n metadata = MultiDict()\n for key, value in headers:\n if key.startswith((':', 'grpc-')) or key in _SPECIAL:\n continue\n elif key.endswith('-bin'):\n metadata.add(key, b64decode(value.encode('ascii')\n + (b'=' * (len(value) % 4))))\n else:\n metadata.add(key, value)\n return metadata\n",
"def _raise_for_grpc_status(self, headers_map, *, optional=False):\n grpc_status = headers_map.get('grpc-status')\n if grpc_status is None:\n if optional:\n return\n else:\n raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')\n\n try:\n grpc_status_enum = Status(int(grpc_status))\n except ValueError:\n raise GRPCError(Status.UNKNOWN,\n 'Invalid grpc-status: {!r}'\n .format(grpc_status))\n else:\n if grpc_status_enum is not Status.OK:\n status_message = headers_map.get('grpc-message')\n if status_message is not None:\n status_message = decode_grpc_message(status_message)\n raise GRPCError(grpc_status_enum, status_message)\n"
] | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def cancel(self):
"""Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Stream.cancel | python | async def cancel(self):
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
with self._wrapper:
await self._stream.reset() # TODO: specify error code
self._cancel_done = True | Coroutine to cancel this request/stream.
Client will send RST_STREAM frame to the server, so it will be
explicitly informed that there is nothing to expect from the client
regarding this request/stream. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L386-L398 | null | class Stream(StreamIterator):
"""
Represents gRPC method call - HTTP/2 request/stream, and everything you
need to communicate with server in order to get response.
In order to work directly with stream, you should
:py:meth:`ServiceMethod.open` request like this:
.. code-block:: python3
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
async with client.MakeLatte.open() as stream:
await stream.send_message(request, end=True)
reply: empty_pb2.Empty = await stream.recv_message()
"""
# stream state
_send_request_done = False
_send_message_count = 0
_end_done = False
_recv_initial_metadata_done = False
_recv_message_count = 0
_recv_trailing_metadata_done = False
_cancel_done = False
_stream = None
_release_stream = None
_wrapper = None
_wrapper_ctx = None
#: This property contains initial metadata, received with headers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_initial_metadata` coroutine succeeds.
initial_metadata = None
#: This property contains trailing metadata, received with trailers from
#: the server. It equals to ``None`` initially, and to a multi-dict object
#: after :py:meth:`recv_trailing_metadata` coroutine succeeds.
trailing_metadata = None
def __init__(self, channel, method_name, metadata, cardinality, send_type,
recv_type, *, codec, dispatch: _DispatchChannelEvents,
deadline=None):
self._channel = channel
self._method_name = method_name
self._metadata = metadata
self._cardinality = cardinality
self._send_type = send_type
self._recv_type = recv_type
self._codec = codec
self._dispatch = dispatch
self._deadline = deadline
async def send_request(self):
"""Coroutine to send request headers with metadata to the server.
New HTTP/2 stream will be created during this coroutine call.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_request_done:
raise ProtocolError('Request is already sent')
with self._wrapper:
protocol = await self._channel.__connect__()
stream = protocol.processor.connection\
.create_stream(wrapper=self._wrapper)
headers = [
(':method', 'POST'),
(':scheme', self._channel._scheme),
(':path', self._method_name),
(':authority', self._channel._authority),
]
if self._deadline is not None:
timeout = self._deadline.time_remaining()
headers.append(('grpc-timeout', encode_timeout(timeout)))
content_type = (GRPC_CONTENT_TYPE
+ '+' + self._codec.__content_subtype__)
headers.extend((
('te', 'trailers'),
('content-type', content_type),
('user-agent', USER_AGENT),
))
metadata, = await self._dispatch.send_request(
self._metadata,
method_name=self._method_name,
deadline=self._deadline,
content_type=content_type,
)
headers.extend(encode_metadata(metadata))
release_stream = await stream.send_request(
headers, _processor=protocol.processor,
)
self._stream = stream
self._release_stream = release_stream
self._send_request_done = True
async def send_message(self, message, *, end=False):
"""Coroutine to send message to the server.
If client sends UNARY request, then you should call this coroutine only
once. If client sends STREAM request, then you can call this coroutine
as many times as you need.
.. warning:: It is important to finally end stream from the client-side
when you finished sending messages.
You can do this in two ways:
- specify ``end=True`` argument while sending last message - and last
DATA frame will include END_STREAM flag;
- call :py:meth:`end` coroutine after sending last message - and extra
HEADERS frame with END_STREAM flag will be sent.
First approach is preferred, because it doesn't require sending
additional HTTP/2 frame.
"""
if not self._send_request_done:
await self.send_request()
if end and self._end_done:
raise ProtocolError('Stream was already ended')
with self._wrapper:
message, = await self._dispatch.send_message(message)
await send_message(self._stream, self._codec, message,
self._send_type, end=end)
self._send_message_count += 1
if end:
self._end_done = True
async def end(self):
"""Coroutine to end stream from the client-side.
It should be used to finally end stream from the client-side when we're
finished sending messages to the server and stream wasn't closed with
last DATA frame. See :py:meth:`send_message` for more details.
HTTP/2 stream will have half-closed (local) state after this coroutine
call.
"""
if self._end_done:
raise ProtocolError('Stream was already ended')
if (
not self._cardinality.client_streaming
and not self._send_message_count
):
raise ProtocolError('Unary request requires a single message '
'to be sent')
await self._stream.end()
self._end_done = True
def _raise_for_status(self, headers_map):
status = headers_map[':status']
if status is not None and status != _H2_OK:
grpc_status = _H2_TO_GRPC_STATUS_MAP.get(status, Status.UNKNOWN)
raise GRPCError(grpc_status,
'Received :status = {!r}'.format(status))
def _raise_for_grpc_status(self, headers_map, *, optional=False):
grpc_status = headers_map.get('grpc-status')
if grpc_status is None:
if optional:
return
else:
raise GRPCError(Status.UNKNOWN, 'Missing grpc-status header')
try:
grpc_status_enum = Status(int(grpc_status))
except ValueError:
raise GRPCError(Status.UNKNOWN,
'Invalid grpc-status: {!r}'
.format(grpc_status))
else:
if grpc_status_enum is not Status.OK:
status_message = headers_map.get('grpc-message')
if status_message is not None:
status_message = decode_grpc_message(status_message)
raise GRPCError(grpc_status_enum, status_message)
async def recv_initial_metadata(self):
"""Coroutine to wait for headers with initial metadata from the server.
.. note:: This coroutine will be called implicitly during first
:py:meth:`recv_message` coroutine call, if not called before
explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers-only
response.
When this coroutine finishes, you can access received initial metadata
by using :py:attr:`initial_metadata` attribute.
"""
if not self._send_request_done:
raise ProtocolError('Request was not sent yet')
if self._recv_initial_metadata_done:
raise ProtocolError('Initial metadata was already received')
try:
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_initial_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_initial_metadata(metadata)
self.initial_metadata = metadata
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
content_type = headers_map.get('content-type')
if content_type is None:
raise GRPCError(Status.UNKNOWN,
'Missing content-type header')
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != self._codec.__content_subtype__
):
raise GRPCError(Status.UNKNOWN,
'Invalid content-type: {!r}'
.format(content_type))
except StreamTerminatedError:
# Server can send RST_STREAM frame right after sending trailers-only
# response, so we have to check received headers and probably raise
# more descriptive error
headers = self._stream.recv_headers_nowait()
if headers is None:
raise
else:
headers_map = dict(headers)
self._raise_for_status(headers_map)
self._raise_for_grpc_status(headers_map, optional=True)
# If there are no errors in the headers, just reraise original
# StreamTerminatedError
raise
async def recv_message(self):
"""Coroutine to receive incoming message from the server.
If server sends UNARY response, then you can call this coroutine only
once. If server sends STREAM response, then you should call this
coroutine several times, until it returns None. To simplify you code in
this case, :py:class:`Stream` implements async iterations protocol, so
you can use it like this:
.. code-block:: python3
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python3
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so client will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
# TODO: check that messages were sent for non-stream-stream requests
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
with self._wrapper:
message = await recv_message(self._stream, self._codec,
self._recv_type)
self._recv_message_count += 1
message, = await self._dispatch.recv_message(message)
return message
async def recv_trailing_metadata(self):
"""Coroutine to wait for trailers with trailing metadata from the
server.
.. note:: This coroutine will be called implicitly at exit from
this call (context manager's exit), if not called before explicitly.
May raise :py:class:`~grpclib.exceptions.GRPCError` if server returned
non-:py:attr:`Status.OK <grpclib.const.Status.OK>` in trailers.
When this coroutine finishes, you can access received trailing metadata
by using :py:attr:`trailing_metadata` attribute.
"""
if not self._end_done:
raise ProtocolError('Outgoing stream was not ended')
if (
not self._cardinality.server_streaming
and not self._recv_message_count
):
raise ProtocolError('No messages were received before waiting '
'for trailing metadata')
if self._recv_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already received')
with self._wrapper:
headers = await self._stream.recv_headers()
self._recv_trailing_metadata_done = True
metadata = decode_metadata(headers)
metadata, = await self._dispatch.recv_trailing_metadata(metadata)
self.trailing_metadata = metadata
self._raise_for_grpc_status(dict(headers))
async def __aenter__(self):
if self._deadline is None:
self._wrapper = Wrapper()
else:
self._wrapper = DeadlineWrapper()
self._wrapper_ctx = self._wrapper.start(self._deadline)
self._wrapper_ctx.__enter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if not self._send_request_done:
return
try:
if (
not exc_type
and not self._cancel_done
and not self._stream._transport.is_closing()
):
if not self._recv_initial_metadata_done:
await self.recv_initial_metadata()
if not self._recv_trailing_metadata_done:
await self.recv_trailing_metadata()
finally:
if self._stream.closable:
self._stream.reset_nowait()
self._release_stream()
if self._wrapper_ctx is not None:
self._wrapper_ctx.__exit__(exc_type, exc_val, exc_tb)
|
vmagamedov/grpclib | grpclib/client.py | Channel.close | python | def close(self):
if self._protocol is not None:
self._protocol.processor.close()
del self._protocol | Closes connection to the server. | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L557-L562 | null | class Channel:
"""
Represents a connection to the server, which can be used with generated
stub classes to perform gRPC calls.
.. code-block:: python3
channel = Channel(loop=loop)
client = cafe_grpc.CoffeeMachineStub(channel)
...
request = cafe_pb2.LatteOrder(
size=cafe_pb2.SMALL,
temperature=70,
sugar=3,
)
reply: empty_pb2.Empty = await client.MakeLatte(request)
...
channel.close()
"""
_protocol = None
def __init__(self, host=None, port=None, *, loop, path=None, codec=None,
ssl=None):
"""Initialize connection to the server
:param host: server host name.
:param port: server port number.
:param path: server socket path. If specified, host and port should be
omitted (must be None).
:param ssl: ``True`` or :py:class:`~python:ssl.SSLContext` object; if
``True``, default SSL context is used.
"""
if path is not None and (host is not None or port is not None):
raise ValueError("The 'path' parameter can not be used with the "
"'host' or 'port' parameters.")
else:
if host is None:
host = '127.0.0.1'
if port is None:
port = 50051
self._host = host
self._port = port
self._loop = loop
self._path = path
self._codec = codec or ProtoCodec()
self._config = H2Configuration(client_side=True,
header_encoding='ascii')
self._authority = '{}:{}'.format(self._host, self._port)
if ssl is True:
ssl = self._get_default_ssl_context()
self._ssl = ssl or None
self._scheme = 'https' if self._ssl else 'http'
self._connect_lock = asyncio.Lock(loop=self._loop)
self.__dispatch__ = _DispatchChannelEvents()
def __repr__(self):
return ('Channel({!r}, {!r}, ..., path={!r})'
.format(self._host, self._port, self._path))
def _protocol_factory(self):
return H2Protocol(Handler(), self._config, loop=self._loop)
async def _create_connection(self):
if self._path is not None:
_, protocol = await self._loop.create_unix_connection(
self._protocol_factory, self._path, ssl=self._ssl)
else:
_, protocol = await self._loop.create_connection(
self._protocol_factory, self._host, self._port,
ssl=self._ssl)
return protocol
@property
def _connected(self):
return (self._protocol is not None
and not self._protocol.handler.connection_lost)
async def __connect__(self):
if not self._connected:
async with self._connect_lock:
if not self._connected:
self._protocol = await self._create_connection()
return self._protocol
# https://python-hyper.org/projects/h2/en/stable/negotiating-http2.html
def _get_default_ssl_context(self):
if not ssl:
raise RuntimeError('SSL is not supported.')
ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
ctx.options |= (ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
ctx.set_ciphers('ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20')
ctx.set_alpn_protocols(['h2'])
try:
ctx.set_npn_protocols(['h2'])
except NotImplementedError:
pass
return ctx
def request(self, name, cardinality, request_type, reply_type,
*, timeout=None, deadline=None, metadata=None):
if timeout is not None and deadline is None:
deadline = Deadline.from_timeout(timeout)
elif timeout is not None and deadline is not None:
deadline = min(Deadline.from_timeout(timeout), deadline)
metadata = MultiDict(metadata or ())
return Stream(self, name, metadata, cardinality,
request_type, reply_type, codec=self._codec,
dispatch=self.__dispatch__, deadline=deadline)
def __del__(self):
if self._protocol is not None:
message = 'Unclosed connection: {!r}'.format(self)
warnings.warn(message, ResourceWarning)
if self._loop.is_closed():
return
else:
self.close()
self._loop.call_exception_handler({'message': message})
|
vmagamedov/grpclib | grpclib/client.py | ServiceMethod.open | python | def open(self, *, timeout=None, metadata=None) -> Stream:
return self.channel.request(self.name, self._cardinality,
self.request_type, self.reply_type,
timeout=timeout, metadata=metadata) | Creates and returns :py:class:`Stream` object to perform request
to the server.
Nothing will happen to the current underlying HTTP/2 connection during
this method call. It just initializes :py:class:`Stream` object for you.
Actual request will be sent only during :py:meth:`Stream.send_request`
or :py:meth:`Stream.send_message` coroutine call.
:param float timeout: request timeout (seconds)
:param metadata: custom request metadata, dict or list of pairs
:return: :py:class:`Stream` object | train | https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/client.py#L589-L604 | null | class ServiceMethod:
"""
Base class for all gRPC method types
"""
def __init__(self, channel: Channel, name, request_type, reply_type):
self.channel = channel
self.name = name
self.request_type = request_type
self.reply_type = reply_type
def __init_subclass__(cls, cardinality, **kwargs):
super().__init_subclass__(**kwargs)
cls._cardinality = cardinality
|
LuminosoInsight/wordfreq | wordfreq/tokens.py | simple_tokenize | python | def simple_tokenize(text, include_punctuation=False):
text = unicodedata.normalize('NFC', text)
if include_punctuation:
return [
token.casefold()
for token in TOKEN_RE_WITH_PUNCTUATION.findall(text)
]
else:
return [
token.strip("'").casefold()
for token in TOKEN_RE.findall(text)
] | Tokenize the given text using a straightforward, Unicode-aware token
expression.
The expression mostly implements the rules of Unicode Annex #29 that
are contained in the `regex` module's word boundary matching, including
the refinement that splits words between apostrophes and vowels in order
to separate tokens such as the French article «l'».
It makes sure not to split in the middle of a grapheme, so that zero-width
joiners and marks on Devanagari words work correctly.
Our customizations to the expression are:
- It leaves sequences of Chinese or Japanese characters (specifically, Han
ideograms and hiragana) relatively untokenized, instead of splitting each
character into its own token.
- If `include_punctuation` is False (the default), it outputs only the
tokens that start with a word-like character, or miscellaneous symbols
such as emoji. If `include_punctuation` is True, it outputs all non-space
tokens.
- It keeps Southeast Asian scripts, such as Thai, glued together. This yields
tokens that are much too long, but the alternative is that every grapheme
would end up in its own token, which is worse. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/tokens.py#L148-L186 | null | import regex
import unicodedata
import logging
import langcodes
from .language_info import get_language_info, SPACELESS_SCRIPTS, EXTRA_JAPANESE_CHARACTERS
from .preprocess import preprocess_text, smash_numbers
# Placeholders for CJK functions that we'll import on demand
_mecab_tokenize = None
_jieba_tokenize = None
_simplify_chinese = None
_WARNED_LANGUAGES = set()
logger = logging.getLogger(__name__)
def _make_spaceless_expr():
scripts = sorted(SPACELESS_SCRIPTS)
pieces = [r'\p{IsIdeo}'] + [r'\p{Script=%s}' % script_code for script_code in scripts]
return ''.join(pieces) + EXTRA_JAPANESE_CHARACTERS
SPACELESS_EXPR = _make_spaceless_expr()
TOKEN_RE = regex.compile(r"""
# Case 1: a special case for non-spaced languages
# -----------------------------------------------
# Some scripts are written without spaces, and the Unicode algorithm
# seems to overreact and insert word breaks between all their letters.
# When we see sequences of characters in these scripts, we make sure not
# to break them up. Such scripts include Han ideographs (\p{IsIdeo}),
# hiragana (\p{Script=Hiragana}), and many Southeast Asian scripts such
# as Thai and Khmer.
#
# Without this case, the standard rule (case 2) would make each character
# a separate token. This would be the correct behavior for word-wrapping,
# but a messy failure mode for NLP tokenization.
#
# If you have Chinese or Japanese text, it's certainly better to use a
# tokenizer that's designed for it. Elsewhere in this file, we have
# specific tokenizers that can handle Chinese and Japanese. With this
# rule, though, at least this general tokenizer will fail less badly
# on those languages.
#
# This rule is listed first so that it takes precedence. The placeholder
# <SPACELESS> will be replaced by the complex range expression made by
# _make_spaceless_expr().
[<SPACELESS>]+
|
# Case 2: Gender-neutral "@s"
# ---------------------------
#
# "@" and "@s" are gender-neutral word endings that can replace -a, -o,
# -as, and -os in Spanish, Portuguese, and occasionally Italian.
#
# This doesn't really conflict with other uses of the @ sign, so we simply
# recognize these endings as being part of the token in any language.
#
# We will recognize the endings as part of our main rule for recognizing
# words, which is Case 3 below. However, one case that remains separate is
# the Portuguese word "@s" itself, standing for the article "as" or "os".
# This must be followed by a word break (\b).
@s \b
|
# Case 3: Unicode segmentation with tweaks
# ----------------------------------------
# The start of the token must be 'word-like', not punctuation or whitespace
# or various other things. However, we allow characters of category So
# (Symbol - Other) because many of these are emoji, which can convey
# meaning.
(?=[\w\p{So}])
# The start of the token must not be a letter followed by «'h». If it is,
# we should use Case 3 to match up to the apostrophe, then match a new token
# starting with «h». This rule lets us break «l'heure» into two tokens, just
# like we would do for «l'arc».
(?!\w'[Hh])
# The entire token is made of graphemes (\X). Matching by graphemes means
# that we don't have to specially account for marks or ZWJ sequences. We use
# a non-greedy match so that we can control where the match ends in the
# following expression.
#
# If we were matching by codepoints (.) instead of graphemes (\X), then
# detecting boundaries would be more difficult. Here's a fact that's subtle
# and poorly documented: a position that's between codepoints, but in the
# middle of a grapheme, does not match as a word break (\b), but also does
# not match as not-a-word-break (\B). The word boundary algorithm simply
# doesn't apply in such a position.
\X+?
# The token ends when it encounters a word break (\b). We use the
# non-greedy match (+?) to make sure to end at the first word break we
# encounter.
#
# We need a special case for gender-neutral "@", which is acting as a
# letter, but Unicode considers it to be a symbol and would break words
# around it. We prefer continuing the token with "@" or "@s" over matching
# a word break.
#
# As in case 2, this is only allowed at the end of the word. Unfortunately,
# we can't use the word-break expression \b in this case, because "@"
# already is a word break according to Unicode. Instead, we use a negative
# lookahead assertion to ensure that the next character is not word-like.
(?:
@s? (?!\w) | \b
)
|
# Another subtle fact: the "non-breaking space" U+A0 counts as a word break
# here. That's surprising, but it's also what we want, because we don't want
# any kind of spaces in the middle of our tokens.
# Case 4: Fix French
# ------------------
# This allows us to match the articles in French, Catalan, and related
# languages, such as «l'», that we may have excluded from being part of
# the token in Case 2.
\w'
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
TOKEN_RE_WITH_PUNCTUATION = regex.compile(r"""
# This expression is similar to the expression above. It adds a case between
# 2 and 3 that matches any sequence of punctuation characters.
[<SPACELESS>]+ | # Case 1
@s \b | # Case 2
[\p{punct}]+ | # punctuation
(?=[\w\p{So}]) (?!\w'[Hh]) \X+? (?: @s? (?!w) | \b) | # Case 3
\w' # Case 4
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
# Just identify punctuation, for cases where the tokenizer is separate
PUNCT_RE = regex.compile(r"[\p{punct}]+")
def tokenize(text, lang, include_punctuation=False, external_wordlist=False):
"""
Tokenize this text in a way that's relatively simple but appropriate for
the language. Strings that are looked up in wordfreq will be run through
this function first, so that they can be expected to match the data.
The text will be run through a number of pre-processing steps that vary
by language; see the docstring of `wordfreq.preprocess.preprocess_text`.
If `include_punctuation` is True, punctuation will be included as separate
tokens. Otherwise, punctuation will be omitted in the output.
CJK scripts
-----------
In the CJK languages, word boundaries can't usually be identified by a
regular expression. Instead, there needs to be some language-specific
handling. In Chinese, we use the Jieba tokenizer, with a custom word list
to match the words whose frequencies we can look up. In Japanese and
Korean, we use the MeCab tokenizer.
The `external_wordlist` option only affects Chinese tokenization. If it's
True, then wordfreq will not use its own Chinese wordlist for tokenization.
Instead, it will use the large wordlist packaged with the Jieba tokenizer,
and it will leave Traditional Chinese characters as is. This will probably
give more accurate tokenization, but the resulting tokens won't necessarily
have word frequencies that can be looked up.
If you end up seeing tokens that are entire phrases or sentences glued
together, that probably means you passed in CJK text with the wrong
language code.
"""
# Use globals to load CJK tokenizers on demand, so that we can still run
# in environments that lack the CJK dependencies
global _mecab_tokenize, _jieba_tokenize
language = langcodes.get(lang)
info = get_language_info(language)
text = preprocess_text(text, language)
if info['tokenizer'] == 'mecab':
from wordfreq.mecab import mecab_tokenize as _mecab_tokenize
# Get just the language code out of the Language object, so we can
# use it to select a MeCab dictionary
tokens = _mecab_tokenize(text, language.language)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
elif info['tokenizer'] == 'jieba':
from wordfreq.chinese import jieba_tokenize as _jieba_tokenize
tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
else:
# This is the default case where we use the regex tokenizer. First
# let's complain a bit if we ended up here because we don't have an
# appropriate tokenizer.
if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES:
logger.warning(
"The language '{}' is in the '{}' script, which we don't "
"have a tokenizer for. The results will be bad."
.format(lang, info['script'])
)
_WARNED_LANGUAGES.add(lang)
tokens = simple_tokenize(text, include_punctuation=include_punctuation)
return tokens
def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False):
"""
Get a list of tokens for this text, with largely the same results and
options as `tokenize`, but aggressively normalize some text in a lossy way
that's good for counting word frequencies.
In particular:
- Any sequence of 2 or more adjacent digits, possibly with intervening
punctuation such as a decimal point, will replace each digit with '0'
so that frequencies for numbers don't have to be counted separately.
This is similar to but not quite identical to the word2vec Google News
data, which replaces digits with '#' in tokens with more than one digit.
- In Chinese, unless Traditional Chinese is specifically requested using
'zh-Hant', all characters will be converted to Simplified Chinese.
"""
global _simplify_chinese
info = get_language_info(lang)
tokens = tokenize(text, lang, include_punctuation, external_wordlist)
if info['lookup_transliteration'] == 'zh-Hans':
from wordfreq.chinese import simplify_chinese as _simplify_chinese
tokens = [_simplify_chinese(token) for token in tokens]
return [smash_numbers(token) for token in tokens]
|
LuminosoInsight/wordfreq | wordfreq/tokens.py | tokenize | python | def tokenize(text, lang, include_punctuation=False, external_wordlist=False):
# Use globals to load CJK tokenizers on demand, so that we can still run
# in environments that lack the CJK dependencies
global _mecab_tokenize, _jieba_tokenize
language = langcodes.get(lang)
info = get_language_info(language)
text = preprocess_text(text, language)
if info['tokenizer'] == 'mecab':
from wordfreq.mecab import mecab_tokenize as _mecab_tokenize
# Get just the language code out of the Language object, so we can
# use it to select a MeCab dictionary
tokens = _mecab_tokenize(text, language.language)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
elif info['tokenizer'] == 'jieba':
from wordfreq.chinese import jieba_tokenize as _jieba_tokenize
tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)
if not include_punctuation:
tokens = [token for token in tokens if not PUNCT_RE.match(token)]
else:
# This is the default case where we use the regex tokenizer. First
# let's complain a bit if we ended up here because we don't have an
# appropriate tokenizer.
if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES:
logger.warning(
"The language '{}' is in the '{}' script, which we don't "
"have a tokenizer for. The results will be bad."
.format(lang, info['script'])
)
_WARNED_LANGUAGES.add(lang)
tokens = simple_tokenize(text, include_punctuation=include_punctuation)
return tokens | Tokenize this text in a way that's relatively simple but appropriate for
the language. Strings that are looked up in wordfreq will be run through
this function first, so that they can be expected to match the data.
The text will be run through a number of pre-processing steps that vary
by language; see the docstring of `wordfreq.preprocess.preprocess_text`.
If `include_punctuation` is True, punctuation will be included as separate
tokens. Otherwise, punctuation will be omitted in the output.
CJK scripts
-----------
In the CJK languages, word boundaries can't usually be identified by a
regular expression. Instead, there needs to be some language-specific
handling. In Chinese, we use the Jieba tokenizer, with a custom word list
to match the words whose frequencies we can look up. In Japanese and
Korean, we use the MeCab tokenizer.
The `external_wordlist` option only affects Chinese tokenization. If it's
True, then wordfreq will not use its own Chinese wordlist for tokenization.
Instead, it will use the large wordlist packaged with the Jieba tokenizer,
and it will leave Traditional Chinese characters as is. This will probably
give more accurate tokenization, but the resulting tokens won't necessarily
have word frequencies that can be looked up.
If you end up seeing tokens that are entire phrases or sentences glued
together, that probably means you passed in CJK text with the wrong
language code. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/tokens.py#L189-L254 | [
"def simple_tokenize(text, include_punctuation=False):\n \"\"\"\n Tokenize the given text using a straightforward, Unicode-aware token\n expression.\n\n The expression mostly implements the rules of Unicode Annex #29 that\n are contained in the `regex` module's word boundary matching, including\n the refinement that splits words between apostrophes and vowels in order\n to separate tokens such as the French article «l'».\n\n It makes sure not to split in the middle of a grapheme, so that zero-width\n joiners and marks on Devanagari words work correctly.\n\n Our customizations to the expression are:\n\n - It leaves sequences of Chinese or Japanese characters (specifically, Han\n ideograms and hiragana) relatively untokenized, instead of splitting each\n character into its own token.\n\n - If `include_punctuation` is False (the default), it outputs only the\n tokens that start with a word-like character, or miscellaneous symbols\n such as emoji. If `include_punctuation` is True, it outputs all non-space\n tokens.\n\n - It keeps Southeast Asian scripts, such as Thai, glued together. This yields\n tokens that are much too long, but the alternative is that every grapheme\n would end up in its own token, which is worse.\n \"\"\"\n text = unicodedata.normalize('NFC', text)\n if include_punctuation:\n return [\n token.casefold()\n for token in TOKEN_RE_WITH_PUNCTUATION.findall(text)\n ]\n else:\n return [\n token.strip(\"'\").casefold()\n for token in TOKEN_RE.findall(text)\n ]\n",
"def jieba_tokenize(text, external_wordlist=False):\n \"\"\"\n Tokenize the given text into tokens whose word frequencies can probably\n be looked up. This uses Jieba, a word-frequency-based tokenizer.\n\n If `external_wordlist` is False, we tell Jieba to default to using\n wordfreq's own Chinese wordlist, and not to infer unknown words using a\n hidden Markov model. This ensures that the multi-character tokens that it\n outputs will be ones whose word frequencies we can look up.\n\n If `external_wordlist` is True, this will use the largest version of\n Jieba's original dictionary, with HMM enabled, so its results will be\n independent of the data in wordfreq. These results will be better optimized\n for purposes that aren't looking up word frequencies, such as general-\n purpose tokenization, or collecting word frequencies in the first place.\n \"\"\"\n global jieba_tokenizer, jieba_orig_tokenizer\n if external_wordlist:\n if jieba_orig_tokenizer is None:\n jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)\n return jieba_orig_tokenizer.lcut(text)\n else:\n if jieba_tokenizer is None:\n jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)\n\n # Tokenize the Simplified Chinese version of the text, but return\n # those spans from the original text, even if it's in Traditional\n # Chinese\n tokens = []\n for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):\n tokens.append(text[start:end])\n return tokens\n",
"def mecab_tokenize(text, lang):\n \"\"\"\n Use the mecab-python3 package to tokenize the given text. The `lang`\n must be 'ja' for Japanese or 'ko' for Korean.\n\n The simplest output from mecab-python3 is the single-string form, which\n contains the same table that the command-line version of MeCab would output.\n We find the tokens in the first column of this table.\n \"\"\"\n if lang not in MECAB_DICTIONARY_NAMES:\n raise ValueError(\"Can't run MeCab on language %r\" % lang)\n if lang not in MECAB_ANALYZERS:\n MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang])\n\n analyzer = MECAB_ANALYZERS[lang]\n text = unicodedata.normalize('NFKC', text.strip())\n analyzed = analyzer.parse(text)\n if not analyzed:\n return []\n return [line.split('\\t')[0]\n for line in analyzed.split('\\n')\n if line != '' and line != 'EOS']\n",
"def preprocess_text(text, language):\n \"\"\"\n This function applies pre-processing steps that convert forms of words\n considered equivalent into one standardized form.\n\n As one straightforward step, it case-folds the text. For the purposes of\n wordfreq and related tools, a capitalized word shouldn't have a different\n frequency from its lowercase version.\n\n The steps that are applied in order, only some of which apply to each\n language, are:\n\n - NFC or NFKC normalization, as needed for the language\n - Transliteration of multi-script languages\n - Abjad mark removal\n - Case folding\n - Fixing of diacritics\n\n We'll describe these steps out of order, to start with the more obvious\n steps.\n\n\n Case folding\n ------------\n\n The most common effect of this function is that it case-folds alphabetic\n text to lowercase:\n\n >>> preprocess_text('Word', 'en')\n 'word'\n\n This is proper Unicode-aware case-folding, so it eliminates distinctions\n in lowercase letters that would not appear in uppercase. This accounts for\n the German ß and the Greek final sigma:\n\n >>> preprocess_text('groß', 'de')\n 'gross'\n >>> preprocess_text('λέξις', 'el')\n 'λέξισ'\n\n In Turkish (and Azerbaijani), case-folding is different, because the\n uppercase and lowercase I come in two variants, one with a dot and one\n without. They are matched in a way that preserves the number of dots, which\n the usual pair of \"I\" and \"i\" do not.\n\n >>> preprocess_text('HAKKINDA İSTANBUL', 'tr')\n 'hakkında istanbul'\n\n\n Fixing of diacritics\n --------------------\n\n While we're talking about Turkish: the Turkish alphabet contains letters\n with cedillas attached to the bottom. In the case of \"ş\" and \"ţ\", these\n letters are very similar to two Romanian letters, \"ș\" and \"ț\", which have\n separate _commas_ below them.\n\n (Did you know that a cedilla is not the same as a comma under a letter? I\n didn't until I started dealing with text normalization. 
My keyboard layout\n even inputs a letter with a cedilla when you hit Compose+comma.)\n\n Because these letters look so similar, and because some fonts only include\n one pair of letters and not the other, there are many cases where the\n letters are confused with each other. Our preprocessing normalizes these\n Turkish and Romanian letters to the letters each language prefers.\n\n >>> preprocess_text('kișinin', 'tr') # comma to cedilla\n 'kişinin'\n >>> preprocess_text('ACELAŞI', 'ro') # cedilla to comma\n 'același'\n\n\n Unicode normalization\n ---------------------\n\n Unicode text is NFC normalized in most languages, removing trivial\n distinctions between strings that should be considered equivalent in all\n cases:\n\n >>> word = preprocess_text('natu\\N{COMBINING DIAERESIS}rlich', 'de')\n >>> word\n 'natürlich'\n >>> '\\N{LATIN SMALL LETTER U WITH DIAERESIS}' in word\n True\n\n NFC normalization is sufficient (and NFKC normalization is a bit too strong)\n for many languages that are written in cased, alphabetic scripts.\n Languages in other scripts tend to need stronger normalization to properly\n compare text. 
So we use NFC normalization when the language's script is\n Latin, Greek, or Cyrillic, and we use NFKC normalization for all other\n languages.\n\n Here's an example in Japanese, where preprocessing changes the width (and\n the case) of a Latin letter that's used as part of a word:\n\n >>> preprocess_text('Uターン', 'ja')\n 'uターン'\n\n In Korean, NFKC normalization is important because it aligns two different\n ways of encoding text -- as individual letters that are grouped together\n into square characters, or as the entire syllables that those characters\n represent:\n\n >>> word = '\\u1102\\u1161\\u11c0\\u1106\\u1161\\u11af'\n >>> word\n '낱말'\n >>> len(word)\n 6\n >>> word = preprocess_text(word, 'ko')\n >>> word\n '낱말'\n >>> len(word)\n 2\n\n\n Abjad mark removal\n ------------------\n\n There are many abjad languages, such as Arabic, Hebrew, Persian, and Urdu,\n where words can be marked with vowel points but rarely are. In languages\n that use abjad scripts, we remove all modifiers that are classified by\n Unicode as \"marks\". We also remove an Arabic character called the tatweel,\n which is used to visually lengthen a word.\n\n >>> preprocess_text(\"كَلِمَة\", 'ar')\n 'كلمة'\n >>> preprocess_text(\"الحمــــــد\", 'ar')\n 'الحمد'\n\n Transliteration of multi-script languages\n -----------------------------------------\n\n Some languages are written in multiple scripts, and require special care.\n These languages include Chinese, Serbian, and Azerbaijani.\n\n In Serbian, there is a well-established mapping from Cyrillic letters to\n Latin letters. 
We apply this mapping so that Serbian is always represented\n in Latin letters.\n\n >>> preprocess_text('схваташ', 'sr')\n 'shvataš'\n\n The transliteration is more complete than it needs to be to cover just\n Serbian, so that -- for example -- borrowings from Russian can be\n transliterated, instead of coming out in a mixed script.\n\n >>> preprocess_text('культуры', 'sr')\n \"kul'tury\"\n\n Azerbaijani (Azeri) has a similar transliteration step to Serbian,\n and then the Latin-alphabet text is handled similarly to Turkish.\n\n >>> preprocess_text('бағырты', 'az')\n 'bağırtı'\n\n We don't transliterate Traditional to Simplified Chinese in this step.\n There are some steps where we unify them internally: see chinese.py\n for more information.\n \"\"\"\n # NFC or NFKC normalization, as needed for the language\n info = get_language_info(language)\n text = unicodedata.normalize(info['normal_form'], text)\n\n # Transliteration of multi-script languages\n if info['transliteration'] is not None:\n text = transliterate(info['transliteration'], text)\n\n # Abjad mark removal\n if info['remove_marks']:\n text = remove_marks(text)\n\n # Case folding\n if info['dotless_i']:\n text = casefold_with_i_dots(text)\n else:\n text = text.casefold()\n\n # Fixing of diacritics\n if info['diacritics_under'] == 'commas':\n text = cedillas_to_commas(text)\n elif info['diacritics_under'] == 'cedillas':\n text = commas_to_cedillas(text)\n\n return text\n"
] | import regex
import unicodedata
import logging
import langcodes
from .language_info import get_language_info, SPACELESS_SCRIPTS, EXTRA_JAPANESE_CHARACTERS
from .preprocess import preprocess_text, smash_numbers
# Placeholders for CJK functions that we'll import on demand
_mecab_tokenize = None
_jieba_tokenize = None
_simplify_chinese = None
_WARNED_LANGUAGES = set()
logger = logging.getLogger(__name__)
def _make_spaceless_expr():
scripts = sorted(SPACELESS_SCRIPTS)
pieces = [r'\p{IsIdeo}'] + [r'\p{Script=%s}' % script_code for script_code in scripts]
return ''.join(pieces) + EXTRA_JAPANESE_CHARACTERS
SPACELESS_EXPR = _make_spaceless_expr()
TOKEN_RE = regex.compile(r"""
# Case 1: a special case for non-spaced languages
# -----------------------------------------------
# Some scripts are written without spaces, and the Unicode algorithm
# seems to overreact and insert word breaks between all their letters.
# When we see sequences of characters in these scripts, we make sure not
# to break them up. Such scripts include Han ideographs (\p{IsIdeo}),
# hiragana (\p{Script=Hiragana}), and many Southeast Asian scripts such
# as Thai and Khmer.
#
# Without this case, the standard rule (case 2) would make each character
# a separate token. This would be the correct behavior for word-wrapping,
# but a messy failure mode for NLP tokenization.
#
# If you have Chinese or Japanese text, it's certainly better to use a
# tokenizer that's designed for it. Elsewhere in this file, we have
# specific tokenizers that can handle Chinese and Japanese. With this
# rule, though, at least this general tokenizer will fail less badly
# on those languages.
#
# This rule is listed first so that it takes precedence. The placeholder
# <SPACELESS> will be replaced by the complex range expression made by
# _make_spaceless_expr().
[<SPACELESS>]+
|
# Case 2: Gender-neutral "@s"
# ---------------------------
#
# "@" and "@s" are gender-neutral word endings that can replace -a, -o,
# -as, and -os in Spanish, Portuguese, and occasionally Italian.
#
# This doesn't really conflict with other uses of the @ sign, so we simply
# recognize these endings as being part of the token in any language.
#
# We will recognize the endings as part of our main rule for recognizing
# words, which is Case 3 below. However, one case that remains separate is
# the Portuguese word "@s" itself, standing for the article "as" or "os".
# This must be followed by a word break (\b).
@s \b
|
# Case 3: Unicode segmentation with tweaks
# ----------------------------------------
# The start of the token must be 'word-like', not punctuation or whitespace
# or various other things. However, we allow characters of category So
# (Symbol - Other) because many of these are emoji, which can convey
# meaning.
(?=[\w\p{So}])
# The start of the token must not be a letter followed by «'h». If it is,
# we should use Case 3 to match up to the apostrophe, then match a new token
# starting with «h». This rule lets us break «l'heure» into two tokens, just
# like we would do for «l'arc».
(?!\w'[Hh])
# The entire token is made of graphemes (\X). Matching by graphemes means
# that we don't have to specially account for marks or ZWJ sequences. We use
# a non-greedy match so that we can control where the match ends in the
# following expression.
#
# If we were matching by codepoints (.) instead of graphemes (\X), then
# detecting boundaries would be more difficult. Here's a fact that's subtle
# and poorly documented: a position that's between codepoints, but in the
# middle of a grapheme, does not match as a word break (\b), but also does
# not match as not-a-word-break (\B). The word boundary algorithm simply
# doesn't apply in such a position.
\X+?
# The token ends when it encounters a word break (\b). We use the
# non-greedy match (+?) to make sure to end at the first word break we
# encounter.
#
# We need a special case for gender-neutral "@", which is acting as a
# letter, but Unicode considers it to be a symbol and would break words
# around it. We prefer continuing the token with "@" or "@s" over matching
# a word break.
#
# As in case 2, this is only allowed at the end of the word. Unfortunately,
# we can't use the word-break expression \b in this case, because "@"
# already is a word break according to Unicode. Instead, we use a negative
# lookahead assertion to ensure that the next character is not word-like.
(?:
@s? (?!\w) | \b
)
|
# Another subtle fact: the "non-breaking space" U+A0 counts as a word break
# here. That's surprising, but it's also what we want, because we don't want
# any kind of spaces in the middle of our tokens.
# Case 4: Fix French
# ------------------
# This allows us to match the articles in French, Catalan, and related
# languages, such as «l'», that we may have excluded from being part of
# the token in Case 2.
\w'
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
TOKEN_RE_WITH_PUNCTUATION = regex.compile(r"""
# This expression is similar to the expression above. It adds a case between
# 2 and 3 that matches any sequence of punctuation characters.
[<SPACELESS>]+ | # Case 1
@s \b | # Case 2
[\p{punct}]+ | # punctuation
(?=[\w\p{So}]) (?!\w'[Hh]) \X+? (?: @s? (?!w) | \b) | # Case 3
\w' # Case 4
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
# Just identify punctuation, for cases where the tokenizer is separate
PUNCT_RE = regex.compile(r"[\p{punct}]+")
def simple_tokenize(text, include_punctuation=False):
"""
Tokenize the given text using a straightforward, Unicode-aware token
expression.
The expression mostly implements the rules of Unicode Annex #29 that
are contained in the `regex` module's word boundary matching, including
the refinement that splits words between apostrophes and vowels in order
to separate tokens such as the French article «l'».
It makes sure not to split in the middle of a grapheme, so that zero-width
joiners and marks on Devanagari words work correctly.
Our customizations to the expression are:
- It leaves sequences of Chinese or Japanese characters (specifically, Han
ideograms and hiragana) relatively untokenized, instead of splitting each
character into its own token.
- If `include_punctuation` is False (the default), it outputs only the
tokens that start with a word-like character, or miscellaneous symbols
such as emoji. If `include_punctuation` is True, it outputs all non-space
tokens.
- It keeps Southeast Asian scripts, such as Thai, glued together. This yields
tokens that are much too long, but the alternative is that every grapheme
would end up in its own token, which is worse.
"""
text = unicodedata.normalize('NFC', text)
if include_punctuation:
return [
token.casefold()
for token in TOKEN_RE_WITH_PUNCTUATION.findall(text)
]
else:
return [
token.strip("'").casefold()
for token in TOKEN_RE.findall(text)
]
def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False):
    """
    Tokenize `text` with the same options as `tokenize`, then apply lossy
    normalizations that are useful when counting word frequencies:

    - Any run of 2 or more adjacent digits, possibly with punctuation such
      as a decimal point between them, has each digit replaced with '0', so
      frequencies for numbers don't have to be counted separately. (This is
      similar to, but not identical to, the word2vec Google News data, which
      replaces digits with '#' in tokens with more than one digit.)
    - Chinese text is converted to Simplified Chinese, unless Traditional
      Chinese was specifically requested with 'zh-Hant'.
    """
    global _simplify_chinese
    info = get_language_info(lang)
    result = tokenize(text, lang, include_punctuation, external_wordlist)
    if info['lookup_transliteration'] == 'zh-Hans':
        # Import lazily so the CJK dependencies are only needed when Chinese
        # text is actually processed.
        from wordfreq.chinese import simplify_chinese as _simplify_chinese
        result = [_simplify_chinese(tok) for tok in result]
    return [smash_numbers(tok) for tok in result]
|
LuminosoInsight/wordfreq | wordfreq/tokens.py | lossy_tokenize | python | def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False):
global _simplify_chinese
info = get_language_info(lang)
tokens = tokenize(text, lang, include_punctuation, external_wordlist)
if info['lookup_transliteration'] == 'zh-Hans':
from wordfreq.chinese import simplify_chinese as _simplify_chinese
tokens = [_simplify_chinese(token) for token in tokens]
return [smash_numbers(token) for token in tokens] | Get a list of tokens for this text, with largely the same results and
options as `tokenize`, but aggressively normalize some text in a lossy way
that's good for counting word frequencies.
In particular:
- Any sequence of 2 or more adjacent digits, possibly with intervening
punctuation such as a decimal point, will replace each digit with '0'
so that frequencies for numbers don't have to be counted separately.
This is similar to but not quite identical to the word2vec Google News
data, which replaces digits with '#' in tokens with more than one digit.
- In Chinese, unless Traditional Chinese is specifically requested using
'zh-Hant', all characters will be converted to Simplified Chinese. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/tokens.py#L257-L284 | [
"def tokenize(text, lang, include_punctuation=False, external_wordlist=False):\n \"\"\"\n Tokenize this text in a way that's relatively simple but appropriate for\n the language. Strings that are looked up in wordfreq will be run through\n this function first, so that they can be expected to match the data.\n\n The text will be run through a number of pre-processing steps that vary\n by language; see the docstring of `wordfreq.preprocess.preprocess_text`.\n\n If `include_punctuation` is True, punctuation will be included as separate\n tokens. Otherwise, punctuation will be omitted in the output.\n\n CJK scripts\n -----------\n\n In the CJK languages, word boundaries can't usually be identified by a\n regular expression. Instead, there needs to be some language-specific\n handling. In Chinese, we use the Jieba tokenizer, with a custom word list\n to match the words whose frequencies we can look up. In Japanese and\n Korean, we use the MeCab tokenizer.\n\n The `external_wordlist` option only affects Chinese tokenization. If it's\n True, then wordfreq will not use its own Chinese wordlist for tokenization.\n Instead, it will use the large wordlist packaged with the Jieba tokenizer,\n and it will leave Traditional Chinese characters as is. 
This will probably\n give more accurate tokenization, but the resulting tokens won't necessarily\n have word frequencies that can be looked up.\n\n If you end up seeing tokens that are entire phrases or sentences glued\n together, that probably means you passed in CJK text with the wrong\n language code.\n \"\"\"\n # Use globals to load CJK tokenizers on demand, so that we can still run\n # in environments that lack the CJK dependencies\n global _mecab_tokenize, _jieba_tokenize\n\n language = langcodes.get(lang)\n info = get_language_info(language)\n text = preprocess_text(text, language)\n\n if info['tokenizer'] == 'mecab':\n from wordfreq.mecab import mecab_tokenize as _mecab_tokenize\n # Get just the language code out of the Language object, so we can\n # use it to select a MeCab dictionary\n tokens = _mecab_tokenize(text, language.language)\n if not include_punctuation:\n tokens = [token for token in tokens if not PUNCT_RE.match(token)]\n elif info['tokenizer'] == 'jieba':\n from wordfreq.chinese import jieba_tokenize as _jieba_tokenize\n tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)\n if not include_punctuation:\n tokens = [token for token in tokens if not PUNCT_RE.match(token)]\n else:\n # This is the default case where we use the regex tokenizer. First\n # let's complain a bit if we ended up here because we don't have an\n # appropriate tokenizer.\n if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES:\n logger.warning(\n \"The language '{}' is in the '{}' script, which we don't \"\n \"have a tokenizer for. The results will be bad.\"\n .format(lang, info['script'])\n )\n _WARNED_LANGUAGES.add(lang)\n tokens = simple_tokenize(text, include_punctuation=include_punctuation)\n\n return tokens\n"
] | import regex
import unicodedata
import logging
import langcodes
from .language_info import get_language_info, SPACELESS_SCRIPTS, EXTRA_JAPANESE_CHARACTERS
from .preprocess import preprocess_text, smash_numbers
# Placeholders for CJK functions that we'll import on demand
_mecab_tokenize = None
_jieba_tokenize = None
_simplify_chinese = None
_WARNED_LANGUAGES = set()
logger = logging.getLogger(__name__)
def _make_spaceless_expr():
    """Build the character-class body that matches scripts written without spaces."""
    pieces = [r'\p{IsIdeo}']
    for script_code in sorted(SPACELESS_SCRIPTS):
        pieces.append(r'\p{Script=%s}' % script_code)
    return ''.join(pieces) + EXTRA_JAPANESE_CHARACTERS
SPACELESS_EXPR = _make_spaceless_expr()
TOKEN_RE = regex.compile(r"""
# Case 1: a special case for non-spaced languages
# -----------------------------------------------
# Some scripts are written without spaces, and the Unicode algorithm
# seems to overreact and insert word breaks between all their letters.
# When we see sequences of characters in these scripts, we make sure not
# to break them up. Such scripts include Han ideographs (\p{IsIdeo}),
# hiragana (\p{Script=Hiragana}), and many Southeast Asian scripts such
# as Thai and Khmer.
#
# Without this case, the standard rule (case 2) would make each character
# a separate token. This would be the correct behavior for word-wrapping,
# but a messy failure mode for NLP tokenization.
#
# If you have Chinese or Japanese text, it's certainly better to use a
# tokenizer that's designed for it. Elsewhere in this file, we have
# specific tokenizers that can handle Chinese and Japanese. With this
# rule, though, at least this general tokenizer will fail less badly
# on those languages.
#
# This rule is listed first so that it takes precedence. The placeholder
# <SPACELESS> will be replaced by the complex range expression made by
# _make_spaceless_expr().
[<SPACELESS>]+
|
# Case 2: Gender-neutral "@s"
# ---------------------------
#
# "@" and "@s" are gender-neutral word endings that can replace -a, -o,
# -as, and -os in Spanish, Portuguese, and occasionally Italian.
#
# This doesn't really conflict with other uses of the @ sign, so we simply
# recognize these endings as being part of the token in any language.
#
# We will recognize the endings as part of our main rule for recognizing
# words, which is Case 3 below. However, one case that remains separate is
# the Portuguese word "@s" itself, standing for the article "as" or "os".
# This must be followed by a word break (\b).
@s \b
|
# Case 3: Unicode segmentation with tweaks
# ----------------------------------------
# The start of the token must be 'word-like', not punctuation or whitespace
# or various other things. However, we allow characters of category So
# (Symbol - Other) because many of these are emoji, which can convey
# meaning.
(?=[\w\p{So}])
# The start of the token must not be a letter followed by «'h». If it is,
# we should use Case 3 to match up to the apostrophe, then match a new token
# starting with «h». This rule lets us break «l'heure» into two tokens, just
# like we would do for «l'arc».
(?!\w'[Hh])
# The entire token is made of graphemes (\X). Matching by graphemes means
# that we don't have to specially account for marks or ZWJ sequences. We use
# a non-greedy match so that we can control where the match ends in the
# following expression.
#
# If we were matching by codepoints (.) instead of graphemes (\X), then
# detecting boundaries would be more difficult. Here's a fact that's subtle
# and poorly documented: a position that's between codepoints, but in the
# middle of a grapheme, does not match as a word break (\b), but also does
# not match as not-a-word-break (\B). The word boundary algorithm simply
# doesn't apply in such a position.
\X+?
# The token ends when it encounters a word break (\b). We use the
# non-greedy match (+?) to make sure to end at the first word break we
# encounter.
#
# We need a special case for gender-neutral "@", which is acting as a
# letter, but Unicode considers it to be a symbol and would break words
# around it. We prefer continuing the token with "@" or "@s" over matching
# a word break.
#
# As in case 2, this is only allowed at the end of the word. Unfortunately,
# we can't use the word-break expression \b in this case, because "@"
# already is a word break according to Unicode. Instead, we use a negative
# lookahead assertion to ensure that the next character is not word-like.
(?:
@s? (?!\w) | \b
)
|
# Another subtle fact: the "non-breaking space" U+A0 counts as a word break
# here. That's surprising, but it's also what we want, because we don't want
# any kind of spaces in the middle of our tokens.
# Case 4: Fix French
# ------------------
# This allows us to match the articles in French, Catalan, and related
# languages, such as «l'», that we may have excluded from being part of
# the token in Case 2.
\w'
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
TOKEN_RE_WITH_PUNCTUATION = regex.compile(r"""
    # This expression is similar to the expression above. It adds a case between
    # 2 and 3 that matches any sequence of punctuation characters.
    [<SPACELESS>]+ |                                    # Case 1
    @s \b |                                             # Case 2
    [\p{punct}]+ |                                      # punctuation
    # Case 3: as in TOKEN_RE, a token may end with "@" or "@s" only when the
    # next character is not word-like. The lookahead must be (?!\w), not
    # (?!w) -- the latter only excluded a literal letter "w", which was
    # inconsistent with Case 3 of TOKEN_RE.
    (?=[\w\p{So}]) (?!\w'[Hh]) \X+? (?: @s? (?!\w) | \b) |
    \w'                                                 # Case 4
""".replace('<SPACELESS>', SPACELESS_EXPR), regex.V1 | regex.WORD | regex.VERBOSE)
# Just identify punctuation, for cases where the tokenizer is separate
PUNCT_RE = regex.compile(r"[\p{punct}]+")
def simple_tokenize(text, include_punctuation=False):
    """
    Tokenize the given text using a straightforward, Unicode-aware token
    expression.

    The expression mostly implements the rules of Unicode Annex #29 that are
    contained in the `regex` module's word boundary matching, including the
    refinement that splits words between apostrophes and vowels in order to
    separate tokens such as the French article «l'». It makes sure not to
    split in the middle of a grapheme, so that zero-width joiners and marks
    on Devanagari words work correctly.

    Our customizations to the expression are:

    - It leaves sequences of Chinese or Japanese characters (specifically,
      Han ideograms and hiragana) relatively untokenized, instead of
      splitting each character into its own token.
    - If `include_punctuation` is False (the default), it outputs only the
      tokens that start with a word-like character, or miscellaneous symbols
      such as emoji. If `include_punctuation` is True, it outputs all
      non-space tokens.
    - It keeps Southeast Asian scripts, such as Thai, glued together. This
      yields tokens that are much too long, but the alternative is that
      every grapheme would end up in its own token, which is worse.
    """
    normalized = unicodedata.normalize('NFC', text)
    if include_punctuation:
        matches = TOKEN_RE_WITH_PUNCTUATION.findall(normalized)
        return [match.casefold() for match in matches]
    matches = TOKEN_RE.findall(normalized)
    return [match.strip("'").casefold() for match in matches]
def tokenize(text, lang, include_punctuation=False, external_wordlist=False):
    """
    Tokenize this text in a way that's relatively simple but appropriate for
    the language. Strings that are looked up in wordfreq will be run through
    this function first, so that they can be expected to match the data.
    The text will be run through a number of pre-processing steps that vary
    by language; see the docstring of `wordfreq.preprocess.preprocess_text`.
    If `include_punctuation` is True, punctuation will be included as separate
    tokens. Otherwise, punctuation will be omitted in the output.
    CJK scripts
    -----------
    In the CJK languages, word boundaries can't usually be identified by a
    regular expression. Instead, there needs to be some language-specific
    handling. In Chinese, we use the Jieba tokenizer, with a custom word list
    to match the words whose frequencies we can look up. In Japanese and
    Korean, we use the MeCab tokenizer.
    The `external_wordlist` option only affects Chinese tokenization. If it's
    True, then wordfreq will not use its own Chinese wordlist for tokenization.
    Instead, it will use the large wordlist packaged with the Jieba tokenizer,
    and it will leave Traditional Chinese characters as is. This will probably
    give more accurate tokenization, but the resulting tokens won't necessarily
    have word frequencies that can be looked up.
    If you end up seeing tokens that are entire phrases or sentences glued
    together, that probably means you passed in CJK text with the wrong
    language code.
    """
    # Use globals to load CJK tokenizers on demand, so that we can still run
    # in environments that lack the CJK dependencies
    global _mecab_tokenize, _jieba_tokenize
    language = langcodes.get(lang)
    info = get_language_info(language)
    text = preprocess_text(text, language)
    if info['tokenizer'] == 'mecab':
        from wordfreq.mecab import mecab_tokenize as _mecab_tokenize
        # Get just the language code out of the Language object, so we can
        # use it to select a MeCab dictionary
        tokens = _mecab_tokenize(text, language.language)
        # MeCab emits punctuation as tokens of its own; filter them out
        # unless the caller asked for punctuation.
        if not include_punctuation:
            tokens = [token for token in tokens if not PUNCT_RE.match(token)]
    elif info['tokenizer'] == 'jieba':
        from wordfreq.chinese import jieba_tokenize as _jieba_tokenize
        tokens = _jieba_tokenize(text, external_wordlist=external_wordlist)
        # Jieba also emits punctuation tokens; filter them the same way.
        if not include_punctuation:
            tokens = [token for token in tokens if not PUNCT_RE.match(token)]
    else:
        # This is the default case where we use the regex tokenizer. First
        # let's complain a bit if we ended up here because we don't have an
        # appropriate tokenizer.
        if info['tokenizer'] != 'regex' and lang not in _WARNED_LANGUAGES:
            # Warn only once per language per process.
            logger.warning(
                "The language '{}' is in the '{}' script, which we don't "
                "have a tokenizer for. The results will be bad."
                .format(lang, info['script'])
            )
            _WARNED_LANGUAGES.add(lang)
        tokens = simple_tokenize(text, include_punctuation=include_punctuation)
    return tokens
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | read_cBpack | python | def read_cBpack(filename):
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:] | Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
] | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L35-L84 | null | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def available_languages(wordlist='best'):
    """
    Given a wordlist name, return a dictionary of language codes to
    filenames, representing all the languages in which that wordlist is
    available.
    """
    if wordlist == 'best':
        # 'best' is the union of 'small' and 'large', preferring 'large'.
        merged = available_languages('small')
        merged.update(available_languages('large'))
        return merged
    if wordlist == 'combined':
        logger.warning(
            "The 'combined' wordlists have been renamed to 'small'."
        )
        wordlist = 'small'
    found = {}
    for path in DATA_PATH.glob('*.msgpack.gz'):
        # Data filenames look like '<wordlist>_<lang>.msgpack.gz'; skip
        # files whose names start with an underscore.
        if path.name.startswith('_'):
            continue
        prefix, lang = path.name.split('.')[0].split('_')
        if prefix == wordlist:
            found[lang] = str(path)
    return found
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
    """
    Read the raw data from a wordlist file, returning it as a list of
    lists. (See `read_cBpack` for what this represents.)

    Because we use the `langcodes` module, we can handle slight variations
    in language codes. For example, looking for 'pt-BR', 'pt_br', or even
    'PT_BR' will get you the 'pt' (Portuguese) list. Looking up the
    alternate code 'por' will also get the same list.
    """
    filenames = available_languages(wordlist)
    best, score = langcodes.best_match(lang, list(filenames),
                                       min_score=match_cutoff)
    if score == 0:
        raise LookupError("No wordlist %r available for language %r"
                          % (wordlist, lang))
    if best != lang:
        # We found a close-enough language, but not the exact one requested.
        logger.warning(
            "You asked for word frequencies in language %r. Using the "
            "nearest match, which is %r (%s)."
            % (lang, best, langcodes.get(best).language_name('en'))
        )
    return read_cBpack(filenames[best])
def cB_to_freq(cB):
    """
    Convert a word frequency from the logarithmic centibel scale that we use
    internally to a proportion from 0 to 1.

    On this scale, 0 cB represents the maximum possible frequency of 1.0.
    -100 cB represents a word that happens 1 in 10 times, -200 cB one that
    happens 1 in 100 times, and in general x cB represents a frequency of
    10 ** (x / 100).

    Raises ValueError for positive values, which would describe an
    impossible frequency greater than 1.
    """
    if cB > 0:
        raise ValueError(
            "A frequency cannot be a positive number of centibels."
        )
    exponent = cB / 100
    return 10 ** exponent
def cB_to_zipf(cB):
    """
    Convert a word frequency from centibels to the Zipf scale
    (see `zipf_to_freq`).

    The Zipf unit is simply the bel -- the logarithmic unit wordfreq uses
    internally -- with a shifted zero point, so the conversion is: add 900
    centibels, then divide by 100.
    """
    shifted = cB + 900
    return shifted / 100
def zipf_to_freq(zipf):
    """
    Convert a word frequency from the Zipf scale to a proportion between
    0 and 1.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data; its goal is to map reasonable
    word frequencies to understandable, small positive numbers. A word rates
    as x on the Zipf scale when it occurs 10**x times per billion words; for
    example, once per million words is 3.0 on the Zipf scale.
    """
    occurrences_per_billion = 10 ** zipf
    return occurrences_per_billion / 1e9
def freq_to_zipf(freq):
    """
    Convert a word frequency from a proportion between 0 and 1 to the
    Zipf scale (see `zipf_to_freq`).
    """
    return 9 + math.log(freq, 10)
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
    """
    Get a word frequency list as a dictionary, mapping each token to its
    frequency as a floating-point probability.
    """
    word_freqs = {}
    buckets = get_frequency_list(lang, wordlist, match_cutoff)
    for index, bucket in enumerate(buckets):
        # Every word in bucket `index` has a frequency of -index centibels.
        bucket_freq = cB_to_freq(-index)
        for word in bucket:
            word_freqs[word] = bucket_freq
    return word_freqs
def iter_wordlist(lang, wordlist='best'):
    """
    Yield the words in a wordlist in approximate descending order of
    frequency.

    Because wordfreq rounds off its frequencies, the words form 'bands'
    sharing the same rounded frequency, alphabetized within each band.
    """
    return itertools.chain.from_iterable(get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
    # Uncached implementation behind word_frequency(); see that function for
    # the public documentation of the parameters.
    tokens = lossy_tokenize(word, lang)
    if not tokens:
        # Nothing tokenizable (e.g. an empty or all-punctuation string).
        return minimum
    # Frequencies for multiple tokens are combined using the formula
    # 1 / f = 1 / f1 + 1 / f2 + ...
    # Thus the resulting frequency is less than any individual frequency, and
    # the smallest frequency dominates the sum.
    freqs = get_frequency_dict(lang, wordlist)
    one_over_result = 0.0
    for token in tokens:
        if token not in freqs:
            # If any word is missing, just return the default value
            return minimum
        one_over_result += 1.0 / freqs[token]
    freq = 1.0 / one_over_result
    if get_language_info(lang)['tokenizer'] == 'jieba':
        # If we used the Jieba tokenizer, we could tokenize anything to match
        # our wordlist, even nonsense. To counteract this, we multiply by a
        # probability for each word break that was inferred.
        freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
    # All our frequency data is only precise to within 1% anyway, so round
    # it to 3 significant digits
    unrounded = max(freq, minimum)
    if unrounded == 0.:
        return 0.
    else:
        # leading_zeroes counts the zeroes after the decimal point before the
        # first significant digit; keep 3 digits past that point.
        leading_zeroes = math.floor(-math.log(unrounded, 10))
        return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, from the
    specified `wordlist`.

    These wordlists can be specified:

    - 'large': a wordlist built from at least 5 sources, containing word
      frequencies of 10^-8 and higher
    - 'small': a wordlist built from at least 3 sources, containing word
      frequencies of 10^-6 and higher
    - 'best': uses 'large' if available, and 'small' otherwise

    The value returned will always be at least as large as `minimum`. You
    could set this value to 10^-8, for example, to return 10^-8 for unknown
    words in the 'large' list instead of 0, avoiding a discontinuity.
    """
    cache_key = (word, lang, wordlist, minimum)
    try:
        return _wf_cache[cache_key]
    except KeyError:
        # "Drop everything" cache: when it fills up, empty it entirely
        # instead of tracking recency, which keeps lookups cheap.
        if len(_wf_cache) >= CACHE_SIZE:
            _wf_cache.clear()
        result = _word_frequency(word, lang, wordlist, minimum)
        _wf_cache[cache_key] = result
        return result
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word`, in the language with code `lang`, on the
    Zipf scale.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data. A word rates as x on the Zipf
    scale when it occurs 10**x times per billion words; for example, a word
    that occurs once per million words is at 3.0 on the Zipf scale.

    Zipf values for reasonable words are between 0 and 8. The value returned
    will always be at least as large as `minimum`, even for a word that
    never appears; the default minimum of 0 represents words appearing once
    per billion words or less.

    wordfreq internally quantizes its frequencies to centibels, which are
    1/100 of a Zipf unit, so the result is rounded to the nearest hundredth
    to match this quantization.
    """
    minimum_freq = zipf_to_freq(minimum)
    raw_freq = word_frequency(word, lang, wordlist, minimum_freq)
    return round(freq_to_zipf(raw_freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    """
    Return a frequency list of length `n` in descending order of frequency,
    containing words from `wordlist` in the given language. If `ascii_only`,
    only ASCII words are considered.
    """
    collected = []
    for word in iter_wordlist(lang, wordlist):
        # max(word) <= '~' holds exactly when every character is ASCII.
        if ascii_only and max(word) > '~':
            continue
        collected.append(word)
        if len(collected) >= n:
            break
    return collected
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
                 ascii_only=False):
    """
    Return a string of random, space-separated words of the given language
    from the given wordlist. There will be `nwords` words in the string.

    `bits_per_word` determines the amount of entropy provided by each word:
    when it's higher, this function chooses from a larger list of words,
    some of which are more rare. Set `ascii_only` to True to restrict the
    selection to words written in ASCII characters.
    """
    pool_size = 2 ** bits_per_word
    pool = top_n_list(lang, pool_size, wordlist, ascii_only=ascii_only)
    if len(pool) < pool_size:
        raise ValueError(
            "There aren't enough words in the wordlist to provide %d bits of "
            "entropy per word." % bits_per_word
        )
    picks = [random.choice(pool) for _ in range(nwords)]
    return ' '.join(picks)
def random_ascii_words(lang='en', wordlist='best', nwords=5,
                       bits_per_word=12):
    """
    Return a string of random, space-separated, ASCII-only words of the
    given language from the given wordlist. There will be `nwords` words in
    the string; `bits_per_word` determines the amount of entropy provided by
    each word, as in `random_words`.
    """
    return random_words(lang=lang, wordlist=wordlist, nwords=nwords,
                        bits_per_word=bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | available_languages | python | def available_languages(wordlist='best'):
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available | Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L87-L109 | [
"def available_languages(wordlist='best'):\n \"\"\"\n Given a wordlist name, return a dictionary of language codes to filenames,\n representing all the languages in which that wordlist is available.\n \"\"\"\n if wordlist == 'best':\n available = available_languages('small')\n available.update(available_languages('large'))\n return available\n elif wordlist == 'combined':\n logger.warning(\n \"The 'combined' wordlists have been renamed to 'small'.\"\n )\n wordlist = 'small'\n\n available = {}\n for path in DATA_PATH.glob('*.msgpack.gz'):\n if not path.name.startswith('_'):\n list_name = path.name.split('.')[0]\n name, lang = list_name.split('_')\n if name == wordlist:\n available[lang] = str(path)\n return available\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
    """
    Read a wordlist in our idiosyncratic "cBpack" format, returning it as a
    list of lists of words.

    A cBpack file is a gzipped msgpack file that decodes to a list whose
    first element is a header -- a dict with 'format': 'cB' and
    'version': 1, used to make sure we're reading the right thing -- and
    whose remaining elements are lists of words.

    Word frequencies are rounded to the nearest centibel (one tenth of a
    decibel, a factor of 10 ** .01). 0 cB represents a word with probability
    1; -200 cB means once per 100 tokens; -300 cB once per 1000 tokens; and
    so on. The inner list at index i (after removing the header) holds the
    words whose frequency is -i centibels, sorted alphabetically.

    As an example, a corpus consisting only of "red fish blue fish" has
    "fish" at 50% of tokens (-30 cB) and "red" and "blue" at 25% (-60 cB),
    so its cBpack file would decode to:

        [
            {'format': 'cB', 'version': 1},
            [], [], [], ...     # 30 empty lists
            ['fish'],
            [], [], [], ...     # 29 more empty lists
            ['blue', 'red']
        ]

    Raises ValueError if the header is missing or unrecognized.
    """
    with gzip.open(filename, 'rb') as infile:
        contents = msgpack.load(infile, raw=False)
    header = contents[0]
    header_ok = (
        isinstance(header, dict)
        and header.get('format') == 'cB'
        and header.get('version') == 1
    )
    if not header_ok:
        raise ValueError("Unexpected header: %r" % header)
    return contents[1:]
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
    """
    Read the raw data from a wordlist file, returning it as a list of lists.
    (See `read_cBpack` for what this represents.)

    Because we use the `langcodes` module, slight variations in language
    codes are handled: 'pt-BR', 'pt_br', or even 'PT_BR' all get you the
    'pt' (Portuguese) list, as does the alternate code 'por'.
    """
    filenames = available_languages(wordlist)
    best, score = langcodes.best_match(lang, list(filenames),
                                       min_score=match_cutoff)
    if score == 0:
        raise LookupError("No wordlist %r available for language %r"
                          % (wordlist, lang))
    if best != lang:
        # A close-enough language was found, but not the exact one requested.
        logger.warning(
            "You asked for word frequencies in language %r. Using the "
            "nearest match, which is %r (%s)."
            % (lang, best, langcodes.get(best).language_name('en'))
        )
    return read_cBpack(filenames[best])
def cB_to_freq(cB):
    """
    Convert a frequency on the logarithmic centibel scale that we use
    internally into a proportion from 0 to 1.

    0 cB is the maximum possible frequency, 1.0. Each additional -100 cB
    divides the frequency by 10: -100 cB is 1 in 10, -200 cB is 1 in 100,
    and in general x cB is 10 ** (x / 100). Positive values are rejected,
    since a proportion cannot exceed 1.
    """
    if cB > 0:
        raise ValueError(
            "A frequency cannot be a positive number of centibels."
        )
    return 10 ** (cB / 100.0)
def cB_to_zipf(cB):
    """
    Convert a frequency from centibels to the Zipf scale
    (see `zipf_to_freq`).

    Both scales are logarithmic: a Zipf unit is simply a bel (100 cB)
    with a shifted zero point, so conversion is adding 900 and dividing
    by 100.
    """
    return (900 + cB) / 100
def zipf_to_freq(zipf):
    """
    Convert a frequency on the Zipf scale to a proportion between 0 and 1.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data; its goal is to map
    reasonable word frequencies onto understandable, small positive
    numbers. A word scores x on the Zipf scale when it occurs 10**x
    times per billion words, so a once-per-million word scores 3.0.
    """
    occurrences_per_billion = 10 ** zipf
    return occurrences_per_billion / 1e9
def freq_to_zipf(freq):
    """
    Convert a frequency expressed as a proportion between 0 and 1 to the
    Zipf scale (see `zipf_to_freq`).
    """
    return 9 + math.log(freq, 10)
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
    """
    Get a wordlist as a dictionary that maps each token to its
    frequency, expressed as a floating-point probability.
    """
    word_list = get_frequency_list(lang, wordlist, match_cutoff)
    # Bucket index (without the header) is the negated centibel value.
    return {
        word: cB_to_freq(-centibels)
        for centibels, bucket in enumerate(word_list)
        for word in bucket
    }
def iter_wordlist(lang, wordlist='best'):
    """
    Yield the words of a wordlist in roughly descending frequency order.

    Because wordfreq rounds off its frequencies, the words come out in
    'bands' that share a rounded frequency, alphabetically ordered
    within each band.
    """
    return itertools.chain.from_iterable(get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
    """
    Uncached implementation of `word_frequency`: tokenize `word`,
    combine the per-token frequencies, and return a value that is at
    least `minimum`.
    """
    tokens = lossy_tokenize(word, lang)
    if not tokens:
        # Nothing tokenizable (e.g. pure punctuation): fall back to the floor.
        return minimum
    # Frequencies for multiple tokens are combined using the formula
    # 1 / f = 1 / f1 + 1 / f2 + ...
    # Thus the resulting frequency is less than any individual frequency, and
    # the smallest frequency dominates the sum.
    freqs = get_frequency_dict(lang, wordlist)
    one_over_result = 0.0
    for token in tokens:
        if token not in freqs:
            # If any word is missing, just return the default value
            return minimum
        one_over_result += 1.0 / freqs[token]
    freq = 1.0 / one_over_result
    if get_language_info(lang)['tokenizer'] == 'jieba':
        # If we used the Jieba tokenizer, we could tokenize anything to match
        # our wordlist, even nonsense. To counteract this, we multiply by a
        # probability for each word break that was inferred.
        freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
    # All our frequency data is only precise to within 1% anyway, so round
    # it to 3 significant digits
    unrounded = max(freq, minimum)
    if unrounded == 0.:
        return 0.
    else:
        # floor(-log10(x)) counts the leading zeroes after the decimal
        # point, so rounding to that many places + 3 keeps 3 significant
        # digits regardless of magnitude.
        leading_zeroes = math.floor(-math.log(unrounded, 10))
        return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, from
    the specified `wordlist`.

    These wordlists can be specified:

    - 'large': a wordlist built from at least 5 sources, containing word
      frequencies of 10^-8 and higher
    - 'small': a wordlist built from at least 3 sources, containing word
      frequencies of 10^-6 and higher
    - 'best': uses 'large' if available, and 'small' otherwise

    The value returned will always be at least as large as `minimum`.
    You could set this value to 10^-8, for example, to return 10^-8 for
    unknown words in the 'large' list instead of 0, avoiding a
    discontinuity.
    """
    key = (word, lang, wordlist, minimum)
    if key not in _wf_cache:
        # Crude but fast cache policy: when full, throw everything away.
        if len(_wf_cache) >= CACHE_SIZE:
            _wf_cache.clear()
        _wf_cache[key] = _word_frequency(word, lang, wordlist, minimum)
    return _wf_cache[key]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word`, in the language with code `lang`, on
    the Zipf scale.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data; its goal is to map
    reasonable word frequencies onto understandable, small positive
    numbers. A word scores x on the Zipf scale when it occurs 10**x
    times per billion words; a once-per-million word scores 3.0.

    Zipf values for reasonable words fall between 0 and 8. The value
    this function returns will always be at least as large as `minimum`,
    even for a word that never appears. The default minimum is 0,
    representing words that appear once per billion words or less.

    wordfreq internally quantizes its frequencies to centibels, which
    are 1/100 of a Zipf unit, so the result is rounded to the nearest
    hundredth to match that quantization.
    """
    min_freq = zipf_to_freq(minimum)
    raw_freq = word_frequency(word, lang, wordlist, min_freq)
    return round(freq_to_zipf(raw_freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    """
    Return up to `n` words from `wordlist` for the given language, in
    descending order of frequency.

    If `ascii_only`, only words written entirely in ASCII are included.
    """
    words = iter_wordlist(lang, wordlist)
    if ascii_only:
        # '~' is the highest printable ASCII character, so this keeps
        # only words whose every character is ASCII.
        words = (word for word in words if max(word) <= '~')
    return list(itertools.islice(words, n))
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
                 ascii_only=False):
    """
    Return a string of `nwords` random, space-separated words drawn from
    the given language's wordlist.

    `bits_per_word` sets the entropy each word contributes: a higher
    value draws from a larger pool that includes rarer words. Set
    `ascii_only` to True to restrict the pool to words written entirely
    in ASCII characters.
    """
    pool_size = 2 ** bits_per_word
    pool = top_n_list(lang, pool_size, wordlist, ascii_only=ascii_only)
    if len(pool) < pool_size:
        raise ValueError(
            "There aren't enough words in the wordlist to provide %d bits of "
            "entropy per word." % bits_per_word
        )
    return ' '.join(random.choice(pool) for _ in range(nwords))
def random_ascii_words(lang='en', wordlist='best', nwords=5,
                       bits_per_word=12):
    """
    Return a string of `nwords` random, space-separated, ASCII-only
    words drawn from the given language's wordlist.

    `bits_per_word` sets the entropy each word contributes: a higher
    value draws from a larger pool that includes rarer words.
    """
    return random_words(lang=lang, wordlist=wordlist, nwords=nwords,
                        bits_per_word=bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | get_frequency_list | python | def get_frequency_list(lang, wordlist='best', match_cutoff=30):
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best]) | Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L113-L137 | [
"def read_cBpack(filename):\n \"\"\"\n Read a file from an idiosyncratic format that we use for storing\n approximate word frequencies, called \"cBpack\".\n\n The cBpack format is as follows:\n\n - The file on disk is a gzipped file in msgpack format, which decodes to a\n list whose first element is a header, and whose remaining elements are\n lists of words.\n\n - The header is a dictionary with 'format' and 'version' keys that make\n sure that we're reading the right thing.\n\n - Each inner list of words corresponds to a particular word frequency,\n rounded to the nearest centibel -- that is, one tenth of a decibel, or\n a factor of 10 ** .01.\n\n 0 cB represents a word that occurs with probability 1, so it is the only\n word in the data (this of course doesn't happen). -200 cB represents a\n word that occurs once per 100 tokens, -300 cB represents a word that\n occurs once per 1000 tokens, and so on.\n\n - The index of each list within the overall list (without the header) is\n the negative of its frequency in centibels.\n\n - Each inner list is sorted in alphabetical order.\n\n As an example, consider a corpus consisting only of the words \"red fish\n blue fish\". The word \"fish\" occurs as 50% of tokens (-30 cB), while \"red\"\n and \"blue\" occur as 25% of tokens (-60 cB). The cBpack file of their word\n frequencies would decode to this:\n\n [\n {'format': 'cB', 'version': 1},\n [], [], [], ... # 30 empty lists\n ['fish'],\n [], [], [], ... # 29 more empty lists\n ['blue', 'red']\n ]\n \"\"\"\n with gzip.open(filename, 'rb') as infile:\n data = msgpack.load(infile, raw=False)\n header = data[0]\n if (\n not isinstance(header, dict) or header.get('format') != 'cB'\n or header.get('version') != 1\n ):\n raise ValueError(\"Unexpected header: %r\" % header)\n return data[1:]\n",
"def available_languages(wordlist='best'):\n \"\"\"\n Given a wordlist name, return a dictionary of language codes to filenames,\n representing all the languages in which that wordlist is available.\n \"\"\"\n if wordlist == 'best':\n available = available_languages('small')\n available.update(available_languages('large'))\n return available\n elif wordlist == 'combined':\n logger.warning(\n \"The 'combined' wordlists have been renamed to 'small'.\"\n )\n wordlist = 'small'\n\n available = {}\n for path in DATA_PATH.glob('*.msgpack.gz'):\n if not path.name.startswith('_'):\n list_name = path.name.split('.')[0]\n name, lang = list_name.split('_')\n if name == wordlist:\n available[lang] = str(path)\n return available\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frequencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
"""
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at least as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
"""
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
"""
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered.
"""
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
ascii_only=False):
"""
Returns a string of random, space separated words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
You can restrict the selection of words to those written in ASCII
characters by setting `ascii_only` to True.
"""
n_choices = 2 ** bits_per_word
choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
if len(choices) < n_choices:
raise ValueError(
"There aren't enough words in the wordlist to provide %d bits of "
"entropy per word." % bits_per_word
)
return ' '.join([random.choice(choices) for i in range(nwords)])
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
"""
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
"""
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | get_frequency_dict | python | def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs | Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L195-L206 | [
"def cB_to_freq(cB):\n \"\"\"\n Convert a word frequency from the logarithmic centibel scale that we use\n internally, to a proportion from 0 to 1.\n\n On this scale, 0 cB represents the maximum possible frequency of\n 1.0. -100 cB represents a word that happens 1 in 10 times,\n -200 cB represents something that happens 1 in 100 times, and so on.\n\n In general, x cB represents a frequency of 10 ** (x/100).\n \"\"\"\n if cB > 0:\n raise ValueError(\n \"A frequency cannot be a positive number of centibels.\"\n )\n return 10 ** (cB / 100)\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
"""
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
"""
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frquencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
"""
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at last as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
"""
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    """
    Return up to `n` words of the given language from `wordlist`, in
    descending order of frequency.

    When `ascii_only` is true, words containing non-ASCII characters are
    skipped.
    """
    words = []
    for candidate in iter_wordlist(lang, wordlist):
        # max(candidate) <= '~' holds exactly when every character is ASCII.
        if ascii_only and max(candidate) > '~':
            continue
        words.append(candidate)
        if len(words) >= n:
            break
    return words
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
                 ascii_only=False):
    """
    Generate a string of `nwords` random words, separated by spaces, drawn
    from the given language's wordlist.

    Each word is chosen uniformly from the most frequent 2**bits_per_word
    words, so `bits_per_word` controls the entropy each word contributes:
    higher values draw from a larger pool that includes rarer words.

    Set `ascii_only` to True to restrict the selection to words written in
    ASCII characters.

    Raises ValueError when the wordlist is too small to supply the requested
    amount of entropy per word.
    """
    pool_size = 2 ** bits_per_word
    pool = top_n_list(lang, pool_size, wordlist, ascii_only=ascii_only)
    if len(pool) < pool_size:
        raise ValueError(
            "There aren't enough words in the wordlist to provide %d bits of "
            "entropy per word." % bits_per_word
        )
    picks = [random.choice(pool) for _ in range(nwords)]
    return ' '.join(picks)
def random_ascii_words(lang='en', wordlist='best', nwords=5,
                       bits_per_word=12):
    """
    Generate a string of `nwords` random, space-separated ASCII words from
    the given language's wordlist.

    `bits_per_word` controls the entropy per word, as in `random_words`:
    higher values choose from a larger list of words, some of which are
    more rare.
    """
    return random_words(lang, wordlist, nwords, bits_per_word,
                        ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | word_frequency | python | def word_frequency(word, lang, wordlist='best', minimum=0.):
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args] | Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frquencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L262-L286 | [
"def _word_frequency(word, lang, wordlist, minimum):\n tokens = lossy_tokenize(word, lang)\n if not tokens:\n return minimum\n\n # Frequencies for multiple tokens are combined using the formula\n # 1 / f = 1 / f1 + 1 / f2 + ...\n # Thus the resulting frequency is less than any individual frequency, and\n # the smallest frequency dominates the sum.\n freqs = get_frequency_dict(lang, wordlist)\n one_over_result = 0.0\n for token in tokens:\n if token not in freqs:\n # If any word is missing, just return the default value\n return minimum\n one_over_result += 1.0 / freqs[token]\n\n freq = 1.0 / one_over_result\n\n if get_language_info(lang)['tokenizer'] == 'jieba':\n # If we used the Jieba tokenizer, we could tokenize anything to match\n # our wordlist, even nonsense. To counteract this, we multiply by a\n # probability for each word break that was inferred.\n freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)\n\n # All our frequency data is only precise to within 1% anyway, so round\n # it to 3 significant digits\n unrounded = max(freq, minimum)\n if unrounded == 0.:\n return 0.\n else:\n leading_zeroes = math.floor(-math.log(unrounded, 10))\n return round(unrounded, leading_zeroes + 3)\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
"""
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
"""
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at last as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
"""
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
"""
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered.
"""
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
ascii_only=False):
"""
Returns a string of random, space separated words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
You can restrict the selection of words to those written in ASCII
characters by setting `ascii_only` to True.
"""
n_choices = 2 ** bits_per_word
choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
if len(choices) < n_choices:
raise ValueError(
"There aren't enough words in the wordlist to provide %d bits of "
"entropy per word." % bits_per_word
)
return ' '.join([random.choice(choices) for i in range(nwords)])
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
"""
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
"""
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | zipf_frequency | python | def zipf_frequency(word, lang, wordlist='best', minimum=0.):
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2) | Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at last as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L289-L313 | [
"def zipf_to_freq(zipf):\n \"\"\"\n Convert a word frequency from the Zipf scale to a proportion between 0 and\n 1.\n\n The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,\n who compiled the SUBTLEX data. The goal of the Zipf scale is to map\n reasonable word frequencies to understandable, small positive numbers.\n\n A word rates as x on the Zipf scale when it occurs 10**x times per billion\n words. For example, a word that occurs once per million words is at 3.0 on\n the Zipf scale.\n \"\"\"\n return 10 ** zipf / 1e9\n",
"def freq_to_zipf(freq):\n \"\"\"\n Convert a word frequency from a proportion between 0 and 1 to the\n Zipf scale (see `zipf_to_freq`).\n \"\"\"\n return math.log(freq, 10) + 9\n",
"def word_frequency(word, lang, wordlist='best', minimum=0.):\n \"\"\"\n Get the frequency of `word` in the language with code `lang`, from the\n specified `wordlist`.\n\n These wordlists can be specified:\n\n - 'large': a wordlist built from at least 5 sources, containing word\n frequencies of 10^-8 and higher\n - 'small': a wordlist built from at least 3 sources, containing word\n frquencies of 10^-6 and higher\n - 'best': uses 'large' if available, and 'small' otherwise\n\n The value returned will always be at least as large as `minimum`.\n You could set this value to 10^-8, for example, to return 10^-8 for\n unknown words in the 'large' list instead of 0, avoiding a discontinuity.\n \"\"\"\n args = (word, lang, wordlist, minimum)\n try:\n return _wf_cache[args]\n except KeyError:\n if len(_wf_cache) >= CACHE_SIZE:\n _wf_cache.clear()\n _wf_cache[args] = _word_frequency(*args)\n return _wf_cache[args]\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
"""
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
"""
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frquencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
"""
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
"""
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered.
"""
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
ascii_only=False):
"""
Returns a string of random, space separated words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
You can restrict the selection of words to those written in ASCII
characters by setting `ascii_only` to True.
"""
n_choices = 2 ** bits_per_word
choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
if len(choices) < n_choices:
raise ValueError(
"There aren't enough words in the wordlist to provide %d bits of "
"entropy per word." % bits_per_word
)
return ' '.join([random.choice(choices) for i in range(nwords)])
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
"""
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
"""
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | top_n_list | python | def top_n_list(lang, n, wordlist='best', ascii_only=False):
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results | Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L317-L329 | [
"def iter_wordlist(lang, wordlist='best'):\n \"\"\"\n Yield the words in a wordlist in approximate descending order of\n frequency.\n\n Because wordfreq rounds off its frequencies, the words will form 'bands'\n with the same rounded frequency, appearing in alphabetical order within\n each band.\n \"\"\"\n return itertools.chain(*get_frequency_list(lang, wordlist))\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
    """
    Map language codes to wordlist filenames for the given wordlist name.

    Returns a dict of {language code: file path} covering every language
    in which `wordlist` is available. The 'best' wordlist is the union of
    'small' and 'large', preferring 'large' where both exist. The old
    name 'combined' is accepted as an alias for 'small', with a warning.
    """
    if wordlist == 'best':
        merged = available_languages('small')
        merged.update(available_languages('large'))
        return merged
    if wordlist == 'combined':
        logger.warning(
            "The 'combined' wordlists have been renamed to 'small'."
        )
        wordlist = 'small'

    found = {}
    for path in DATA_PATH.glob('*.msgpack.gz'):
        # Filenames starting with '_' are private/auxiliary data files.
        if path.name.startswith('_'):
            continue
        name, lang = path.name.split('.')[0].split('_')
        if name == wordlist:
            found[lang] = str(path)
    return found
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
    """
    Read the raw wordlist data for a language, returned as a list of
    lists of words (see `read_cBpack` for what this structure means).

    Language codes are matched approximately via `langcodes`: asking for
    'pt-BR', 'pt_br', or even 'PT_BR' gets you the 'pt' (Portuguese)
    list, as does the alternate code 'por'. A warning is logged when the
    match is not exact, and a LookupError is raised when no available
    language scores at least `match_cutoff`.
    """
    candidates = available_languages(wordlist)
    best, score = langcodes.best_match(lang, list(candidates),
                                       min_score=match_cutoff)
    if score == 0:
        raise LookupError("No wordlist %r available for language %r"
                          % (wordlist, lang))
    if best != lang:
        logger.warning(
            "You asked for word frequencies in language %r. Using the "
            "nearest match, which is %r (%s)."
            % (lang, best, langcodes.get(best).language_name('en'))
        )
    return read_cBpack(candidates[best])
def cB_to_freq(cB):
    """
    Convert a centibel value to a frequency between 0 and 1.

    Centibels are the logarithmic scale wordfreq uses internally: 0 cB is
    the maximum possible frequency of 1.0, -100 cB is 1 in 10, -200 cB is
    1 in 100, and in general x cB is a frequency of 10 ** (x / 100).

    Raises ValueError for positive inputs, which would imply an
    impossible frequency greater than 1.
    """
    if cB <= 0:
        return 10 ** (cB / 100)
    raise ValueError(
        "A frequency cannot be a positive number of centibels."
    )
def cB_to_zipf(cB):
    """
    Convert centibels to the Zipf scale (see `zipf_to_freq`).

    The Zipf unit is simply the bel with a shifted zero point, so the
    conversion is linear: add 900 centibels, then divide by 100.
    """
    shifted = cB + 900
    return shifted / 100
def zipf_to_freq(zipf):
    """
    Convert a Zipf-scale value to a frequency between 0 and 1.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data; it maps reasonable word
    frequencies to small, understandable positive numbers. A word rates
    as x on the Zipf scale when it occurs 10**x times per billion words,
    so once per million words corresponds to 3.0.
    """
    per_billion = 10 ** zipf
    return per_billion / 1e9
def freq_to_zipf(freq):
    """
    Convert a frequency (a proportion between 0 and 1) to the Zipf scale
    (see `zipf_to_freq` for the scale's definition).
    """
    return 9 + math.log(freq, 10)
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
    """
    Get a language's word frequencies as a dict mapping each token to a
    floating-point probability.
    """
    # Bucket i of the cBpack data holds words whose frequency is -i cB.
    return {
        word: cB_to_freq(-index)
        for index, bucket in enumerate(
            get_frequency_list(lang, wordlist, match_cutoff)
        )
        for word in bucket
    }
def iter_wordlist(lang, wordlist='best'):
    """
    Yield a wordlist's words in approximately descending frequency order.

    Because frequencies are rounded, words appear in 'bands' that share a
    rounded frequency, alphabetized within each band.
    """
    return itertools.chain.from_iterable(get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
# Keys are (word, lang, wordlist, minimum) tuples; the whole cache is cleared
# when it reaches CACHE_SIZE entries.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
    # Uncached implementation behind word_frequency(); see its docstring
    # for the meaning of the parameters.
    tokens = lossy_tokenize(word, lang)
    if not tokens:
        return minimum

    # Multiple tokens are combined with the harmonic-sum formula
    # 1 / f = 1 / f1 + 1 / f2 + ..., so the result is smaller than any
    # individual frequency and the rarest token dominates.
    freqs = get_frequency_dict(lang, wordlist)
    inverse_sum = 0.0
    for token in tokens:
        try:
            inverse_sum += 1.0 / freqs[token]
        except KeyError:
            # Any unknown token makes the whole phrase unknown.
            return minimum
    freq = 1.0 / inverse_sum

    if get_language_info(lang)['tokenizer'] == 'jieba':
        # Jieba can tokenize anything -- even nonsense -- into known
        # pieces; penalize each inferred word break to compensate.
        freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)

    # The frequency data is only precise to about 1%, so round the
    # result to 3 significant digits (while respecting the floor).
    floored = max(freq, minimum)
    if floored == 0.:
        return 0.
    magnitude = math.floor(-math.log(floored, 10))
    return round(floored, magnitude + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, from
    the specified `wordlist`.

    These wordlists can be specified:

    - 'large': built from at least 5 sources, containing word
      frequencies of 10^-8 and higher
    - 'small': built from at least 3 sources, containing word
      frequencies of 10^-6 and higher
    - 'best': uses 'large' if available, and 'small' otherwise

    The value returned is never smaller than `minimum`. You could set
    `minimum` to 10^-8, for example, to get 10^-8 instead of 0 for
    unknown words in the 'large' list, avoiding a discontinuity.
    """
    key = (word, lang, wordlist, minimum)
    if key not in _wf_cache:
        # Dump the whole cache rather than evict: cheaper than LRU
        # bookkeeping, per the note on _wf_cache above.
        if len(_wf_cache) >= CACHE_SIZE:
            _wf_cache.clear()
        _wf_cache[key] = _word_frequency(*key)
    return _wf_cache[key]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, on the
    Zipf scale.

    The Zipf scale is a logarithmic frequency scale proposed by Marc
    Brysbaert, who compiled the SUBTLEX data. A word rates as x on the
    Zipf scale when it occurs 10**x times per billion words; a word that
    occurs once per million words is at 3.0.

    Zipf values for reasonable words fall between 0 and 8. The returned
    value is always at least as large as `minimum`, even for a word that
    never appears; the default minimum of 0 represents words appearing
    once per billion words or less.

    wordfreq internally quantizes frequencies to centibels -- 1/100 of a
    Zipf unit -- so the result is rounded to the nearest hundredth to
    match that quantization.
    """
    floor_freq = zipf_to_freq(minimum)
    raw = word_frequency(word, lang, wordlist, floor_freq)
    return round(freq_to_zipf(raw), 2)
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
                 ascii_only=False):
    """
    Return a string of random, space-separated words.

    The words are drawn from the given language's wordlist, and the
    string contains `nwords` of them. `bits_per_word` determines the
    amount of entropy provided by each word: raising it selects from a
    larger pool of 2 ** bits_per_word words, some of which are rarer.

    Set `ascii_only=True` to restrict the selection to words written in
    ASCII characters.

    Raises ValueError when the wordlist cannot supply 2 ** bits_per_word
    distinct choices.
    """
    # BUG FIX: this function was decorated with @lru_cache(maxsize=100),
    # which made repeated calls with identical arguments return the
    # *same* "random" string -- destroying the entropy this docstring
    # promises. The cache is removed so every call draws fresh words.
    # NOTE(review): `random` is not a cryptographically secure RNG; if
    # these strings are ever used as passphrases, `secrets.choice` would
    # be the safer picker -- flagging rather than changing it here.
    n_choices = 2 ** bits_per_word
    choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
    if len(choices) < n_choices:
        raise ValueError(
            "There aren't enough words in the wordlist to provide %d bits of "
            "entropy per word." % bits_per_word
        )
    return ' '.join(random.choice(choices) for _ in range(nwords))
def random_ascii_words(lang='en', wordlist='best', nwords=5,
                       bits_per_word=12):
    """
    Return a string of random, space-separated, ASCII-only words.

    This is `random_words` with `ascii_only=True`; see that function for
    the meaning of the parameters.
    """
    return random_words(lang=lang, wordlist=wordlist, nwords=nwords,
                        bits_per_word=bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | random_words | python | def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
ascii_only=False):
n_choices = 2 ** bits_per_word
choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
if len(choices) < n_choices:
raise ValueError(
"There aren't enough words in the wordlist to provide %d bits of "
"entropy per word." % bits_per_word
)
return ' '.join([random.choice(choices) for i in range(nwords)]) | Returns a string of random, space separated words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
You can restrict the selection of words to those written in ASCII
characters by setting `ascii_only` to True. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L332-L354 | null | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
"""
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
"""
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frequencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
"""
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at least as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
"""
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
"""
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered.
"""
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
"""
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
"""
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
LuminosoInsight/wordfreq | wordfreq/__init__.py | random_ascii_words | python | def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True) | Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L357-L369 | [
"def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,\n ascii_only=False):\n \"\"\"\n Returns a string of random, space separated words.\n\n These words are of the given language and from the given wordlist.\n There will be `nwords` words in the string.\n\n `bits_per_word` determines the amount of entropy provided by each word;\n when it's higher, this function will choose from a larger list of\n words, some of which are more rare.\n\n You can restrict the selection of words to those written in ASCII\n characters by setting `ascii_only` to True.\n \"\"\"\n n_choices = 2 ** bits_per_word\n choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)\n if len(choices) < n_choices:\n raise ValueError(\n \"There aren't enough words in the wordlist to provide %d bits of \"\n \"entropy per word.\" % bits_per_word\n )\n return ' '.join([random.choice(choices) for i in range(nwords)])\n"
] | from pkg_resources import resource_filename
from functools import lru_cache
import langcodes
import msgpack
import gzip
import itertools
import pathlib
import random
import logging
import math
from .tokens import tokenize, simple_tokenize, lossy_tokenize
from .language_info import get_language_info
logger = logging.getLogger(__name__)
CACHE_SIZE = 100000
DATA_PATH = pathlib.Path(resource_filename('wordfreq', 'data'))
# We'll divide the frequency by 10 for each token boundary that was inferred.
# (We determined the factor of 10 empirically by looking at words in the
# Chinese wordlist that weren't common enough to be identified by the
# tokenizer. These words would get split into multiple tokens, and their
# inferred frequency would be on average 9.77 times higher than their actual
# frequency.)
INFERRED_SPACE_FACTOR = 10.0
# tokenize and simple_tokenize are imported so that other things can import
# them from here. Suppress the pyflakes warning.
tokenize = tokenize
simple_tokenize = simple_tokenize
def read_cBpack(filename):
"""
Read a file from an idiosyncratic format that we use for storing
approximate word frequencies, called "cBpack".
The cBpack format is as follows:
- The file on disk is a gzipped file in msgpack format, which decodes to a
list whose first element is a header, and whose remaining elements are
lists of words.
- The header is a dictionary with 'format' and 'version' keys that make
sure that we're reading the right thing.
- Each inner list of words corresponds to a particular word frequency,
rounded to the nearest centibel -- that is, one tenth of a decibel, or
a factor of 10 ** .01.
0 cB represents a word that occurs with probability 1, so it is the only
word in the data (this of course doesn't happen). -200 cB represents a
word that occurs once per 100 tokens, -300 cB represents a word that
occurs once per 1000 tokens, and so on.
- The index of each list within the overall list (without the header) is
the negative of its frequency in centibels.
- Each inner list is sorted in alphabetical order.
As an example, consider a corpus consisting only of the words "red fish
blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red"
and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word
frequencies would decode to this:
[
{'format': 'cB', 'version': 1},
[], [], [], ... # 30 empty lists
['fish'],
[], [], [], ... # 29 more empty lists
['blue', 'red']
]
"""
with gzip.open(filename, 'rb') as infile:
data = msgpack.load(infile, raw=False)
header = data[0]
if (
not isinstance(header, dict) or header.get('format') != 'cB'
or header.get('version') != 1
):
raise ValueError("Unexpected header: %r" % header)
return data[1:]
def available_languages(wordlist='best'):
"""
Given a wordlist name, return a dictionary of language codes to filenames,
representing all the languages in which that wordlist is available.
"""
if wordlist == 'best':
available = available_languages('small')
available.update(available_languages('large'))
return available
elif wordlist == 'combined':
logger.warning(
"The 'combined' wordlists have been renamed to 'small'."
)
wordlist = 'small'
available = {}
for path in DATA_PATH.glob('*.msgpack.gz'):
if not path.name.startswith('_'):
list_name = path.name.split('.')[0]
name, lang = list_name.split('_')
if name == wordlist:
available[lang] = str(path)
return available
@lru_cache(maxsize=None)
def get_frequency_list(lang, wordlist='best', match_cutoff=30):
"""
Read the raw data from a wordlist file, returning it as a list of
lists. (See `read_cBpack` for what this represents.)
Because we use the `langcodes` module, we can handle slight
variations in language codes. For example, looking for 'pt-BR',
'pt_br', or even 'PT_BR' will get you the 'pt' (Portuguese) list.
Looking up the alternate code 'por' will also get the same list.
"""
available = available_languages(wordlist)
best, score = langcodes.best_match(lang, list(available),
min_score=match_cutoff)
if score == 0:
raise LookupError("No wordlist %r available for language %r"
% (wordlist, lang))
if best != lang:
logger.warning(
"You asked for word frequencies in language %r. Using the "
"nearest match, which is %r (%s)."
% (lang, best, langcodes.get(best).language_name('en'))
)
return read_cBpack(available[best])
def cB_to_freq(cB):
"""
Convert a word frequency from the logarithmic centibel scale that we use
internally, to a proportion from 0 to 1.
On this scale, 0 cB represents the maximum possible frequency of
1.0. -100 cB represents a word that happens 1 in 10 times,
-200 cB represents something that happens 1 in 100 times, and so on.
In general, x cB represents a frequency of 10 ** (x/100).
"""
if cB > 0:
raise ValueError(
"A frequency cannot be a positive number of centibels."
)
return 10 ** (cB / 100)
def cB_to_zipf(cB):
"""
Convert a word frequency from centibels to the Zipf scale
(see `zipf_to_freq`).
The Zipf scale is related to centibels, the logarithmic unit that wordfreq
uses internally, because the Zipf unit is simply the bel, with a different
zero point. To convert centibels to Zipf, add 900 and divide by 100.
"""
return (cB + 900) / 100
def zipf_to_freq(zipf):
"""
Convert a word frequency from the Zipf scale to a proportion between 0 and
1.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
"""
return 10 ** zipf / 1e9
def freq_to_zipf(freq):
"""
Convert a word frequency from a proportion between 0 and 1 to the
Zipf scale (see `zipf_to_freq`).
"""
return math.log(freq, 10) + 9
@lru_cache(maxsize=None)
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
"""
Get a word frequency list as a dictionary, mapping tokens to
frequencies as floating-point probabilities.
"""
freqs = {}
pack = get_frequency_list(lang, wordlist, match_cutoff)
for index, bucket in enumerate(pack):
freq = cB_to_freq(-index)
for word in bucket:
freqs[word] = freq
return freqs
def iter_wordlist(lang, wordlist='best'):
"""
Yield the words in a wordlist in approximate descending order of
frequency.
Because wordfreq rounds off its frequencies, the words will form 'bands'
with the same rounded frequency, appearing in alphabetical order within
each band.
"""
return itertools.chain(*get_frequency_list(lang, wordlist))
# This dict and inner function are used to implement a "drop everything" cache
# for word_frequency(); the overheads of lru_cache() are comparable to the time
# it takes to look up frequencies from scratch, so something faster is needed.
_wf_cache = {}
def _word_frequency(word, lang, wordlist, minimum):
tokens = lossy_tokenize(word, lang)
if not tokens:
return minimum
# Frequencies for multiple tokens are combined using the formula
# 1 / f = 1 / f1 + 1 / f2 + ...
# Thus the resulting frequency is less than any individual frequency, and
# the smallest frequency dominates the sum.
freqs = get_frequency_dict(lang, wordlist)
one_over_result = 0.0
for token in tokens:
if token not in freqs:
# If any word is missing, just return the default value
return minimum
one_over_result += 1.0 / freqs[token]
freq = 1.0 / one_over_result
if get_language_info(lang)['tokenizer'] == 'jieba':
# If we used the Jieba tokenizer, we could tokenize anything to match
# our wordlist, even nonsense. To counteract this, we multiply by a
# probability for each word break that was inferred.
freq /= INFERRED_SPACE_FACTOR ** (len(tokens) - 1)
# All our frequency data is only precise to within 1% anyway, so round
# it to 3 significant digits
unrounded = max(freq, minimum)
if unrounded == 0.:
return 0.
else:
leading_zeroes = math.floor(-math.log(unrounded, 10))
return round(unrounded, leading_zeroes + 3)
def word_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word` in the language with code `lang`, from the
specified `wordlist`.
These wordlists can be specified:
- 'large': a wordlist built from at least 5 sources, containing word
frequencies of 10^-8 and higher
- 'small': a wordlist built from at least 3 sources, containing word
frequencies of 10^-6 and higher
- 'best': uses 'large' if available, and 'small' otherwise
The value returned will always be at least as large as `minimum`.
You could set this value to 10^-8, for example, to return 10^-8 for
unknown words in the 'large' list instead of 0, avoiding a discontinuity.
"""
args = (word, lang, wordlist, minimum)
try:
return _wf_cache[args]
except KeyError:
if len(_wf_cache) >= CACHE_SIZE:
_wf_cache.clear()
_wf_cache[args] = _word_frequency(*args)
return _wf_cache[args]
def zipf_frequency(word, lang, wordlist='best', minimum=0.):
"""
Get the frequency of `word`, in the language with code `lang`, on the Zipf
scale.
The Zipf scale is a logarithmic frequency scale proposed by Marc Brysbaert,
who compiled the SUBTLEX data. The goal of the Zipf scale is to map
reasonable word frequencies to understandable, small positive numbers.
A word rates as x on the Zipf scale when it occurs 10**x times per billion
words. For example, a word that occurs once per million words is at 3.0 on
the Zipf scale.
Zipf values for reasonable words are between 0 and 8. The value this
function returns will always be at least as large as `minimum`, even for a
word that never appears. The default minimum is 0, representing words
that appear once per billion words or less.
wordfreq internally quantizes its frequencies to centibels, which are
1/100 of a Zipf unit. The output of `zipf_frequency` will be rounded to
the nearest hundredth to match this quantization.
"""
freq_min = zipf_to_freq(minimum)
freq = word_frequency(word, lang, wordlist, freq_min)
return round(freq_to_zipf(freq), 2)
@lru_cache(maxsize=100)
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    """
    Return a frequency list of length `n` in descending order of frequency,
    containing words from `wordlist` in the given language.

    If `ascii_only`, only ASCII words are considered.

    NOTE(review): the cached return value is a mutable list; a caller that
    mutates it would corrupt the cache entry. Presumably callers treat it
    as read-only -- worth confirming.
    """
    words = []
    for candidate in iter_wordlist(lang, wordlist):
        # max(candidate) <= '~' holds exactly when every character is ASCII.
        if ascii_only and max(candidate) > '~':
            continue
        words.append(candidate)
        if len(words) >= n:
            break
    return words
def random_words(lang='en', wordlist='best', nwords=5, bits_per_word=12,
                 ascii_only=False):
    """
    Return a string of random, space-separated words.

    The words are of the given language and from the given wordlist, and
    there will be `nwords` words in the string.

    `bits_per_word` determines the amount of entropy provided by each word;
    when it's higher, this function will choose from a larger list of
    words, some of which are more rare.

    You can restrict the selection of words to those written in ASCII
    characters by setting `ascii_only` to True.

    Raises ValueError if the wordlist can't supply `bits_per_word` bits of
    entropy per word.

    NOTE(review): this draws from the `random` module, which is not
    cryptographically secure. If the output is meant to serve as a
    passphrase (as the entropy framing suggests), `secrets.choice` would
    be the appropriate source of randomness -- confirm intended use before
    changing, since callers may rely on `random.seed` reproducibility.
    """
    n_choices = 2 ** bits_per_word
    choices = top_n_list(lang, n_choices, wordlist, ascii_only=ascii_only)
    if len(choices) < n_choices:
        raise ValueError(
            "There aren't enough words in the wordlist to provide %d bits of "
            "entropy per word." % bits_per_word
        )
    # `_` replaces the unused loop index from the original implementation.
    return ' '.join(random.choice(choices) for _ in range(nwords))
|
LuminosoInsight/wordfreq | wordfreq/chinese.py | jieba_tokenize | python | def jieba_tokenize(text, external_wordlist=False):
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
if jieba_orig_tokenizer is None:
jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
return jieba_orig_tokenizer.lcut(text)
else:
if jieba_tokenizer is None:
jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
# Tokenize the Simplified Chinese version of the text, but return
# those spans from the original text, even if it's in Traditional
# Chinese
tokens = []
for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
tokens.append(text[start:end])
return tokens | Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/chinese.py#L28-L59 | [
"def simplify_chinese(text):\n \"\"\"\n Convert Chinese text character-by-character to Simplified Chinese, for the\n purpose of looking up word frequencies.\n\n This is far too simple to be a proper Chinese-to-Chinese \"translation\"; it\n will sometimes produce nonsense words by simplifying characters that would\n not be simplified in context, or by simplifying words that would only be\n used in a Traditional Chinese locale. But the resulting text is still a\n reasonable key for looking up word frequenices.\n \"\"\"\n return text.translate(SIMPLIFIED_MAP).casefold()\n"
] | from pkg_resources import resource_filename
import jieba
import msgpack
import gzip
DICT_FILENAME = resource_filename('wordfreq', 'data/jieba_zh.txt')
ORIG_DICT_FILENAME = resource_filename('wordfreq', 'data/jieba_zh_orig.txt')
SIMP_MAP_FILENAME = resource_filename('wordfreq', 'data/_chinese_mapping.msgpack.gz')
SIMPLIFIED_MAP = msgpack.load(gzip.open(SIMP_MAP_FILENAME), raw=False)
jieba_tokenizer = None
jieba_orig_tokenizer = None
def simplify_chinese(text):
"""
Convert Chinese text character-by-character to Simplified Chinese, for the
purpose of looking up word frequencies.
This is far too simple to be a proper Chinese-to-Chinese "translation"; it
will sometimes produce nonsense words by simplifying characters that would
not be simplified in context, or by simplifying words that would only be
used in a Traditional Chinese locale. But the resulting text is still a
    reasonable key for looking up word frequencies.
"""
return text.translate(SIMPLIFIED_MAP).casefold()
|
LuminosoInsight/wordfreq | wordfreq/preprocess.py | preprocess_text | python | def preprocess_text(text, language):
# NFC or NFKC normalization, as needed for the language
info = get_language_info(language)
text = unicodedata.normalize(info['normal_form'], text)
# Transliteration of multi-script languages
if info['transliteration'] is not None:
text = transliterate(info['transliteration'], text)
# Abjad mark removal
if info['remove_marks']:
text = remove_marks(text)
# Case folding
if info['dotless_i']:
text = casefold_with_i_dots(text)
else:
text = text.casefold()
# Fixing of diacritics
if info['diacritics_under'] == 'commas':
text = cedillas_to_commas(text)
elif info['diacritics_under'] == 'cedillas':
text = commas_to_cedillas(text)
return text | This function applies pre-processing steps that convert forms of words
considered equivalent into one standardized form.
As one straightforward step, it case-folds the text. For the purposes of
wordfreq and related tools, a capitalized word shouldn't have a different
frequency from its lowercase version.
The steps that are applied in order, only some of which apply to each
language, are:
- NFC or NFKC normalization, as needed for the language
- Transliteration of multi-script languages
- Abjad mark removal
- Case folding
- Fixing of diacritics
We'll describe these steps out of order, to start with the more obvious
steps.
Case folding
------------
The most common effect of this function is that it case-folds alphabetic
text to lowercase:
>>> preprocess_text('Word', 'en')
'word'
This is proper Unicode-aware case-folding, so it eliminates distinctions
in lowercase letters that would not appear in uppercase. This accounts for
the German ß and the Greek final sigma:
>>> preprocess_text('groß', 'de')
'gross'
>>> preprocess_text('λέξις', 'el')
'λέξισ'
In Turkish (and Azerbaijani), case-folding is different, because the
uppercase and lowercase I come in two variants, one with a dot and one
without. They are matched in a way that preserves the number of dots, which
the usual pair of "I" and "i" do not.
>>> preprocess_text('HAKKINDA İSTANBUL', 'tr')
'hakkında istanbul'
Fixing of diacritics
--------------------
While we're talking about Turkish: the Turkish alphabet contains letters
with cedillas attached to the bottom. In the case of "ş" and "ţ", these
letters are very similar to two Romanian letters, "ș" and "ț", which have
separate _commas_ below them.
(Did you know that a cedilla is not the same as a comma under a letter? I
didn't until I started dealing with text normalization. My keyboard layout
even inputs a letter with a cedilla when you hit Compose+comma.)
Because these letters look so similar, and because some fonts only include
one pair of letters and not the other, there are many cases where the
letters are confused with each other. Our preprocessing normalizes these
Turkish and Romanian letters to the letters each language prefers.
>>> preprocess_text('kișinin', 'tr') # comma to cedilla
'kişinin'
>>> preprocess_text('ACELAŞI', 'ro') # cedilla to comma
'același'
Unicode normalization
---------------------
Unicode text is NFC normalized in most languages, removing trivial
distinctions between strings that should be considered equivalent in all
cases:
>>> word = preprocess_text('natu\N{COMBINING DIAERESIS}rlich', 'de')
>>> word
'natürlich'
>>> '\N{LATIN SMALL LETTER U WITH DIAERESIS}' in word
True
NFC normalization is sufficient (and NFKC normalization is a bit too strong)
for many languages that are written in cased, alphabetic scripts.
Languages in other scripts tend to need stronger normalization to properly
compare text. So we use NFC normalization when the language's script is
Latin, Greek, or Cyrillic, and we use NFKC normalization for all other
languages.
Here's an example in Japanese, where preprocessing changes the width (and
the case) of a Latin letter that's used as part of a word:
>>> preprocess_text('Uターン', 'ja')
'uターン'
In Korean, NFKC normalization is important because it aligns two different
ways of encoding text -- as individual letters that are grouped together
into square characters, or as the entire syllables that those characters
represent:
>>> word = '\u1102\u1161\u11c0\u1106\u1161\u11af'
>>> word
'낱말'
>>> len(word)
6
>>> word = preprocess_text(word, 'ko')
>>> word
'낱말'
>>> len(word)
2
Abjad mark removal
------------------
There are many abjad languages, such as Arabic, Hebrew, Persian, and Urdu,
where words can be marked with vowel points but rarely are. In languages
that use abjad scripts, we remove all modifiers that are classified by
Unicode as "marks". We also remove an Arabic character called the tatweel,
which is used to visually lengthen a word.
>>> preprocess_text("كَلِمَة", 'ar')
'كلمة'
>>> preprocess_text("الحمــــــد", 'ar')
'الحمد'
Transliteration of multi-script languages
-----------------------------------------
Some languages are written in multiple scripts, and require special care.
These languages include Chinese, Serbian, and Azerbaijani.
In Serbian, there is a well-established mapping from Cyrillic letters to
Latin letters. We apply this mapping so that Serbian is always represented
in Latin letters.
>>> preprocess_text('схваташ', 'sr')
'shvataš'
The transliteration is more complete than it needs to be to cover just
Serbian, so that -- for example -- borrowings from Russian can be
transliterated, instead of coming out in a mixed script.
>>> preprocess_text('культуры', 'sr')
"kul'tury"
Azerbaijani (Azeri) has a similar transliteration step to Serbian,
and then the Latin-alphabet text is handled similarly to Turkish.
>>> preprocess_text('бағырты', 'az')
'bağırtı'
We don't transliterate Traditional to Simplified Chinese in this step.
There are some steps where we unify them internally: see chinese.py
for more information. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/preprocess.py#L13-L196 | [
"def remove_marks(text):\n \"\"\"\n Remove decorations from words in abjad scripts:\n\n - Combining marks of class Mn, which tend to represent non-essential\n vowel markings.\n - Tatweels, horizontal segments that are used to extend or justify an\n Arabic word.\n \"\"\"\n return MARK_RE.sub('', text)\n",
"def transliterate(table, text):\n \"\"\"\n Transliterate text according to one of the tables above.\n\n `table` chooses the table. It looks like a language code but comes from a\n very restricted set:\n\n - 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the\n Latin alphabet.\n - 'az-Latn' means the same for Azerbaijani Cyrillic to Latn.\n \"\"\"\n if table == 'sr-Latn':\n return text.translate(SR_LATN_TABLE)\n elif table == 'az-Latn':\n return text.translate(AZ_LATN_TABLE)\n else:\n raise ValueError(\"Unknown transliteration table: {!r}\".format(table))\n",
"def casefold_with_i_dots(text):\n \"\"\"\n Convert capital I's and capital dotted İ's to lowercase in the way\n that's appropriate for Turkish and related languages, then case-fold\n the rest of the letters.\n \"\"\"\n text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')\n return text.casefold()\n",
"def cedillas_to_commas(text):\n \"\"\"\n Convert s and t with cedillas (ş and ţ) to commas (ș and ț), which is\n preferred in Romanian.\n\n Only the lowercase versions are replaced, because this assumes the\n text has already been case-folded.\n \"\"\"\n return text.replace(\n '\\N{LATIN SMALL LETTER S WITH CEDILLA}',\n '\\N{LATIN SMALL LETTER S WITH COMMA BELOW}'\n ).replace(\n '\\N{LATIN SMALL LETTER T WITH CEDILLA}',\n '\\N{LATIN SMALL LETTER T WITH COMMA BELOW}'\n )\n",
"def commas_to_cedillas(text):\n \"\"\"\n Convert s and t with commas (ș and ț) to cedillas (ş and ţ), which is\n preferred in Turkish.\n\n Only the lowercase versions are replaced, because this assumes the\n text has already been case-folded.\n \"\"\"\n return text.replace(\n '\\N{LATIN SMALL LETTER S WITH COMMA BELOW}',\n '\\N{LATIN SMALL LETTER S WITH CEDILLA}'\n ).replace(\n '\\N{LATIN SMALL LETTER T WITH COMMA BELOW}',\n '\\N{LATIN SMALL LETTER T WITH CEDILLA}'\n )\n"
] | import regex
import unicodedata
from .language_info import get_language_info
from .transliterate import transliterate
MARK_RE = regex.compile(r'[\p{Mn}\N{ARABIC TATWEEL}]', regex.V1)
DIGIT_RE = regex.compile('\d')
MULTI_DIGIT_RE = regex.compile('\d[\d.,]+')
def remove_marks(text):
"""
Remove decorations from words in abjad scripts:
- Combining marks of class Mn, which tend to represent non-essential
vowel markings.
- Tatweels, horizontal segments that are used to extend or justify an
Arabic word.
"""
return MARK_RE.sub('', text)
def casefold_with_i_dots(text):
"""
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
"""
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold()
def commas_to_cedillas(text):
"""
Convert s and t with commas (ș and ț) to cedillas (ş and ţ), which is
preferred in Turkish.
Only the lowercase versions are replaced, because this assumes the
text has already been case-folded.
"""
return text.replace(
'\N{LATIN SMALL LETTER S WITH COMMA BELOW}',
'\N{LATIN SMALL LETTER S WITH CEDILLA}'
).replace(
'\N{LATIN SMALL LETTER T WITH COMMA BELOW}',
'\N{LATIN SMALL LETTER T WITH CEDILLA}'
)
def cedillas_to_commas(text):
"""
Convert s and t with cedillas (ş and ţ) to commas (ș and ț), which is
preferred in Romanian.
Only the lowercase versions are replaced, because this assumes the
text has already been case-folded.
"""
return text.replace(
'\N{LATIN SMALL LETTER S WITH CEDILLA}',
'\N{LATIN SMALL LETTER S WITH COMMA BELOW}'
).replace(
'\N{LATIN SMALL LETTER T WITH CEDILLA}',
'\N{LATIN SMALL LETTER T WITH COMMA BELOW}'
)
def _sub_zeroes(match):
"""
Given a regex match, return what it matched with digits replaced by
zeroes.
"""
return DIGIT_RE.sub('0', match.group(0))
def smash_numbers(text):
"""
Replace sequences of multiple digits with zeroes, so we don't need to
distinguish the frequencies of thousands of numbers.
"""
return MULTI_DIGIT_RE.sub(_sub_zeroes, text)
|
LuminosoInsight/wordfreq | wordfreq/preprocess.py | casefold_with_i_dots | python | def casefold_with_i_dots(text):
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold() | Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/preprocess.py#L211-L218 | null | import regex
import unicodedata
from .language_info import get_language_info
from .transliterate import transliterate
MARK_RE = regex.compile(r'[\p{Mn}\N{ARABIC TATWEEL}]', regex.V1)
DIGIT_RE = regex.compile('\d')
MULTI_DIGIT_RE = regex.compile('\d[\d.,]+')
def preprocess_text(text, language):
"""
This function applies pre-processing steps that convert forms of words
considered equivalent into one standardized form.
As one straightforward step, it case-folds the text. For the purposes of
wordfreq and related tools, a capitalized word shouldn't have a different
frequency from its lowercase version.
The steps that are applied in order, only some of which apply to each
language, are:
- NFC or NFKC normalization, as needed for the language
- Transliteration of multi-script languages
- Abjad mark removal
- Case folding
- Fixing of diacritics
We'll describe these steps out of order, to start with the more obvious
steps.
Case folding
------------
The most common effect of this function is that it case-folds alphabetic
text to lowercase:
>>> preprocess_text('Word', 'en')
'word'
This is proper Unicode-aware case-folding, so it eliminates distinctions
in lowercase letters that would not appear in uppercase. This accounts for
the German ß and the Greek final sigma:
>>> preprocess_text('groß', 'de')
'gross'
>>> preprocess_text('λέξις', 'el')
'λέξισ'
In Turkish (and Azerbaijani), case-folding is different, because the
uppercase and lowercase I come in two variants, one with a dot and one
without. They are matched in a way that preserves the number of dots, which
the usual pair of "I" and "i" do not.
>>> preprocess_text('HAKKINDA İSTANBUL', 'tr')
'hakkında istanbul'
Fixing of diacritics
--------------------
While we're talking about Turkish: the Turkish alphabet contains letters
with cedillas attached to the bottom. In the case of "ş" and "ţ", these
letters are very similar to two Romanian letters, "ș" and "ț", which have
separate _commas_ below them.
(Did you know that a cedilla is not the same as a comma under a letter? I
didn't until I started dealing with text normalization. My keyboard layout
even inputs a letter with a cedilla when you hit Compose+comma.)
Because these letters look so similar, and because some fonts only include
one pair of letters and not the other, there are many cases where the
letters are confused with each other. Our preprocessing normalizes these
Turkish and Romanian letters to the letters each language prefers.
>>> preprocess_text('kișinin', 'tr') # comma to cedilla
'kişinin'
>>> preprocess_text('ACELAŞI', 'ro') # cedilla to comma
'același'
Unicode normalization
---------------------
Unicode text is NFC normalized in most languages, removing trivial
distinctions between strings that should be considered equivalent in all
cases:
>>> word = preprocess_text('natu\N{COMBINING DIAERESIS}rlich', 'de')
>>> word
'natürlich'
>>> '\N{LATIN SMALL LETTER U WITH DIAERESIS}' in word
True
NFC normalization is sufficient (and NFKC normalization is a bit too strong)
for many languages that are written in cased, alphabetic scripts.
Languages in other scripts tend to need stronger normalization to properly
compare text. So we use NFC normalization when the language's script is
Latin, Greek, or Cyrillic, and we use NFKC normalization for all other
languages.
Here's an example in Japanese, where preprocessing changes the width (and
the case) of a Latin letter that's used as part of a word:
>>> preprocess_text('Uターン', 'ja')
'uターン'
In Korean, NFKC normalization is important because it aligns two different
ways of encoding text -- as individual letters that are grouped together
into square characters, or as the entire syllables that those characters
represent:
>>> word = '\u1102\u1161\u11c0\u1106\u1161\u11af'
>>> word
'낱말'
>>> len(word)
6
>>> word = preprocess_text(word, 'ko')
>>> word
'낱말'
>>> len(word)
2
Abjad mark removal
------------------
There are many abjad languages, such as Arabic, Hebrew, Persian, and Urdu,
where words can be marked with vowel points but rarely are. In languages
that use abjad scripts, we remove all modifiers that are classified by
Unicode as "marks". We also remove an Arabic character called the tatweel,
which is used to visually lengthen a word.
>>> preprocess_text("كَلِمَة", 'ar')
'كلمة'
>>> preprocess_text("الحمــــــد", 'ar')
'الحمد'
Transliteration of multi-script languages
-----------------------------------------
Some languages are written in multiple scripts, and require special care.
These languages include Chinese, Serbian, and Azerbaijani.
In Serbian, there is a well-established mapping from Cyrillic letters to
Latin letters. We apply this mapping so that Serbian is always represented
in Latin letters.
>>> preprocess_text('схваташ', 'sr')
'shvataš'
The transliteration is more complete than it needs to be to cover just
Serbian, so that -- for example -- borrowings from Russian can be
transliterated, instead of coming out in a mixed script.
>>> preprocess_text('культуры', 'sr')
"kul'tury"
Azerbaijani (Azeri) has a similar transliteration step to Serbian,
and then the Latin-alphabet text is handled similarly to Turkish.
>>> preprocess_text('бағырты', 'az')
'bağırtı'
We don't transliterate Traditional to Simplified Chinese in this step.
There are some steps where we unify them internally: see chinese.py
for more information.
"""
# NFC or NFKC normalization, as needed for the language
info = get_language_info(language)
text = unicodedata.normalize(info['normal_form'], text)
# Transliteration of multi-script languages
if info['transliteration'] is not None:
text = transliterate(info['transliteration'], text)
# Abjad mark removal
if info['remove_marks']:
text = remove_marks(text)
# Case folding
if info['dotless_i']:
text = casefold_with_i_dots(text)
else:
text = text.casefold()
# Fixing of diacritics
if info['diacritics_under'] == 'commas':
text = cedillas_to_commas(text)
elif info['diacritics_under'] == 'cedillas':
text = commas_to_cedillas(text)
return text
def remove_marks(text):
"""
Remove decorations from words in abjad scripts:
- Combining marks of class Mn, which tend to represent non-essential
vowel markings.
- Tatweels, horizontal segments that are used to extend or justify an
Arabic word.
"""
return MARK_RE.sub('', text)
def commas_to_cedillas(text):
"""
Convert s and t with commas (ș and ț) to cedillas (ş and ţ), which is
preferred in Turkish.
Only the lowercase versions are replaced, because this assumes the
text has already been case-folded.
"""
return text.replace(
'\N{LATIN SMALL LETTER S WITH COMMA BELOW}',
'\N{LATIN SMALL LETTER S WITH CEDILLA}'
).replace(
'\N{LATIN SMALL LETTER T WITH COMMA BELOW}',
'\N{LATIN SMALL LETTER T WITH CEDILLA}'
)
def cedillas_to_commas(text):
"""
Convert s and t with cedillas (ş and ţ) to commas (ș and ț), which is
preferred in Romanian.
Only the lowercase versions are replaced, because this assumes the
text has already been case-folded.
"""
return text.replace(
'\N{LATIN SMALL LETTER S WITH CEDILLA}',
'\N{LATIN SMALL LETTER S WITH COMMA BELOW}'
).replace(
'\N{LATIN SMALL LETTER T WITH CEDILLA}',
'\N{LATIN SMALL LETTER T WITH COMMA BELOW}'
)
def _sub_zeroes(match):
"""
Given a regex match, return what it matched with digits replaced by
zeroes.
"""
return DIGIT_RE.sub('0', match.group(0))
def smash_numbers(text):
"""
Replace sequences of multiple digits with zeroes, so we don't need to
distinguish the frequencies of thousands of numbers.
"""
return MULTI_DIGIT_RE.sub(_sub_zeroes, text)
|
LuminosoInsight/wordfreq | wordfreq/language_info.py | _language_in_list | python | def _language_in_list(language, targets, min_score=80):
matched = best_match(language, targets, min_score=min_score)
return matched[1] > 0 | A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/language_info.py#L48-L57 | null | from functools import lru_cache
from langcodes import Language, best_match
# Text in scripts written without spaces has to be handled specially in our
# tokenization regex (see TOKEN_RE in tokens.py). Also, when one of these is
# the script of the language we're analyzing, then we need to either have
# a specific tokenizer for the language or give up.
SPACELESS_SCRIPTS = [
# Han ideographs are spaceless, but they don't need to appear in this list
# because _almost_ all of them, except for some exceptional Japanese
# characters, are covered by the \p{IsIdeo} check. Checking for
# Script=Hani and IsIdeo slows down our regexes with huge, redundant
# classes of characters. Instead, we'll list the exceptions below.
'Hira', # Hiragana
'Kana', # Katakana
'Thai', # Thai script
'Khmr', # Khmer script
'Laoo', # Lao script
'Mymr', # Burmese script
'Tale', # Tai Le script
'Talu', # Tai Lü script
'Lana', # Lanna script
]
EXTRA_JAPANESE_CHARACTERS = 'ー々〻〆'
# ー is a lengthening mark that's both hiragana and katakana. Unicode
# segmentation handles it as a special case, but we're overriding standard
# Unicode segmentation, so we need to have the special case too.
#
# 々 and 〻 are "iteration marks" that stand for the previous kanji. So they
# act identically to kanji (ideograms) without technically _being_ kanji. That
# technicality doesn't matter to us.
#
# 〆 is a Japanese abbreviation for "total", and even this can be used in the
# middle of words. Why isn't it just considered an ideograph? I don't know, I
# didn't come up with this language, or Unicode for that matter.
#
# None of this even comes up when we're trying to tokenize Chinese and
# Japanese. It comes up when we're trying to _not_ tokenize a word because
# it's Chinese or Japanese and the tokenization doesn't really matter, which
# happens in ConceptNet.
@lru_cache(maxsize=None)
def get_language_info(language):
"""
Looks up the things we need to know about how to handle text in a given
language. This will return a dictionary with the following fields:
'script': a BCP 47 script code such as 'Latn', 'Cyrl', 'Hans'...
Indicates the script that tokens in this language should be in,
_after_ our preprocessing. The script for 'zh' is 'Hans', for example,
because even if the input is in Traditional Chinese ('Hant'), we
convert it to Simplified.
'tokenizer': 'regex', 'jieba', 'mecab', or None
Indicates the best way we know to separate tokens in the language.
'regex' is what will be used for most languages, meaning that we can
segment the text with a Unicode-aware regular expression. If a language
generally uses spaces to separate words, the regex will work well.
'jieba' and 'mecab' are tokenizers for specific languages written
without spaces.
A tokenizer of None means we don't have a good way to segment the
language. We'll use the regex anyway, but the results will be pretty
bad.
'normal_form': 'NFC' or 'NFKC'
How "should" Unicode be normalized when comparing text in this
language? This is not a standard, it's just based on experience.
Many languages need NFKC normalization for text comparisons to work
properly, but in many European languages, NFKC normalization is
excessive and loses information.
'remove_marks': True or False
Determines whether marks and decorations, such as vowel points and
tatweels, should be removed. True for languages in abjad scripts.
'dotless_i': True or False
Is "ı" the lowercase of "I" in this language, as in Turkish?
'diacritics_under': 'cedillas', 'commas', or None
Should we convert any diacritics that are under the letters "s" and
"t" in this language? 'cedillas' means we should convert commas to
cedillas, and 'commas' means we should convert cedillas to commas.
'transliteration': 'sr-Latn', 'az-Latn', or None
Indicates a type of transliteration that we should use for normalizing
a multi-script language. 'sr-Latn' means to use Serbian romanization,
and 'az-Latn' means to use Azerbaijani romanization.
'lookup_transliteration': 'zh-Hans' or None
        Indicates a lossy transliteration that should not be used for output,
but should be applied when looking up words in a list. 'zh-Hans' means
that we should convert Traditional Chinese characters to Simplified.
"""
# The input is probably a string, so parse it into a Language. If it's
# already a Language, it will pass through.
language = Language.get(language)
# Assume additional things about the language, such as what script it's in,
# using the "likely subtags" table
language_full = language.maximize()
# Start the `info` dictionary with default values, including the 'script'
# value that we now know from `language_full`.
info = {
'script': language_full.script,
'tokenizer': 'regex',
'normal_form': 'NFKC',
'remove_marks': False,
'dotless_i': False,
'diacritics_under': None,
'transliteration': None,
'lookup_transliteration': None
}
if _language_in_list(language, ['ja', 'ko']):
info['tokenizer'] = 'mecab'
elif _language_in_list(language, ['zh', 'yue']):
info['tokenizer'] = 'jieba'
elif info['script'] in SPACELESS_SCRIPTS:
info['tokenizer'] = None
# Cased alphabetic scripts get NFC normal form
if info['script'] in ['Latn', 'Grek', 'Cyrl']:
info['normal_form'] = 'NFC'
if info['script'] in ['Arab', 'Hebr']:
info['remove_marks'] = True
if _language_in_list(language, ['tr', 'az', 'kk']):
info['dotless_i'] = True
info['diacritics_under'] = 'cedillas'
elif _language_in_list(language, ['ro']):
info['diacritics_under'] = 'commas'
if _language_in_list(language, ['sr']):
info['transliteration'] = 'sr-Latn'
elif _language_in_list(language, ['az']):
info['transliteration'] = 'az-Latn'
if language.language == 'zh' and language.script != 'Hant':
info['lookup_transliteration'] = 'zh-Hans'
return info
|
LuminosoInsight/wordfreq | wordfreq/language_info.py | get_language_info | python | def get_language_info(language):
# The input is probably a string, so parse it into a Language. If it's
# already a Language, it will pass through.
language = Language.get(language)
# Assume additional things about the language, such as what script it's in,
# using the "likely subtags" table
language_full = language.maximize()
# Start the `info` dictionary with default values, including the 'script'
# value that we now know from `language_full`.
info = {
'script': language_full.script,
'tokenizer': 'regex',
'normal_form': 'NFKC',
'remove_marks': False,
'dotless_i': False,
'diacritics_under': None,
'transliteration': None,
'lookup_transliteration': None
}
if _language_in_list(language, ['ja', 'ko']):
info['tokenizer'] = 'mecab'
elif _language_in_list(language, ['zh', 'yue']):
info['tokenizer'] = 'jieba'
elif info['script'] in SPACELESS_SCRIPTS:
info['tokenizer'] = None
# Cased alphabetic scripts get NFC normal form
if info['script'] in ['Latn', 'Grek', 'Cyrl']:
info['normal_form'] = 'NFC'
if info['script'] in ['Arab', 'Hebr']:
info['remove_marks'] = True
if _language_in_list(language, ['tr', 'az', 'kk']):
info['dotless_i'] = True
info['diacritics_under'] = 'cedillas'
elif _language_in_list(language, ['ro']):
info['diacritics_under'] = 'commas'
if _language_in_list(language, ['sr']):
info['transliteration'] = 'sr-Latn'
elif _language_in_list(language, ['az']):
info['transliteration'] = 'az-Latn'
if language.language == 'zh' and language.script != 'Hant':
info['lookup_transliteration'] = 'zh-Hans'
return info | Looks up the things we need to know about how to handle text in a given
language. This will return a dictionary with the following fields:
'script': a BCP 47 script code such as 'Latn', 'Cyrl', 'Hans'...
Indicates the script that tokens in this language should be in,
_after_ our preprocessing. The script for 'zh' is 'Hans', for example,
because even if the input is in Traditional Chinese ('Hant'), we
convert it to Simplified.
'tokenizer': 'regex', 'jieba', 'mecab', or None
Indicates the best way we know to separate tokens in the language.
'regex' is what will be used for most languages, meaning that we can
segment the text with a Unicode-aware regular expression. If a language
generally uses spaces to separate words, the regex will work well.
'jieba' and 'mecab' are tokenizers for specific languages written
without spaces.
A tokenizer of None means we don't have a good way to segment the
language. We'll use the regex anyway, but the results will be pretty
bad.
'normal_form': 'NFC' or 'NFKC'
How "should" Unicode be normalized when comparing text in this
language? This is not a standard, it's just based on experience.
Many languages need NFKC normalization for text comparisons to work
properly, but in many European languages, NFKC normalization is
excessive and loses information.
'remove_marks': True or False
Determines whether marks and decorations, such as vowel points and
tatweels, should be removed. True for languages in abjad scripts.
'dotless_i': True or False
Is "ı" the lowercase of "I" in this language, as in Turkish?
'diacritics_under': 'cedillas', 'commas', or None
Should we convert any diacritics that are under the letters "s" and
"t" in this language? 'cedillas' means we should convert commas to
cedillas, and 'commas' means we should convert cedillas to commas.
'transliteration': 'sr-Latn', 'az-Latn', or None
Indicates a type of transliteration that we should use for normalizing
a multi-script language. 'sr-Latn' means to use Serbian romanization,
and 'az-Latn' means to use Azerbaijani romanization.
'lookup_transliteration': 'zh-Hans' or None
Indicates a lossy transliteration that should not be used for output,
but should be applied when looking up words in a list. 'zh-Hans' means
that we should convert Traditional Chinese characters to Simplified. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/language_info.py#L61-L172 | [
"def _language_in_list(language, targets, min_score=80):\n \"\"\"\n A helper function to determine whether this language matches one of the\n target languages, with a match score above a certain threshold.\n\n The languages can be given as strings (language tags) or as Language\n objects. `targets` can be any iterable of such languages.\n \"\"\"\n matched = best_match(language, targets, min_score=min_score)\n return matched[1] > 0\n"
] | from functools import lru_cache
from langcodes import Language, best_match
# Text in scripts written without spaces has to be handled specially in our
# tokenization regex (see TOKEN_RE in tokens.py). Also, when one of these is
# the script of the language we're analyzing, then we need to either have
# a specific tokenizer for the language or give up.
SPACELESS_SCRIPTS = [
# Han ideographs are spaceless, but they don't need to appear in this list
# because _almost_ all of them, except for some exceptional Japanese
# characters, are covered by the \p{IsIdeo} check. Checking for
# Script=Hani and IsIdeo slows down our regexes with huge, redundant
# classes of characters. Instead, we'll list the exceptions below.
'Hira', # Hiragana
'Kana', # Katakana
'Thai', # Thai script
'Khmr', # Khmer script
'Laoo', # Lao script
'Mymr', # Burmese script
'Tale', # Tai Le script
'Talu', # Tai Lü script
'Lana', # Lanna script
]
EXTRA_JAPANESE_CHARACTERS = 'ー々〻〆'
# ー is a lengthening mark that's both hiragana and katakana. Unicode
# segmentation handles it as a special case, but we're overriding standard
# Unicode segmentation, so we need to have the special case too.
#
# 々 and 〻 are "iteration marks" that stand for the previous kanji. So they
# act identically to kanji (ideograms) without technically _being_ kanji. That
# technicality doesn't matter to us.
#
# 〆 is a Japanese abbreviation for "total", and even this can be used in the
# middle of words. Why isn't it just considered an ideograph? I don't know, I
# didn't come up with this language, or Unicode for that matter.
#
# None of this even comes up when we're trying to tokenize Chinese and
# Japanese. It comes up when we're trying to _not_ tokenize a word because
# it's Chinese or Japanese and the tokenization doesn't really matter, which
# happens in ConceptNet.
def _language_in_list(language, targets, min_score=80):
"""
A helper function to determine whether this language matches one of the
target languages, with a match score above a certain threshold.
The languages can be given as strings (language tags) or as Language
objects. `targets` can be any iterable of such languages.
"""
matched = best_match(language, targets, min_score=min_score)
return matched[1] > 0
@lru_cache(maxsize=None)
|
LuminosoInsight/wordfreq | wordfreq/mecab.py | find_mecab_dictionary | python | def find_mecab_dictionary(names):
suggested_pkg = names[0]
paths = [
os.path.expanduser('~/.local/lib/mecab/dic'),
'/var/lib/mecab/dic',
'/var/local/lib/mecab/dic',
'/usr/lib/mecab/dic',
'/usr/local/lib/mecab/dic',
'/usr/lib/x86_64-linux-gnu/mecab/dic',
]
full_paths = [os.path.join(path, name) for path in paths for name in names]
checked_paths = [path for path in full_paths if len(path) <= MAX_PATH_LENGTH]
for path in checked_paths:
if os.path.exists(path):
return path
error_lines = [
"Couldn't find the MeCab dictionary named %r." % suggested_pkg,
"You should download or use your system's package manager to install",
"the %r package." % suggested_pkg,
"",
"We looked in the following locations:"
] + ["\t%s" % path for path in checked_paths]
skipped_paths = [path for path in full_paths if len(path) > MAX_PATH_LENGTH]
if skipped_paths:
error_lines += [
"We had to skip these paths that are too long for MeCab to find:",
] + ["\t%s" % path for path in skipped_paths]
raise OSError('\n'.join(error_lines)) | Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/mecab.py#L11-L45 | null | from pkg_resources import resource_filename
import MeCab
import unicodedata
import os
# MeCab has fixed-sized buffers for many things, including the dictionary path
MAX_PATH_LENGTH = 58
def make_mecab_analyzer(names):
"""
Get a MeCab analyzer object, given a list of names the dictionary might
have.
"""
return MeCab.Tagger('-d %s' % find_mecab_dictionary(names))
# Describe how to get the MeCab analyzers for each language.
MECAB_DICTIONARY_NAMES = {
'ja': ['mecab-ipadic-utf8', 'ipadic-utf8'],
'ko': ['mecab-ko-dic', 'ko-dic']
}
# The constructed analyzers will go in this dictionary.
MECAB_ANALYZERS = {}
def mecab_tokenize(text, lang):
"""
Use the mecab-python3 package to tokenize the given text. The `lang`
must be 'ja' for Japanese or 'ko' for Korean.
The simplest output from mecab-python3 is the single-string form, which
contains the same table that the command-line version of MeCab would output.
We find the tokens in the first column of this table.
"""
if lang not in MECAB_DICTIONARY_NAMES:
raise ValueError("Can't run MeCab on language %r" % lang)
if lang not in MECAB_ANALYZERS:
MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang])
analyzer = MECAB_ANALYZERS[lang]
text = unicodedata.normalize('NFKC', text.strip())
analyzed = analyzer.parse(text)
if not analyzed:
return []
return [line.split('\t')[0]
for line in analyzed.split('\n')
if line != '' and line != 'EOS']
|
LuminosoInsight/wordfreq | wordfreq/mecab.py | mecab_tokenize | python | def mecab_tokenize(text, lang):
if lang not in MECAB_DICTIONARY_NAMES:
raise ValueError("Can't run MeCab on language %r" % lang)
if lang not in MECAB_ANALYZERS:
MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang])
analyzer = MECAB_ANALYZERS[lang]
text = unicodedata.normalize('NFKC', text.strip())
analyzed = analyzer.parse(text)
if not analyzed:
return []
return [line.split('\t')[0]
for line in analyzed.split('\n')
if line != '' and line != 'EOS'] | Use the mecab-python3 package to tokenize the given text. The `lang`
must be 'ja' for Japanese or 'ko' for Korean.
The simplest output from mecab-python3 is the single-string form, which
contains the same table that the command-line version of MeCab would output.
We find the tokens in the first column of this table. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/mecab.py#L65-L86 | [
"def make_mecab_analyzer(names):\n \"\"\"\n Get a MeCab analyzer object, given a list of names the dictionary might\n have.\n \"\"\"\n return MeCab.Tagger('-d %s' % find_mecab_dictionary(names))\n"
] | from pkg_resources import resource_filename
import MeCab
import unicodedata
import os
# MeCab has fixed-sized buffers for many things, including the dictionary path
MAX_PATH_LENGTH = 58
def find_mecab_dictionary(names):
"""
Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions.
"""
suggested_pkg = names[0]
paths = [
os.path.expanduser('~/.local/lib/mecab/dic'),
'/var/lib/mecab/dic',
'/var/local/lib/mecab/dic',
'/usr/lib/mecab/dic',
'/usr/local/lib/mecab/dic',
'/usr/lib/x86_64-linux-gnu/mecab/dic',
]
full_paths = [os.path.join(path, name) for path in paths for name in names]
checked_paths = [path for path in full_paths if len(path) <= MAX_PATH_LENGTH]
for path in checked_paths:
if os.path.exists(path):
return path
error_lines = [
"Couldn't find the MeCab dictionary named %r." % suggested_pkg,
"You should download or use your system's package manager to install",
"the %r package." % suggested_pkg,
"",
"We looked in the following locations:"
] + ["\t%s" % path for path in checked_paths]
skipped_paths = [path for path in full_paths if len(path) > MAX_PATH_LENGTH]
if skipped_paths:
error_lines += [
"We had to skip these paths that are too long for MeCab to find:",
] + ["\t%s" % path for path in skipped_paths]
raise OSError('\n'.join(error_lines))
def make_mecab_analyzer(names):
"""
Get a MeCab analyzer object, given a list of names the dictionary might
have.
"""
return MeCab.Tagger('-d %s' % find_mecab_dictionary(names))
# Describe how to get the MeCab analyzers for each language.
MECAB_DICTIONARY_NAMES = {
'ja': ['mecab-ipadic-utf8', 'ipadic-utf8'],
'ko': ['mecab-ko-dic', 'ko-dic']
}
# The constructed analyzers will go in this dictionary.
MECAB_ANALYZERS = {}
|
LuminosoInsight/wordfreq | wordfreq/transliterate.py | transliterate | python | def transliterate(table, text):
if table == 'sr-Latn':
return text.translate(SR_LATN_TABLE)
elif table == 'az-Latn':
return text.translate(AZ_LATN_TABLE)
else:
raise ValueError("Unknown transliteration table: {!r}".format(table)) | Transliterate text according to one of the tables above.
`table` chooses the table. It looks like a language code but comes from a
very restricted set:
- 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the
Latin alphabet.
- 'az-Latn' means the same for Azerbaijani Cyrillic to Latn. | train | https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/transliterate.py#L92-L108 | null | # This table comes from https://github.com/opendatakosovo/cyrillic-transliteration/blob/master/cyrtranslit/mapping.py,
# from the 'cyrtranslit' module. We originally had to reimplement it because
# 'cyrtranslit' didn't work in Python 3; now it does, but we've made the table
# more robust than the one in cyrtranslit.
SR_LATN_TABLE = {
ord('А'): 'A', ord('а'): 'a',
ord('Б'): 'B', ord('б'): 'b',
ord('В'): 'V', ord('в'): 'v',
ord('Г'): 'G', ord('г'): 'g',
ord('Д'): 'D', ord('д'): 'd',
ord('Ђ'): 'Đ', ord('ђ'): 'đ',
ord('Е'): 'E', ord('е'): 'e',
ord('Ж'): 'Ž', ord('ж'): 'ž',
ord('З'): 'Z', ord('з'): 'z',
ord('И'): 'I', ord('и'): 'i',
ord('Ј'): 'J', ord('ј'): 'j',
ord('К'): 'K', ord('к'): 'k',
ord('Л'): 'L', ord('л'): 'l',
ord('Љ'): 'Lj', ord('љ'): 'lj',
ord('М'): 'M', ord('м'): 'm',
ord('Н'): 'N', ord('н'): 'n',
ord('Њ'): 'Nj', ord('њ'): 'nj',
ord('О'): 'O', ord('о'): 'o',
ord('П'): 'P', ord('п'): 'p',
ord('Р'): 'R', ord('р'): 'r',
ord('С'): 'S', ord('с'): 's',
ord('Т'): 'T', ord('т'): 't',
ord('Ћ'): 'Ć', ord('ћ'): 'ć',
ord('У'): 'U', ord('у'): 'u',
ord('Ф'): 'F', ord('ф'): 'f',
ord('Х'): 'H', ord('х'): 'h',
ord('Ц'): 'C', ord('ц'): 'c',
ord('Ч'): 'Č', ord('ч'): 'č',
ord('Џ'): 'Dž', ord('џ'): 'dž',
ord('Ш'): 'Š', ord('ш'): 'š',
# Handle Cyrillic letters from other languages. We hope these cases don't
# come up often when we're trying to transliterate Serbian, but if these
# letters show up in loan-words or code-switching text, we can at least
# transliterate them approximately instead of leaving them as Cyrillic
# letters surrounded by Latin.
# Russian letters
ord('Ё'): 'Jo', ord('ё'): 'jo',
ord('Й'): 'J', ord('й'): 'j',
ord('Щ'): 'Šč', ord('щ'): 'šč',
ord('Ъ'): '', ord('ъ'): '',
ord('Ы'): 'Y', ord('ы'): 'y',
ord('Ь'): "'", ord('ь'): "'",
ord('Э'): 'E', ord('э'): 'e',
ord('Ю'): 'Ju', ord('ю'): 'ju',
ord('Я'): 'Ja', ord('я'): 'ja',
# Belarusian letter
ord('Ў'): 'Ŭ', ord('ў'): 'ŭ',
# Ukrainian letters
ord('Є'): 'Je', ord('є'): 'je',
ord('І'): 'I', ord('і'): 'i',
ord('Ї'): 'Ï', ord('ї'): 'ï',
ord('Ґ'): 'G', ord('ґ'): 'g',
# Macedonian letters
ord('Ѕ'): 'Dz', ord('ѕ'): 'dz',
ord('Ѓ'): 'Ǵ', ord('ѓ'): 'ǵ',
ord('Ќ'): 'Ḱ', ord('ќ'): 'ḱ',
}
AZ_LATN_TABLE = SR_LATN_TABLE.copy()
AZ_LATN_TABLE.update({
# Distinct Azerbaijani letters
ord('Ҹ'): 'C', ord('ҹ'): 'c',
ord('Ә'): 'Ə', ord('ә'): 'ə',
ord('Ғ'): 'Ğ', ord('ғ'): 'ğ',
ord('Һ'): 'H', ord('һ'): 'h',
ord('Ө'): 'Ö', ord('ө'): 'ö',
ord('Ҝ'): 'G', ord('ҝ'): 'g',
ord('Ү'): 'Ü', ord('ү'): 'ü',
# Azerbaijani letters with different transliterations
ord('Ч'): 'Ç', ord('ч'): 'ç',
ord('Х'): 'X', ord('х'): 'x',
ord('Ы'): 'I', ord('ы'): 'ı',
ord('И'): 'İ', ord('и'): 'i',
ord('Ж'): 'J', ord('ж'): 'j',
ord('Ј'): 'Y', ord('ј'): 'y',
ord('Г'): 'Q', ord('г'): 'q',
ord('Ш'): 'Ş', ord('ш'): 'ş',
})
|
bfontaine/term2048 | term2048/game.py | Game.adjustColors | python | def adjustColors(self, mode='dark'):
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color) | Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L97-L106 | null | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.loadBestScore | python | def loadBestScore(self):
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True | load local best score from the default file | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L108-L117 | null | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.saveBestScore | python | def saveBestScore(self):
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True | save current best score in the default file | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L119-L130 | null | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.incScore | python | def incScore(self, pts):
self.score += pts
if self.score > self.best_score:
self.best_score = self.score | update the current score by adding it the specified number of points | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L132-L138 | null | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.store | python | def store(self):
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True | save the current game session's score and data for further use | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L147-L165 | [
"def getCell(self, x, y):\n \"\"\"return the cell value at x,y\"\"\"\n return self.cells[y][x]\n"
] | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.restore | python | def restore(self):
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True | restore the saved game score and data | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L167-L191 | [
"def setCell(self, x, y, v):\n \"\"\"set the cell value at x,y\"\"\"\n self.cells[y][x] = v\n"
] | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.loop | python | def loop(self):
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score | main game loop. returns the final score. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L216-L252 | [
"def won(self):\n \"\"\"\n return True if the board contains at least one tile with the board goal\n \"\"\"\n return self.__won\n",
"def saveBestScore(self):\n \"\"\"\n save current best score in the default file\n \"\"\"\n if self.score > self.best_score:\n self.best_score = self.score\n try:\n with open(self.scores_file, 'w') as f:\n f.write(str(self.best_score))\n except:\n return False\n return True\n",
"def clearScreen(self):\n \"\"\"Clear the console\"\"\"\n if self.clear_screen:\n os.system('cls' if self.__is_windows else 'clear')\n else:\n print('\\n')\n",
"def hideCursor(self):\n \"\"\"\n Hide the cursor. Don't forget to call ``showCursor`` to restore\n the normal shell behavior. This is a no-op if ``clear_screen`` is\n falsy.\n \"\"\"\n if not self.clear_screen:\n return\n if not self.__is_windows:\n sys.stdout.write('\\033[?25l')\n",
"def __str__(self, margins=None):\n if margins is None:\n margins = {}\n b = self.boardToString(margins=margins)\n top = '\\n'*margins.get('top', 0)\n bottom = '\\n'*margins.get('bottom', 0)\n scores = ' \\tScore: %5d Best: %5d\\n' % (self.score, self.best_score)\n return top + b.replace('\\n', scores, 1) + bottom\n"
] | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.getCellStr | python | def getCellStr(self, x, y): # TODO: refactor regarding issue #11
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL | return a string representation of the cell located at x,y. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L254-L278 | [
"def goal(self):\n \"\"\"return the board goal\"\"\"\n return self.__goal\n",
"def getCell(self, x, y):\n \"\"\"return the cell value at x,y\"\"\"\n return self.cells[y][x]\n"
] | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def boardToString(self, margins=None):
"""
return a string representation of the current board.
"""
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/game.py | Game.boardToString | python | def boardToString(self, margins=None):
if margins is None:
margins = {}
b = self.board
rg = range(b.size())
left = ' '*margins.get('left', 0)
s = '\n'.join(
[left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])
return s | return a string representation of the current board. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/game.py#L280-L292 | null | class Game(object):
"""
A 2048 game
"""
__dirs = {
keypress.UP: Board.UP,
keypress.DOWN: Board.DOWN,
keypress.LEFT: Board.LEFT,
keypress.RIGHT: Board.RIGHT,
keypress.SPACE: Board.PAUSE,
}
__is_windows = os.name == 'nt'
COLORS = {
2: Fore.GREEN,
4: Fore.BLUE + Style.BRIGHT,
8: Fore.CYAN,
16: Fore.RED,
# Don't use MAGENTA directly; it doesn't display well on Windows.
# see https://github.com/bfontaine/term2048/issues/24
32: Fore.MAGENTA + Style.BRIGHT,
64: Fore.CYAN,
128: Fore.BLUE + Style.BRIGHT,
256: Fore.MAGENTA + Style.BRIGHT,
512: Fore.GREEN,
1024: Fore.RED,
2048: Fore.YELLOW,
# just in case people set an higher goal they still have colors
4096: Fore.RED,
8192: Fore.CYAN,
}
# see Game#adjustColors
# these are color replacements for various modes
__color_modes = {
'dark': {
Fore.BLUE: Fore.WHITE,
Fore.BLUE + Style.BRIGHT: Fore.WHITE,
},
'light': {
Fore.YELLOW: Fore.BLACK,
},
}
SCORES_FILE = '%s/.term2048.scores' % os.path.expanduser('~')
STORE_FILE = '%s/.term2048.store' % os.path.expanduser('~')
def __init__(self, scores_file=SCORES_FILE, colors=None,
store_file=STORE_FILE, clear_screen=True,
mode=None, azmode=False, **kws):
"""
Create a new game.
scores_file: file to use for the best score (default
is ~/.term2048.scores)
colors: dictionnary with colors to use for each tile
store_file: file that stores game session's snapshot
mode: color mode. This adjust a few colors and can be 'dark' or
'light'. See the adjustColors functions for more info.
other options are passed to the underlying Board object.
"""
self.board = Board(**kws)
self.score = 0
self.scores_file = scores_file
self.store_file = store_file
self.clear_screen = clear_screen
self.best_score = 0
self.__colors = colors or self.COLORS
self.__azmode = azmode
self.loadBestScore()
self.adjustColors(mode)
def adjustColors(self, mode='dark'):
"""
Change a few colors depending on the mode to use. The default mode
doesn't assume anything and avoid using white & black colors. The dark
mode use white and avoid dark blue while the light mode use black and
avoid yellow, to give a few examples.
"""
rp = Game.__color_modes.get(mode, {})
for k, color in self.__colors.items():
self.__colors[k] = rp.get(color, color)
def loadBestScore(self):
"""
load local best score from the default file
"""
try:
with open(self.scores_file, 'r') as f:
self.best_score = int(f.readline(), 10)
except:
return False
return True
def saveBestScore(self):
"""
save current best score in the default file
"""
if self.score > self.best_score:
self.best_score = self.score
try:
with open(self.scores_file, 'w') as f:
f.write(str(self.best_score))
except:
return False
return True
def incScore(self, pts):
"""
update the current score by adding it the specified number of points
"""
self.score += pts
if self.score > self.best_score:
self.best_score = self.score
def readMove(self):
"""
read and return a move to pass to a board
"""
k = keypress.getKey()
return Game.__dirs.get(k)
def store(self):
"""
save the current game session's score and data for further use
"""
size = self.board.SIZE
cells = []
for i in range(size):
for j in range(size):
cells.append(str(self.board.getCell(j, i)))
score_str = "%s\n%d" % (' '.join(cells), self.score)
try:
with open(self.store_file, 'w') as f:
f.write(score_str)
except:
return False
return True
def restore(self):
"""
restore the saved game score and data
"""
size = self.board.SIZE
try:
with open(self.store_file, 'r') as f:
lines = f.readlines()
score_str = lines[0]
self.score = int(lines[1])
except:
return False
score_str_list = score_str.split(' ')
count = 0
for i in range(size):
for j in range(size):
value = score_str_list[count]
self.board.setCell(j, i, int(value))
count += 1
return True
def clearScreen(self):
"""Clear the console"""
if self.clear_screen:
os.system('cls' if self.__is_windows else 'clear')
else:
print('\n')
def hideCursor(self):
"""
Hide the cursor. Don't forget to call ``showCursor`` to restore
the normal shell behavior. This is a no-op if ``clear_screen`` is
falsy.
"""
if not self.clear_screen:
return
if not self.__is_windows:
sys.stdout.write('\033[?25l')
def showCursor(self):
"""Show the cursor."""
if not self.__is_windows:
sys.stdout.write('\033[?25h')
def loop(self):
"""
main game loop. returns the final score.
"""
pause_key = self.board.PAUSE
margins = {'left': 4, 'top': 4, 'bottom': 4}
atexit.register(self.showCursor)
try:
self.hideCursor()
while True:
self.clearScreen()
print(self.__str__(margins=margins))
if self.board.won() or not self.board.canMove():
break
m = self.readMove()
if m == pause_key:
self.saveBestScore()
if self.store():
print("Game successfully saved. "
"Resume it with `term2048 --resume`.")
return self.score
print("An error ocurred while saving your game.")
return None
self.incScore(self.board.move(m))
except KeyboardInterrupt:
self.saveBestScore()
return None
self.saveBestScore()
print('You won!' if self.board.won() else 'Game Over')
return self.score
def getCellStr(self, x, y): # TODO: refactor regarding issue #11
"""
return a string representation of the cell located at x,y.
"""
c = self.board.getCell(x, y)
if c == 0:
return '.' if self.__azmode else ' .'
elif self.__azmode:
az = {}
for i in range(1, int(math.log(self.board.goal(), 2))):
az[2 ** i] = chr(i + 96)
if c not in az:
return '?'
s = az[c]
elif c == 1024:
s = ' 1k'
elif c == 2048:
s = ' 2k'
else:
s = '%3d' % c
return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
def __str__(self, margins=None):
if margins is None:
margins = {}
b = self.boardToString(margins=margins)
top = '\n'*margins.get('top', 0)
bottom = '\n'*margins.get('bottom', 0)
scores = ' \tScore: %5d Best: %5d\n' % (self.score, self.best_score)
return top + b.replace('\n', scores, 1) + bottom
|
bfontaine/term2048 | term2048/board.py | Board.canMove | python | def canMove(self):
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False | test if a move is possible | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L49-L63 | [
"def filled(self):\n \"\"\"\n return true if the game is filled\n \"\"\"\n return len(self.getEmptyCells()) == 0\n"
] | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.addTile | python | def addTile(self, value=None, choices=None):
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v) | add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L71-L89 | [
"def setCell(self, x, y, v):\n \"\"\"set the cell value at x,y\"\"\"\n self.cells[y][x] = v\n",
"def getEmptyCells(self):\n \"\"\"return a (x, y) pair for each empty cell\"\"\"\n return [(x, y)\n for x in self.__size_range\n for y in self.__size_range if self.getCell(x, y) == 0]\n"
] | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.setCell | python | def setCell(self, x, y, v):
self.cells[y][x] = v | set the cell value at x,y | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L95-L97 | null | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.getCol | python | def getCol(self, x):
return [self.getCell(x, i) for i in self.__size_range] | return the x-th column, starting at 0 | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L103-L105 | null | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.setCol | python | def setCol(self, x, l):
for i in xrange(0, self.__size):
self.setCell(x, i, l[i]) | set the x-th column, starting at 0 | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L111-L114 | [
"def setCell(self, x, y, v):\n \"\"\"set the cell value at x,y\"\"\"\n self.cells[y][x] = v\n"
] | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.getEmptyCells | python | def getEmptyCells(self):
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0] | return a (x, y) pair for each empty cell | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L116-L120 | null | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.__collapseLineOrCol | python | def __collapseLineOrCol(self, line, d):
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts) | Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L122-L147 | null | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
def move(self, d, add_tile=True):
    """
    move and return the move score
    """
    # Pick the row/column accessors matching the axis of the move.
    if d == Board.LEFT or d == Board.RIGHT:
        chg, get = self.setLine, self.getLine
    elif d == Board.UP or d == Board.DOWN:
        chg, get = self.setCol, self.getCol
    else:
        # Unknown direction (e.g. PAUSE): no-op, zero score.
        return 0
    moved = False
    score = 0
    for i in self.__size_range:
        # save the original line/col
        origin = get(i)
        # move it
        line = self.__moveLineOrCol(origin, d)
        # merge adjacent tiles
        collapsed, pts = self.__collapseLineOrCol(line, d)
        # move it again (for when tiles are merged, because empty cells are
        # inserted in the middle of the line/col)
        new = self.__moveLineOrCol(collapsed, d)
        # set it back in the board
        chg(i, new)
        # did it change?
        if origin != new:
            moved = True
        score += pts
    # don't add a new tile if nothing changed
    if moved and add_tile:
        self.addTile()
    return score
|
bfontaine/term2048 | term2048/board.py | Board.__moveLineOrCol | python | def __moveLineOrCol(self, line, d):
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl | Move a line or column to a given direction (d) | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L149-L156 | null | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def move(self, d, add_tile=True):
"""
move and return the move score
"""
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score
|
bfontaine/term2048 | term2048/board.py | Board.move | python | def move(self, d, add_tile=True):
if d == Board.LEFT or d == Board.RIGHT:
chg, get = self.setLine, self.getLine
elif d == Board.UP or d == Board.DOWN:
chg, get = self.setCol, self.getCol
else:
return 0
moved = False
score = 0
for i in self.__size_range:
# save the original line/col
origin = get(i)
# move it
line = self.__moveLineOrCol(origin, d)
# merge adjacent tiles
collapsed, pts = self.__collapseLineOrCol(line, d)
# move it again (for when tiles are merged, because empty cells are
# inserted in the middle of the line/col)
new = self.__moveLineOrCol(collapsed, d)
# set it back in the board
chg(i, new)
# did it change?
if origin != new:
moved = True
score += pts
# don't add a new tile if nothing changed
if moved and add_tile:
self.addTile()
return score | move and return the move score | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L158-L193 | [
"def addTile(self, value=None, choices=None):\n \"\"\"\n add a random tile in an empty cell\n value: value of the tile to add.\n choices: a list of possible choices for the value of the tile. if\n ``None`` (the default), it uses\n ``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.\n \"\"\"\n if choices is None:\n choices = [2] * 9 + [4]\n\n if value:\n choices = [value]\n\n v = random.choice(choices)\n empty = self.getEmptyCells()\n if empty:\n x, y = random.choice(empty)\n self.setCell(x, y, v)\n",
"def getLine(self, y):\n \"\"\"return the y-th line, starting at 0\"\"\"\n return self.cells[y]\n",
"def getCol(self, x):\n \"\"\"return the x-th column, starting at 0\"\"\"\n return [self.getCell(x, i) for i in self.__size_range]\n",
"def setLine(self, y, l):\n \"\"\"set the y-th line, starting at 0\"\"\"\n self.cells[y] = l[:]\n",
"def setCol(self, x, l):\n \"\"\"set the x-th column, starting at 0\"\"\"\n for i in xrange(0, self.__size):\n self.setCell(x, i, l[i])\n",
"def __collapseLineOrCol(self, line, d):\n \"\"\"\n Merge tiles in a line or column according to a direction and return a\n tuple with the new line and the score for the move on this line\n \"\"\"\n if (d == Board.LEFT or d == Board.UP):\n inc = 1\n rg = xrange(0, self.__size-1, inc)\n else:\n inc = -1\n rg = xrange(self.__size-1, 0, inc)\n\n pts = 0\n for i in rg:\n if line[i] == 0:\n continue\n if line[i] == line[i+inc]:\n v = line[i]*2\n if v == self.__goal:\n self.__won = True\n\n line[i] = v\n line[i+inc] = 0\n pts += v\n\n return (line, pts)\n",
"def __moveLineOrCol(self, line, d):\n \"\"\"\n Move a line or column to a given direction (d)\n \"\"\"\n nl = [c for c in line if c != 0]\n if d == Board.UP or d == Board.LEFT:\n return nl + [0] * (self.__size - len(nl))\n return [0] * (self.__size - len(nl)) + nl\n"
] | class Board(object):
"""
A 2048 board
"""
UP, DOWN, LEFT, RIGHT, PAUSE = 1, 2, 3, 4, 5
GOAL = 2048
SIZE = 4
def __init__(self, goal=GOAL, size=SIZE, **_kwargs):
self.__size = size
self.__size_range = xrange(0, self.__size)
self.__goal = goal
self.__won = False
self.cells = [[0]*self.__size for _ in xrange(self.__size)]
self.addTile()
self.addTile()
def size(self):
"""return the board size"""
return self.__size
def goal(self):
"""return the board goal"""
return self.__goal
def won(self):
"""
return True if the board contains at least one tile with the board goal
"""
return self.__won
def canMove(self):
"""
test if a move is possible
"""
if not self.filled():
return True
for y in self.__size_range:
for x in self.__size_range:
c = self.getCell(x, y)
if (x < self.__size-1 and c == self.getCell(x+1, y)) \
or (y < self.__size-1 and c == self.getCell(x, y+1)):
return True
return False
def filled(self):
"""
return true if the game is filled
"""
return len(self.getEmptyCells()) == 0
def addTile(self, value=None, choices=None):
"""
add a random tile in an empty cell
value: value of the tile to add.
choices: a list of possible choices for the value of the tile. if
``None`` (the default), it uses
``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
"""
if choices is None:
choices = [2] * 9 + [4]
if value:
choices = [value]
v = random.choice(choices)
empty = self.getEmptyCells()
if empty:
x, y = random.choice(empty)
self.setCell(x, y, v)
def getCell(self, x, y):
"""return the cell value at x,y"""
return self.cells[y][x]
def setCell(self, x, y, v):
"""set the cell value at x,y"""
self.cells[y][x] = v
def getLine(self, y):
"""return the y-th line, starting at 0"""
return self.cells[y]
def getCol(self, x):
"""return the x-th column, starting at 0"""
return [self.getCell(x, i) for i in self.__size_range]
def setLine(self, y, l):
"""set the y-th line, starting at 0"""
self.cells[y] = l[:]
def setCol(self, x, l):
"""set the x-th column, starting at 0"""
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
def getEmptyCells(self):
"""return a (x, y) pair for each empty cell"""
return [(x, y)
for x in self.__size_range
for y in self.__size_range if self.getCell(x, y) == 0]
def __collapseLineOrCol(self, line, d):
"""
Merge tiles in a line or column according to a direction and return a
tuple with the new line and the score for the move on this line
"""
if (d == Board.LEFT or d == Board.UP):
inc = 1
rg = xrange(0, self.__size-1, inc)
else:
inc = -1
rg = xrange(self.__size-1, 0, inc)
pts = 0
for i in rg:
if line[i] == 0:
continue
if line[i] == line[i+inc]:
v = line[i]*2
if v == self.__goal:
self.__won = True
line[i] = v
line[i+inc] = 0
pts += v
return (line, pts)
def __moveLineOrCol(self, line, d):
"""
Move a line or column to a given direction (d)
"""
nl = [c for c in line if c != 0]
if d == Board.UP or d == Board.LEFT:
return nl + [0] * (self.__size - len(nl))
return [0] * (self.__size - len(nl)) + nl
|
bfontaine/term2048 | term2048/ui.py | parse_cli_args | python | def parse_cli_args():
parser = argparse.ArgumentParser(description='2048 in your terminal')
parser.add_argument('--mode', dest='mode', type=str,
default=None, help='colors mode (dark or light)')
parser.add_argument('--az', dest='azmode', action='store_true',
help='Use the letters a-z instead of numbers')
parser.add_argument('--resume', dest='resume', action='store_true',
help='restart the game from where you left')
parser.add_argument('-v', '--version', action='store_true')
parser.add_argument('-r', '--rules', action='store_true')
return vars(parser.parse_args()) | parse args from the CLI and return a dict | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/ui.py#L30-L41 | null | # -*- coding: UTF-8 -*-
"""
UI-related functions
"""
from __future__ import print_function
import sys
import argparse
from term2048.game import Game
def print_version_and_exit():
    """Write the installed term2048 version to stdout, then terminate."""
    from term2048 import __version__
    message = "term2048 v%s" % __version__
    print(message)
    sys.exit(0)
def print_rules_and_exit():
    """Display how to play 2048, then terminate with status 0."""
    rules = """Use your arrow keys to move the tiles.
When two tiles with the same value touch they merge into one with the sum of
their value! Try to reach 2048 to win."""
    print(rules)
    sys.exit(0)
def start_game(debug=False):
    """
    Start a new game. If ``debug`` is set to ``True``, the game object is
    returned and the game loop isn't fired.
    """
    args = parse_cli_args()
    # --version / --rules short-circuit (and sys.exit) before any game
    # state is built.
    if args['version']:
        print_version_and_exit()
    if args['rules']:
        print_rules_and_exit()
    game = Game(**args)
    # --resume reloads the previously stored board and score.
    if args['resume']:
        game.restore()
    if debug:
        return game
    return game.loop()
|
bfontaine/term2048 | term2048/ui.py | start_game | python | def start_game(debug=False):
args = parse_cli_args()
if args['version']:
print_version_and_exit()
if args['rules']:
print_rules_and_exit()
game = Game(**args)
if args['resume']:
game.restore()
if debug:
return game
return game.loop() | Start a new game. If ``debug`` is set to ``True``, the game object is
returned and the game loop isn't fired. | train | https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/ui.py#L44-L64 | [
"def print_version_and_exit():\n \"\"\"print term2048's current version and exit\"\"\"\n from term2048 import __version__\n print(\"term2048 v%s\" % __version__)\n sys.exit(0)\n",
"def print_rules_and_exit():\n \"\"\"print 2048's rules and exit\"\"\"\n print(\"\"\"Use your arrow keys to move the tiles.\nWhen two tiles with the same value touch they merge into one with the sum of\ntheir value! Try to reach 2048 to win.\"\"\")\n sys.exit(0)\n",
"def parse_cli_args():\n \"\"\"parse args from the CLI and return a dict\"\"\"\n parser = argparse.ArgumentParser(description='2048 in your terminal')\n parser.add_argument('--mode', dest='mode', type=str,\n default=None, help='colors mode (dark or light)')\n parser.add_argument('--az', dest='azmode', action='store_true',\n help='Use the letters a-z instead of numbers')\n parser.add_argument('--resume', dest='resume', action='store_true',\n help='restart the game from where you left')\n parser.add_argument('-v', '--version', action='store_true')\n parser.add_argument('-r', '--rules', action='store_true')\n return vars(parser.parse_args())\n",
"def restore(self):\n \"\"\"\n restore the saved game score and data\n \"\"\"\n\n size = self.board.SIZE\n\n try:\n with open(self.store_file, 'r') as f:\n lines = f.readlines()\n score_str = lines[0]\n self.score = int(lines[1])\n except:\n return False\n\n score_str_list = score_str.split(' ')\n count = 0\n\n for i in range(size):\n for j in range(size):\n value = score_str_list[count]\n self.board.setCell(j, i, int(value))\n count += 1\n\n return True\n",
"def loop(self):\n \"\"\"\n main game loop. returns the final score.\n \"\"\"\n pause_key = self.board.PAUSE\n margins = {'left': 4, 'top': 4, 'bottom': 4}\n\n atexit.register(self.showCursor)\n\n try:\n self.hideCursor()\n while True:\n self.clearScreen()\n print(self.__str__(margins=margins))\n if self.board.won() or not self.board.canMove():\n break\n m = self.readMove()\n\n if m == pause_key:\n self.saveBestScore()\n if self.store():\n print(\"Game successfully saved. \"\n \"Resume it with `term2048 --resume`.\")\n return self.score\n\n print(\"An error ocurred while saving your game.\")\n return None\n\n self.incScore(self.board.move(m))\n\n except KeyboardInterrupt:\n self.saveBestScore()\n return None\n\n self.saveBestScore()\n print('You won!' if self.board.won() else 'Game Over')\n return self.score\n"
] | # -*- coding: UTF-8 -*-
"""
UI-related functions
"""
from __future__ import print_function
import sys
import argparse
from term2048.game import Game
def print_version_and_exit():
"""print term2048's current version and exit"""
from term2048 import __version__
print("term2048 v%s" % __version__)
sys.exit(0)
def print_rules_and_exit():
"""print 2048's rules and exit"""
print("""Use your arrow keys to move the tiles.
When two tiles with the same value touch they merge into one with the sum of
their value! Try to reach 2048 to win.""")
sys.exit(0)
def parse_cli_args():
    """Parse command-line options and return them as a plain dict."""
    parser = argparse.ArgumentParser(description='2048 in your terminal')
    flags = [
        (('--mode',), dict(dest='mode', type=str, default=None,
                           help='colors mode (dark or light)')),
        (('--az',), dict(dest='azmode', action='store_true',
                         help='Use the letters a-z instead of numbers')),
        (('--resume',), dict(dest='resume', action='store_true',
                             help='restart the game from where you left')),
        (('-v', '--version'), dict(action='store_true')),
        (('-r', '--rules'), dict(action='store_true')),
    ]
    for names, options in flags:
        parser.add_argument(*names, **options)
    return vars(parser.parse_args())
|
charnley/rmsd | rmsd/calculate_rmsd.py | rmsd | python | def rmsd(V, W):
D = len(V[0])
N = len(V)
result = 0.0
for v, w in zip(V, W):
result += sum([(v[i] - w[i])**2.0 for i in range(D)])
return np.sqrt(result/N) | Calculate Root-mean-square deviation from two sets of vectors V and W.
Parameters
----------
V : array
(N,D) matrix, where N is points and D is dimension.
W : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
Root-mean-square deviation between the two vectors | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L40-L61 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the x, y, z axes; used when searching over axis
# swaps of a coordinate frame (see check_reflections).
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# All 8 sign combinations for the x, y, z axes (mirror reflections).
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def kabsch_rmsd(P, Q, translate=False):
    """
    RMSD between P and Q after optimally rotating P onto Q (Kabsch).

    Parameters
    ----------
    P : array
        (N,D) coordinate matrix.
    Q : array
        (N,D) coordinate matrix.
    translate : bool
        If True, center both point sets on their centroids first.

    Returns
    -------
    float
        Root-mean-square deviation after alignment.
    """
    if translate:
        P = P - centroid(P)
        Q = Q - centroid(Q)
    rotated = kabsch_rotate(P, Q)
    return rmsd(rotated, Q)
def kabsch_rotate(P, Q):
    """
    Return P rotated onto Q by the optimal Kabsch rotation.

    Parameters
    ----------
    P, Q : array
        (N,D) coordinate matrices.

    Returns
    -------
    array
        (N,D) matrix: P expressed in the rotated frame.
    """
    return np.dot(P, kabsch(P, Q))
def kabsch(P, Q):
    """
    Optimal rotation matrix aligning P onto Q (Kabsch algorithm).

    Both point sets are assumed to already be centered on their
    centroids. The covariance matrix C = P^T Q is decomposed with SVD,
    and the handedness of the resulting frame is corrected so that the
    returned matrix is a proper rotation (no reflection).
    See http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P, Q : array
        (N,D) coordinate matrices.

    Returns
    -------
    U : matrix
        (D,D) rotation matrix.
    """
    covariance = np.dot(P.T, Q)
    V, S, W = np.linalg.svd(covariance)
    # A negative determinant product means the SVD frame is left-handed;
    # flip the last singular direction to restore a proper rotation.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    RMSD between P and Q after quaternion-based optimal rotation.

    Based on doi:10.1016/1049-9660(91)90036-O.

    Parameters
    ----------
    P, Q : array
        (N,D) coordinate matrices.

    Returns
    -------
    float
        Root-mean-square deviation after alignment.
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Build the 3x3 rotation matrix encoded by quaternion r.

    Translation is zero when both molecules share the same centroid.
    """
    return makeW(*r).T.dot(makeQ(*r))[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """W matrix used in the quaternion rotation formulation."""
    return np.asarray([
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def makeQ(r1, r2, r3, r4=0):
    """Q matrix used in the quaternion rotation formulation."""
    return np.asarray([
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def quaternion_rotate(X, Y):
    """
    Calculate the optimal rotation aligning X onto Y via quaternions.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Per-point quaternion matrices for the target (W) and source (Q) sets.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    # The optimal quaternion is the eigenvector of A with the largest
    # eigenvalue; np.linalg.eigh returns eigenvalues in ascending order.
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Mean position of all points in X, per coordinate axis.

    C = sum(X) / len(X)  (https://en.wikipedia.org/wiki/Centroid)

    Parameters
    ----------
    X : array
        (N,D) coordinate matrix.

    Returns
    -------
    array
        Length-D centroid vector.
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Match atoms of Q to atoms of P by sorting, per element type, on each
    atom's distance from the origin.

    Parameters
    ----------
    p_atoms, q_atoms : array
        (N,) atom-type labels for each structure.
    p_coord, q_coord : array
        (N,D) coordinate matrices.

    Returns
    -------
    array
        (N,) index view such that q_coord[view] aligns with p_coord.
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        # Rank both same-element subsets by distance from the origin.
        order_p = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        order_q = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project Q's ranking back onto P's original ordering.
        view_reorder[p_idx] = q_idx[order_q[np.argsort(order_p)]]
    return view_reorder
def hungarian(A, B):
    """
    Optimal assignment of rows of B to rows of A (Hungarian algorithm)
    on the Euclidean distance matrix.

    A and B must hold coordinates of atoms of the SAME type only.
    """
    cost = cdist(A, B, 'euclidean')
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Match atoms of Q to atoms of P, per element type, using the
    Hungarian assignment on pairwise distances.

    Parameters
    ----------
    p_atoms, q_atoms : array
        (N,) atom-type labels for each structure.
    p_coord, q_coord : array
        (N,D) coordinate matrices.

    Returns
    -------
    array
        (N,) index view such that q_coord[view] aligns with p_coord.
    """
    # -1 marks slots not assigned yet (all should be overwritten below).
    view_reorder = np.zeros(q_atoms.shape, dtype=int) - 1
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[assignment]
    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: yields the SAME list object every time, mutated in place;
    copy it (e.g. ``list(p)``) if a permutation must be retained.
    """
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Which element is swapped with position i depends on the
            # parity of i (Heap's rule).
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Find the row ordering of B minimizing the Kabsch RMSD against A by
    exhaustively trying every permutation (O(N!) — small N only).

    Parameters
    ----------
    A, B : array
        (N,D) coordinate matrices.

    Returns
    -------
    list
        Row permutation of B that best matches A.
    """
    best_rmsd = np.inf
    best_view = None
    n = A.shape[0]
    for candidate in generate_permutations(list(range(n)), n):
        # Score this ordering of B against A.
        score = kabsch_rmsd(A, B[candidate])
        if score < best_rmsd:
            best_rmsd = score
            # The generator mutates its list in place, so copy it.
            best_view = copy.deepcopy(candidate)
    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Match atoms of Q to atoms of P, per element type, by brute-force
    enumeration of all row permutations.

    Parameters
    ----------
    p_atoms, q_atoms : array
        (N,) atom-type labels for each structure.
    p_coord, q_coord : array
        (N,D) coordinate matrices.

    Returns
    -------
    array
        (N,) index view such that q_coord[view] aligns with p_coord.
    """
    # -1 marks slots not assigned yet (all should be overwritten below).
    view_reorder = np.zeros(q_atoms.shape, dtype=int) - 1
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best]
    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity of each axis swap / reflection; the product i*j tells
    # whether the combined transform preserves chirality (+1) or
    # inverts it (-1).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    # Try every axis permutation x sign-flip combination of Q.
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply the axis permutation, then the sign flips, then
            # recenter on the centroid.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # Sanity check: the winning reordering must map Q's atom labels
    # exactly onto P's.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format atoms and coordinates V as an XYZ-format string.

    Parameters
    ----------
    atoms : list
        Atomic type labels.
    V : array
        (N,3) matrix of atomic coordinates.
    title : string (optional)
        Molecule title placed on the second line.
    decimals : int (optional)
        Number of decimals for each coordinate.

    Return
    ------
    str
        Molecule in XYZ format.
    """
    N, D = V.shape
    fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3
    out = [str(N), title]
    for i in range(N):
        # Capitalize only the first character of the element label.
        label = atoms[i]
        label = label[0].upper() + label[1:]
        out.append(fmt.format(label, V[i, 0], V[i, 1], V[i, 2]))
    return "\n".join(out)
def print_coordinates(atoms, V, title=""):
    """
    Write atoms and coordinates V to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        Atomic type labels.
    V : array
        (N,3) matrix of atomic coordinates.
    title : string (optional)
        Molecule title.
    """
    print(set_coordinates(atoms, V, title=title))
def get_coordinates(filename, fmt):
    """
    Read coordinates from filename in the given format.

    Parameters
    ----------
    filename : string
        File to read.
    fmt : string
        Format of the file; either "xyz" or "pdb".

    Returns
    -------
    atoms : list
        Atomic type labels.
    V : array
        (N,3) where N is number of atoms.
    """
    parsers = {"xyz": get_coordinates_xyz, "pdb": get_coordinates_pdb}
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Atomic type labels (one character each)
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal are
    # used. Since the format doesn't require a space between columns, we
    # use the above column indices as a fallback.
    x_column = None
    V = list()

    # The most robust way to parse the atom type is to assume it is
    # given in column 3 (tokens[2]).
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()

    for line in lines:
        # Only the first chain is read.
        if line.startswith("TER") or line.startswith("END"):
            break
        if not line.startswith("ATOM"):
            continue
        tokens = line.split()

        # Parse the atom type; accept e.g. "1HD1" as hydrogen.
        try:
            atom = tokens[2][0]
            if atom in ("H", "C", "N", "O", "S", "P"):
                atoms.append(atom)
            else:
                atom = tokens[2][1]
                if atom == "H":
                    atoms.append(atom)
                else:
                    raise ValueError(atom)
        except (IndexError, ValueError):
            exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

        if x_column is None:
            # Locate the first of three consecutive tokens that all
            # contain a decimal point: taken to be x, y and z.
            try:
                for i, x in enumerate(tokens):
                    if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                        x_column = i
                        break
            except IndexError:
                exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

        # Try the whitespace-split tokens first; fall back to the fixed
        # column positions mandated by the PDB format.
        try:
            V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
        except (ValueError, TypeError):
            try:
                x = line[30:38]
                y = line[38:46]
                z = line[46:54]
                V.append(np.asarray([x, y, z], dtype=float))
            except ValueError:
                exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
"""
Get coordinates from filename and return a vectorset with all the
coordinates, in XYZ format.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
f = open(filename, 'r')
V = list()
atoms = list()
n_atoms = 0
# Read the first line to obtain the number of atoms to read
try:
n_atoms = int(f.readline())
except ValueError:
exit("error: Could not obtain the number of atoms in the .xyz file.")
# Skip the title line
f.readline()
# Use the number of atoms to not read beyond the end of a file
for lines_read, line in enumerate(f):
if lines_read == n_atoms:
break
atom = re.findall(r'[a-zA-Z]+', line)[0]
atom = atom.upper()
numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
numbers = [float(number) for number in numbers]
# The numbers are not valid unless we obtain exacly three
if len(numbers) >= 3:
V.append(np.array(numbers)[:3])
atoms.append(atom)
else:
exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
f.close()
atoms = np.array(atoms)
V = np.array(V)
return atoms, V
def main():
import argparse
import sys
description = __doc__
version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
version_msg = version_msg.format(__version__)
epilog = """
"""
parser = argparse.ArgumentParser(
usage='calculate_rmsd [options] FILE_A FILE_B',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
# Input structures
parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
parser.add_argument('structure_b', metavar='FILE_B', type=str)
# Admin
parser.add_argument('-v', '--version', action='version', version=version_msg)
# Rotation
parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
# Reorder arguments
parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
# Filter
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
# format and print
parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# As default, load the extension as format
if args.format is None:
args.format = args.structure_a.split('.')[-1]
p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
p_size = p_all.shape[0]
q_size = q_all.shape[0]
if not p_size == q_size:
print("error: Structures not same size")
quit()
if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
print(msg)
exit()
# Set local view
p_view = None
q_view = None
if args.no_hydrogen:
p_view = np.where(p_all_atoms != 'H')
q_view = np.where(q_all_atoms != 'H')
elif args.remove_idx:
index = range(p_size)
index = set(index) - set(args.remove_idx)
index = list(index)
p_view = index
q_view = index
elif args.add_idx:
p_view = args.add_idx
q_view = args.add_idx
# Set local view
if p_view is None:
p_coord = copy.deepcopy(p_all)
q_coord = copy.deepcopy(q_all)
p_atoms = copy.deepcopy(p_all_atoms)
q_atoms = copy.deepcopy(q_all_atoms)
else:
if args.reorder and args.output:
print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
quit()
if args.use_reflections and args.output:
print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
quit()
p_coord = copy.deepcopy(p_all[p_view])
q_coord = copy.deepcopy(q_all[q_view])
p_atoms = copy.deepcopy(p_all_atoms[p_view])
q_atoms = copy.deepcopy(q_all_atoms[q_view])
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
p_cent = centroid(p_coord)
q_cent = centroid(q_coord)
p_coord -= p_cent
q_coord -= q_cent
# set rotation method
if args.rotation.lower() == "kabsch":
rotation_method = kabsch_rmsd
elif args.rotation.lower() == "quaternion":
rotation_method = quaternion_rmsd
elif args.rotation.lower() == "none":
rotation_method = None
else:
print("error: Unknown rotation method:", args.rotation)
quit()
# set reorder method
if not args.reorder:
reorder_method = None
if args.reorder_method == "hungarian":
reorder_method = reorder_hungarian
elif args.reorder_method == "brute":
reorder_method = reorder_brute
elif args.reorder_method == "distance":
reorder_method = reorder_distance
else:
print("error: Unknown reorder method:", args.reorder_method)
quit()
# Save the resulting RMSD
result_rmsd = None
if args.use_reflections:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method)
elif args.use_reflections_keep_stereo:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method,
keep_stereo=True)
elif args.reorder:
q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
q_coord = q_coord[q_review]
q_atoms = q_atoms[q_review]
if not all(p_atoms == q_atoms):
print("error: Structure not aligned")
quit()
# print result
if args.output:
if args.reorder:
if q_review.shape[0] != q_all.shape[0]:
print("error: Reorder length error. Full atom list needed for --print")
quit()
q_all = q_all[q_review]
q_all_atoms = q_all_atoms[q_review]
# Get rotation matrix
U = kabsch(q_coord, p_coord)
# recenter all atoms and rotate all atoms
q_all -= q_cent
q_all = np.dot(q_all, U)
# center q on p's original coordinates
q_all += p_cent
# done and done
xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
print(xyz)
else:
if result_rmsd:
pass
elif rotation_method is None:
result_rmsd = rmsd(p_coord, q_coord)
else:
result_rmsd = rotation_method(p_coord, q_coord)
print("{0}".format(result_rmsd))
return
if __name__ == "__main__":
main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | kabsch_rmsd | python | def kabsch_rmsd(P, Q, translate=False):
if translate:
Q = Q - centroid(Q)
P = P - centroid(P)
P = kabsch_rotate(P, Q)
return rmsd(P, Q) | Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
translate : bool
Use centroids to translate vector P and Q unto each other.
Returns
-------
rmsd : float
root-mean squared deviation | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L64-L87 | [
"def centroid(X):\n \"\"\"\n Centroid is the mean position of all the points in all of the coordinate\n directions, from a vectorset X.\n\n https://en.wikipedia.org/wiki/Centroid\n\n C = sum(X)/len(X)\n\n Parameters\n ----------\n X : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n C : float\n centroid\n \"\"\"\n C = X.mean(axis=0)\n return C\n",
"def kabsch_rotate(P, Q):\n \"\"\"\n Rotate matrix P unto matrix Q using Kabsch algorithm.\n\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n P : array\n (N,D) matrix, where N is points and D is dimension,\n rotated\n\n \"\"\"\n U = kabsch(P, Q)\n\n # Rotate P\n P = np.dot(P, U)\n return P\n",
"def rmsd(V, W):\n \"\"\"\n Calculate Root-mean-square deviation from two sets of vectors V and W.\n\n Parameters\n ----------\n V : array\n (N,D) matrix, where N is points and D is dimension.\n W : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n rmsd : float\n Root-mean-square deviation between the two vectors\n \"\"\"\n D = len(V[0])\n N = len(V)\n result = 0.0\n for v, w in zip(V, W):\n result += sum([(v[i] - w[i])**2.0 for i in range(D)])\n return np.sqrt(result/N)\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
"""
Calculate Root-mean-square deviation from two sets of vectors V and W.
Parameters
----------
V : array
(N,D) matrix, where N is points and D is dimension.
W : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
Root-mean-square deviation between the two vectors
"""
D = len(V[0])
N = len(V)
result = 0.0
for v, w in zip(V, W):
result += sum([(v[i] - w[i])**2.0 for i in range(D)])
return np.sqrt(result/N)
def kabsch_rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
P : array
(N,D) matrix, where N is points and D is dimension,
rotated
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def kabsch(P, Q):
"""
Using the Kabsch algorithm with two sets of paired point P and Q, centered
around the centroid. Each vector set is represented as an NxD
matrix, where D is the the dimension of the space.
The algorithm works in three steps:
- a centroid translation of P and Q (assumed done before this function
call)
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
U : matrix
Rotation matrix (D,D)
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
def quaternion_rmsd(P, Q):
"""
Rotate matrix P unto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q)
def quaternion_transform(r):
"""
Get optimal rotation
note: translation will be zero when the centroids of each molecule are the
same
"""
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3, :3]
return rot
def makeW(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
W = np.asarray([
[r4, r3, -r2, r1],
[-r3, r4, r1, r2],
[r2, -r1, r4, r3],
[-r1, -r2, -r3, r4]])
return W
def makeQ(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
Q = np.asarray([
[r4, -r3, r2, r1],
[r3, r4, -r1, r2],
[-r2, r1, r4, r3],
[-r1, -r2, -r3, r4]])
return Q
def quaternion_rotate(X, Y):
"""
Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D)
"""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
def centroid(X):
"""
Centroid is the mean position of all the points in all of the coordinate
directions, from a vectorset X.
https://en.wikipedia.org/wiki/Centroid
C = sum(X)/len(X)
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
C : float
centroid
"""
C = X.mean(axis=0)
return C
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
# Project the order of P onto Q
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def hungarian(A, B):
"""
Hungarian reordering.
Assume A and B are coordinates for atoms of SAME type only
"""
# should be kabasch here i think
distances = cdist(A, B, 'euclidean')
# Perform Hungarian analysis on distance matrix between atoms of 1st
# structure and trial structure
indices_a, indices_b = linear_sum_assignment(distances)
return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using the Hungarian
method (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = hungarian(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def generate_permutations(elements, n):
"""
Heap's algorithm for generating all n! permutations in a list
https://en.wikipedia.org/wiki/Heap%27s_algorithm
"""
c = [0] * n
yield elements
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
elements[0], elements[i] = elements[i], elements[0]
else:
elements[c[i]], elements[i] = elements[i], elements[c[i]]
yield elements
c[i] += 1
i = 0
else:
c[i] = 0
i += 1
def brute_permutation(A, B):
"""
Re-orders the input atom list and xyz coordinates using the brute force
method of permuting all rows of the input coordinates
Parameters
----------
A : array
(N,D) matrix, where N is points and D is dimension
B : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view : array
(N,1) matrix, reordered view of B projected to A
"""
rmsd_min = np.inf
view_min = None
# Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
# brute-force method
num_atoms = A.shape[0]
initial_order = list(range(num_atoms))
for reorder_indices in generate_permutations(initial_order, num_atoms):
# Re-order the atom array and coordinate matrix
coords_ordered = B[reorder_indices]
# Calculate the RMSD between structure 1 and the Hungarian re-ordered
# structure 2
rmsd_temp = kabsch_rmsd(A, coords_ordered)
# Replaces the atoms and coordinates with the current structure if the
# RMSD is lower
if rmsd_temp < rmsd_min:
rmsd_min = rmsd_temp
view_min = copy.deepcopy(reorder_indices)
return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using all permutation of
rows (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = brute_permutation(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
reorder_method=reorder_hungarian,
rotation_method=kabsch_rmsd,
keep_stereo=False):
"""
Minimize RMSD using reflection planes for molecule P and Q
Warning: This will affect stereo-chemistry
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
min_rmsd
min_swap
min_reflection
min_review
"""
min_rmsd = np.inf
min_swap = None
min_reflection = None
min_review = None
tmp_review = None
swap_mask = [1,-1,-1,1,-1,1]
reflection_mask = [1,-1,-1,-1,1,1,1,-1]
for swap, i in zip(AXIS_SWAPS, swap_mask):
for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
if keep_stereo and i * j == -1: continue # skip enantiomers
tmp_atoms = copy.copy(q_atoms)
tmp_coord = copy.deepcopy(q_coord)
tmp_coord = tmp_coord[:, swap]
tmp_coord = np.dot(tmp_coord, np.diag(reflection))
tmp_coord -= centroid(tmp_coord)
# Reorder
if reorder_method is not None:
tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
tmp_coord = tmp_coord[tmp_review]
tmp_atoms = tmp_atoms[tmp_review]
# Rotation
if rotation_method is None:
this_rmsd = rmsd(p_coord, tmp_coord)
else:
this_rmsd = rotation_method(p_coord, tmp_coord)
if this_rmsd < min_rmsd:
min_rmsd = this_rmsd
min_swap = swap
min_reflection = reflection
min_review = tmp_review
if not (p_atoms == q_atoms[min_review]).all():
print("error: Not aligned")
quit()
return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
"""
Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of atomic types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
decimals : int (optional)
number of decimals for the coordinates
Return
------
output : str
Molecule in XYZ format
"""
N, D = V.shape
fmt = "{:2s}" + (" {:15."+str(decimals)+"f}")*3
out = list()
out += [str(N)]
out += [title]
for i in range(N):
atom = atoms[i]
atom = atom[0].upper() + atom[1:]
out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
return "\n".join(out)
def print_coordinates(atoms, V, title=""):
"""
Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of element types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
"""
print(set_coordinates(atoms, V, title=title))
return
def get_coordinates(filename, fmt):
"""
Get coordinates from filename in format fmt. Supports XYZ and PDB.
Parameters
----------
filename : string
Filename to read
fmt : string
Format of filename. Either xyz or pdb.
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
if fmt == "xyz":
get_func = get_coordinates_xyz
elif fmt == "pdb":
get_func = get_coordinates_pdb
else:
exit("Could not recognize file format: {:s}".format(fmt))
return get_func(filename)
def get_coordinates_pdb(filename):
"""
Get coordinates from the first chain in a pdb file
and return a vectorset with all the coordinates.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
# PDB files tend to be a bit of a mess. The x, y and z coordinates
# are supposed to be in column 31-38, 39-46 and 47-54, but this is
# not always the case.
# Because of this the three first columns containing a decimal is used.
# Since the format doesn't require a space between columns, we use the
# above column indices as a fallback.
x_column = None
V = list()
# Same with atoms and atom naming.
# The most robust way to do this is probably
# to assume that the atomtype is given in column 3.
atoms = list()
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith("TER") or line.startswith("END"):
break
if line.startswith("ATOM"):
tokens = line.split()
# Try to get the atomtype
try:
atom = tokens[2][0]
if atom in ("H", "C", "N", "O", "S", "P"):
atoms.append(atom)
else:
# e.g. 1HD1
atom = tokens[2][1]
if atom == "H":
atoms.append(atom)
else:
raise Exception
except:
exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
if x_column == None:
try:
# look for x column
for i, x in enumerate(tokens):
if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
x_column = i
break
except IndexError:
exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
# Try to read the coordinates
try:
V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
except:
# If that doesn't work, use hardcoded indices
try:
x = line[30:38]
y = line[38:46]
z = line[46:54]
V.append(np.asarray([x, y ,z], dtype=float))
except:
exit("error: Parsing input for the following line: \n{0:s}".format(line))
V = np.asarray(V)
atoms = np.asarray(atoms)
assert V.shape[0] == atoms.size
return atoms, V
def get_coordinates_xyz(filename):
"""
Get coordinates from filename and return a vectorset with all the
coordinates, in XYZ format.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
f = open(filename, 'r')
V = list()
atoms = list()
n_atoms = 0
# Read the first line to obtain the number of atoms to read
try:
n_atoms = int(f.readline())
except ValueError:
exit("error: Could not obtain the number of atoms in the .xyz file.")
# Skip the title line
f.readline()
# Use the number of atoms to not read beyond the end of a file
for lines_read, line in enumerate(f):
if lines_read == n_atoms:
break
atom = re.findall(r'[a-zA-Z]+', line)[0]
atom = atom.upper()
numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
numbers = [float(number) for number in numbers]
# The numbers are not valid unless we obtain exacly three
if len(numbers) >= 3:
V.append(np.array(numbers)[:3])
atoms.append(atom)
else:
exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
f.close()
atoms = np.array(atoms)
V = np.array(V)
return atoms, V
def main():
import argparse
import sys
description = __doc__
version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
version_msg = version_msg.format(__version__)
epilog = """
"""
parser = argparse.ArgumentParser(
usage='calculate_rmsd [options] FILE_A FILE_B',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
# Input structures
parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
parser.add_argument('structure_b', metavar='FILE_B', type=str)
# Admin
parser.add_argument('-v', '--version', action='version', version=version_msg)
# Rotation
parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
# Reorder arguments
parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
# Filter
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
# format and print
parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# As default, load the extension as format
if args.format is None:
args.format = args.structure_a.split('.')[-1]
p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
p_size = p_all.shape[0]
q_size = q_all.shape[0]
if not p_size == q_size:
print("error: Structures not same size")
quit()
if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
print(msg)
exit()
# Set local view
p_view = None
q_view = None
if args.no_hydrogen:
p_view = np.where(p_all_atoms != 'H')
q_view = np.where(q_all_atoms != 'H')
elif args.remove_idx:
index = range(p_size)
index = set(index) - set(args.remove_idx)
index = list(index)
p_view = index
q_view = index
elif args.add_idx:
p_view = args.add_idx
q_view = args.add_idx
# Set local view
if p_view is None:
p_coord = copy.deepcopy(p_all)
q_coord = copy.deepcopy(q_all)
p_atoms = copy.deepcopy(p_all_atoms)
q_atoms = copy.deepcopy(q_all_atoms)
else:
if args.reorder and args.output:
print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
quit()
if args.use_reflections and args.output:
print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
quit()
p_coord = copy.deepcopy(p_all[p_view])
q_coord = copy.deepcopy(q_all[q_view])
p_atoms = copy.deepcopy(p_all_atoms[p_view])
q_atoms = copy.deepcopy(q_all_atoms[q_view])
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
p_cent = centroid(p_coord)
q_cent = centroid(q_coord)
p_coord -= p_cent
q_coord -= q_cent
# set rotation method
if args.rotation.lower() == "kabsch":
rotation_method = kabsch_rmsd
elif args.rotation.lower() == "quaternion":
rotation_method = quaternion_rmsd
elif args.rotation.lower() == "none":
rotation_method = None
else:
print("error: Unknown rotation method:", args.rotation)
quit()
# set reorder method
if not args.reorder:
reorder_method = None
if args.reorder_method == "hungarian":
reorder_method = reorder_hungarian
elif args.reorder_method == "brute":
reorder_method = reorder_brute
elif args.reorder_method == "distance":
reorder_method = reorder_distance
else:
print("error: Unknown reorder method:", args.reorder_method)
quit()
# Save the resulting RMSD
result_rmsd = None
if args.use_reflections:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method)
elif args.use_reflections_keep_stereo:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method,
keep_stereo=True)
elif args.reorder:
q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
q_coord = q_coord[q_review]
q_atoms = q_atoms[q_review]
if not all(p_atoms == q_atoms):
print("error: Structure not aligned")
quit()
# print result
if args.output:
if args.reorder:
if q_review.shape[0] != q_all.shape[0]:
print("error: Reorder length error. Full atom list needed for --print")
quit()
q_all = q_all[q_review]
q_all_atoms = q_all_atoms[q_review]
# Get rotation matrix
U = kabsch(q_coord, p_coord)
# recenter all atoms and rotate all atoms
q_all -= q_cent
q_all = np.dot(q_all, U)
# center q on p's original coordinates
q_all += p_cent
# done and done
xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
print(xyz)
else:
if result_rmsd:
pass
elif rotation_method is None:
result_rmsd = rmsd(p_coord, q_coord)
else:
result_rmsd = rotation_method(p_coord, q_coord)
print("{0}".format(result_rmsd))
return
if __name__ == "__main__":
main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | kabsch_rotate | python | def kabsch_rotate(P, Q):
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P | Rotate matrix P unto matrix Q using Kabsch algorithm.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
P : array
(N,D) matrix, where N is points and D is dimension,
rotated | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L90-L112 | [
"def kabsch(P, Q):\n \"\"\"\n Using the Kabsch algorithm with two sets of paired point P and Q, centered\n around the centroid. Each vector set is represented as an NxD\n matrix, where D is the the dimension of the space.\n\n The algorithm works in three steps:\n - a centroid translation of P and Q (assumed done before this function\n call)\n - the computation of a covariance matrix C\n - computation of the optimal rotation matrix U\n\n For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm\n\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n U : matrix\n Rotation matrix (D,D)\n \"\"\"\n\n # Computation of the covariance matrix\n C = np.dot(np.transpose(P), Q)\n\n # Computation of the optimal rotation matrix\n # This can be done using singular value decomposition (SVD)\n # Getting the sign of the det(V)*(W) to decide\n # whether we need to correct our rotation matrix to ensure a\n # right-handed coordinate system.\n # And finally calculating the optimal rotation matrix U\n # see http://en.wikipedia.org/wiki/Kabsch_algorithm\n V, S, W = np.linalg.svd(C)\n d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n\n if d:\n S[-1] = -S[-1]\n V[:, -1] = -V[:, -1]\n\n # Create Rotation matrix U\n U = np.dot(V, W)\n\n return U\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized form of sqrt(sum_i ||v_i - w_i||^2 / N); replaces the
    # original per-element Python loop. Accepts lists or ndarrays.
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their respective centroids first.
        P = P - centroid(P)
        Q = Q - centroid(Q)
    # Optimally rotate P onto Q, then score the alignment.
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed to be centered on their
    centroids already. Each vector set is represented as an NxD matrix,
    where D is the dimension of the space.

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two centered point sets.
    cov = np.dot(P.T, Q)

    # Singular value decomposition of the covariance matrix; the optimal
    # rotation is assembled from the singular vectors.
    V, S, W = np.linalg.svd(cov)

    # If det(V)*det(W) is negative the product would be an improper
    # rotation (a reflection); flip the sign of the last singular vector
    # to keep a right-handed coordinate system.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]

    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD,
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    # Find the optimal quaternion-based rotation, apply it to P, then score.
    rotation = quaternion_rotate(P, Q)
    aligned = np.dot(P, rotation)
    return rmsd(aligned, Q)
def quaternion_transform(r):
    """
    Build the optimal rotation matrix from quaternion r.

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    # The rotation is the upper-left 3x3 of W(r)^T . Q(r).
    full = np.dot(makeW(*r).T, makeQ(*r))
    return full[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    return np.array([
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def makeQ(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    return np.array([
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that optimally maps X onto Y,
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate Q_k^T W_k over all points; the optimal quaternion is the
    # eigenvector of A belonging to the largest eigenvalue.
    # (The original also built W[k] - Q[k] per point, but that value was
    # never used anywhere - dead O(N) work, removed.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the
    coordinate directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    # Average over the point axis: one mean value per coordinate direction.
    return np.mean(X, axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then
    by distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, indexes into Q such that Q's atoms line up with P's,
        element type by element type
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    # Handle each element type independently.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        # Rank both subsets by distance from the origin (the structures are
        # assumed to be centered on their centroids).
        p_order = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_order = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Project Q's distance ordering back onto P's original ordering.
        view = q_order[np.argsort(p_order)]
        view_reorder[p_idx] = q_idx[view]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only
    """
    # Pairwise Euclidean distances between the two coordinate sets form the
    # cost matrix for the linear assignment (Hungarian) problem.
    cost = cdist(A, B, 'euclidean')
    _, indices_b = linear_sum_assignment(cost)
    return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start at -1 everywhere so unassigned slots remain detectable.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    # Solve one assignment problem per element type.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        matching = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[matching]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations of a list.

    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Yields a fresh copy of the permutation each time. The original yielded
    the same list object while mutating it in place, so collecting the
    generator (e.g. with list()) produced n! references to one list instead
    of n! distinct permutations.

    Parameters
    ----------
    elements : list
        Items to permute; used (and mutated) as scratch space.
    n : int
        Number of leading items of ``elements`` to permute.

    Yields
    ------
    list
        One permutation per iteration.
    """
    c = [0] * n
    yield list(elements)
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's rule: swap position i with 0 (i even) or c[i] (i odd).
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield list(elements)
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    # Try every permutation of B's rows; O(N!) - only viable for small N.
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the coordinate matrix rows of B by the candidate permutation
        coords_ordered = B[reorder_indices]
        # Kabsch-aligned RMSD between A and this permutation of B
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        # Keep the permutation if it improves the best RMSD so far. The
        # deepcopy is required because the generator mutates its list in place.
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            view_min = copy.deepcopy(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)
    # Generate full view from q shape to fill in atom view on the fly;
    # initialise to -1 so unassigned entries stay detectable.
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1
    # Solve one brute-force permutation search per element type (factorial
    # cost in the count of atoms of that element).
    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)
        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]
        view = brute_permutation(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]
    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd : float
        best RMSD found over all axis swaps and reflections
    min_swap : array
        axis permutation giving the best RMSD
    min_reflection : array
        axis sign pattern giving the best RMSD
    min_review : array or None
        atom reordering used for the best RMSD (None if reorder_method is None)
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity of each axis permutation / reflection; the product i*j tells
    # whether the combined transform is proper (+1) or improper (-1).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    # Try all 6 axis orders x 8 sign patterns (48 candidate transforms of Q).
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Permute axes, apply the sign flips, and recenter the result.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # Sanity check: the winning reordering must align the atom labels.
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] broadcasts rather than reorders - confirm intended.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    N, D = V.shape
    # XYZ layout: atom count, title line, then one "El  x y z" row per atom.
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")
    lines = [str(N), title]
    for i in range(N):
        # Capitalize the element symbol (e.g. "cl" -> "Cl").
        symbol = atoms[i]
        symbol = symbol[0].upper() + symbol[1:]
        lines.append(row_fmt.format(symbol, V[i, 0], V[i, 1], V[i, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate the formatting, then write the block to stdout.
    xyz = set_coordinates(atoms, V, title=title)
    print(xyz)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on the file format; abort on anything unrecognised.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # Stop at the end of the first chain / model.
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype from the first character of the
                # atom-name field; fall back to the second character for
                # names like "1HD1". Only H, C, N, O, S, P are accepted.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                except:
                    # NOTE(review): bare except - any parsing failure aborts
                    # the whole program here.
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # Detect the x column once: the first of three consecutive
                # tokens that all contain a decimal point.
                # NOTE(review): `x_column == None` should idiomatically be
                # `x_column is None` (behavior identical here).
                if x_column == None:
                    try:
                        # look for x column
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use hardcoded indices
                    # (the fixed PDB column layout 31-38/39-46/47-54).
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y ,z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # Each parsed coordinate row must correspond to exactly one atom label.
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # The original used a bare open()/close() pair, leaking the handle when
    # any of the exit() calls below raised SystemExit; the with-statement
    # guarantees the file is closed on every path.
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break

            # First alphabetic token on the line is the element symbol.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are not valid unless we obtain at least three
            # (x, y, z); extra columns are ignored.
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse arguments, load both structures,
    optionally filter/reorder atoms, and print either the RMSD or the
    rotated structure B in XYZ format."""
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    # No arguments at all: show help and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    # RMSD is only defined for equally sized point sets.
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    # Mismatched atom labels require explicit --reorder.
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view
    p_view = None
    q_view = None
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # Atom filtering is incompatible with structure output, since the
        # printed structure needs the full atom list.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    # NOTE(review): the None assigned here is always overwritten by the
    # chain below, so reorder_method is set from --reorder-method even when
    # --reorder was not given - confirm this is intended (it matters for
    # the check_reflections calls below).
    if not args.reorder:
        reorder_method = None
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # NOTE(review): truthiness test means an exact 0.0 RMSD from
        # check_reflections is recomputed below (harmless, but subtle).
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
if __name__ == "__main__":
main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | kabsch | python | def kabsch(P, Q):
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U | Using the Kabsch algorithm with two sets of paired point P and Q, centered
around the centroid. Each vector set is represented as an NxD
matrix, where D is the dimension of the space.
The algorithm works in three steps:
- a centroid translation of P and Q (assumed done before this function
call)
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
U : matrix
Rotation matrix (D,D) | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L115-L162 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized form of sqrt(sum_i ||v_i - w_i||^2 / N); replaces the
    # original per-element Python loop. Accepts lists or ndarrays.
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
"""
Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
translate : bool
Use centroids to translate vector P and Q unto each other.
Returns
-------
rmsd : float
root-mean squared deviation
"""
if translate:
Q = Q - centroid(Q)
P = P - centroid(P)
P = kabsch_rotate(P, Q)
return rmsd(P, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    # Apply the optimal Kabsch rotation to every point of P.
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def quaternion_rmsd(P, Q):
"""
Rotate matrix P unto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q)
def quaternion_transform(r):
"""
Get optimal rotation
note: translation will be zero when the centroids of each molecule are the
same
"""
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3, :3]
return rot
def makeW(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
W = np.asarray([
[r4, r3, -r2, r1],
[-r3, r4, r1, r2],
[r2, -r1, r4, r3],
[-r1, -r2, -r3, r4]])
return W
def makeQ(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
Q = np.asarray([
[r4, -r3, r2, r1],
[r3, r4, -r1, r2],
[-r2, r1, r4, r3],
[-r1, -r2, -r3, r4]])
return Q
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that optimally maps X onto Y,
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate Q_k^T W_k over all points; the optimal quaternion is the
    # eigenvector of A belonging to the largest eigenvalue.
    # (The original also built W[k] - Q[k] per point, but that value was
    # never used anywhere - dead O(N) work, removed.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
"""
Centroid is the mean position of all the points in all of the coordinate
directions, from a vectorset X.
https://en.wikipedia.org/wiki/Centroid
C = sum(X)/len(X)
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
C : float
centroid
"""
C = X.mean(axis=0)
return C
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
# Project the order of P onto Q
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def hungarian(A, B):
"""
Hungarian reordering.
Assume A and B are coordinates for atoms of SAME type only
"""
# should be kabasch here i think
distances = cdist(A, B, 'euclidean')
# Perform Hungarian analysis on distance matrix between atoms of 1st
# structure and trial structure
indices_a, indices_b = linear_sum_assignment(distances)
return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using the Hungarian
method (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = hungarian(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations of a list.

    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Yields a fresh copy of the permutation each time. The original yielded
    the same list object while mutating it in place, so collecting the
    generator (e.g. with list()) produced n! references to one list instead
    of n! distinct permutations.

    Parameters
    ----------
    elements : list
        Items to permute; used (and mutated) as scratch space.
    n : int
        Number of leading items of ``elements`` to permute.

    Yields
    ------
    list
        One permutation per iteration.
    """
    c = [0] * n
    yield list(elements)
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's rule: swap position i with 0 (i even) or c[i] (i odd).
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield list(elements)
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
"""
Re-orders the input atom list and xyz coordinates using the brute force
method of permuting all rows of the input coordinates
Parameters
----------
A : array
(N,D) matrix, where N is points and D is dimension
B : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view : array
(N,1) matrix, reordered view of B projected to A
"""
rmsd_min = np.inf
view_min = None
# Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
# brute-force method
num_atoms = A.shape[0]
initial_order = list(range(num_atoms))
for reorder_indices in generate_permutations(initial_order, num_atoms):
# Re-order the atom array and coordinate matrix
coords_ordered = B[reorder_indices]
# Calculate the RMSD between structure 1 and the Hungarian re-ordered
# structure 2
rmsd_temp = kabsch_rmsd(A, coords_ordered)
# Replaces the atoms and coordinates with the current structure if the
# RMSD is lower
if rmsd_temp < rmsd_min:
rmsd_min = rmsd_temp
view_min = copy.deepcopy(reorder_indices)
return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly;
    # -1 marks slots that are never assigned (element types absent from p)
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1

    for atom in unique_atoms:
        # brute-force matching is done only within atoms of the same element,
        # so the factorial cost applies per element, not to the whole molecule
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        view = brute_permutation(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension
    reorder_method : callable or None
        atom re-ordering applied to each transformed copy of Q
        (None disables re-ordering)
    rotation_method : callable or None
        RMSD-after-rotation function (None compares coordinates as-is)
    keep_stereo : bool
        if True, skip transforms that mirror the molecule (enantiomers)

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None

    # parity of each axis permutation in AXIS_SWAPS (+1 even, -1 odd) and
    # the determinant sign of each AXIS_REFLECTIONS row; the product i*j is
    # the determinant of the combined transform, so i*j == -1 identifies
    # mirror-image (enantiomer) candidates
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # permute the axes, apply the sign flips, then re-center
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # sanity check: the winning ordering must make the atom labels agree.
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[None] adds a leading axis; the comparison then relies on
    # broadcasting of identical label arrays -- confirm intended for that path.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape

    # one fixed-width field per coordinate, e.g. " {:15.8f}"
    number_fmt = " {:15." + str(decimals) + "f}"
    line_fmt = "{:2s}" + number_fmt * 3

    lines = [str(n_atoms), title]
    for idx, row in enumerate(V):
        label = atoms[idx]
        # capitalize only the first character of the element label
        label = label[0].upper() + label[1:]
        lines.append(line_fmt.format(label, row[0], row[1], row[2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # delegate formatting, then write the whole block at once
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # dispatch table keyed on the file format; unknown formats abort
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # stop at the end of the first chain / model
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype; only single-letter elements plus a
                # hydrogen-with-numeric-prefix case are recognized
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1 -- the element letter comes second
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                except:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

                if x_column == None:
                    try:
                        # look for x column: the first run of three
                        # consecutive tokens that all contain a decimal point
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use hardcoded indices
                    # (fixed PDB columns 31-38, 39-46, 47-54)
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y ,z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # every parsed coordinate row must have a matching element label
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # "with" guarantees the handle is closed even when parsing aborts via
    # exit() below (the original opened/closed manually and leaked the handle
    # on every error path).
    with open(filename, 'r') as f:

        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):

            if lines_read == n_atoms:
                break

            # first alphabetic token is the element label, normalized to upper
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            # floats must contain a decimal point; optional exponent allowed
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are only valid if we obtain at least three;
            # any extra columns (e.g. charges) are ignored
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parses arguments, reads two structures (xyz or pdb), optionally filters
    and/or re-orders atoms, then prints either the RMSD between them or
    structure B rotated/translated onto structure A in XYZ format.
    """

    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}

See https://github.com/charnley/rmsd for citation information

"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter (mutually exclusive atom-selection options)
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show help instead of a terse argparse error
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.

Use --reorder to align the atoms (can be expensive for large structures).

Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view (index filters selected on the command line)
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)

    else:
        # reorder/reflections operate on the filtered subset only, so a
        # full-structure printout would be inconsistent; refuse the combos
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()

        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()

        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below runs unconditionally, so the default
    # "hungarian" re-assigns reorder_method even when --reorder was NOT
    # given; check_reflections below then receives a reorder method
    # regardless of the flag. Confirm whether that is intended.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)

    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)

    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]

        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:

        if args.reorder:

            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()

            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)

    else:
        # use_reflections paths already produced result_rmsd; otherwise
        # compute it here with the selected rotation method
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))

    return
# Script entry point: run the CLI only when executed directly,
# not when imported as a library.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | quaternion_rmsd | python | def quaternion_rmsd(P, Q):
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q) | Rotate matrix P unto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L165-L183 | [
"def quaternion_rotate(X, Y):\n \"\"\"\n Calculate the rotation\n\n Parameters\n ----------\n X : array\n (N,D) matrix, where N is points and D is dimension.\n Y: array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n rot : matrix\n Rotation matrix (D,D)\n \"\"\"\n N = X.shape[0]\n W = np.asarray([makeW(*Y[k]) for k in range(N)])\n Q = np.asarray([makeQ(*X[k]) for k in range(N)])\n Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])\n W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])\n A = np.sum(Qt_dot_W, axis=0)\n eigen = np.linalg.eigh(A)\n r = eigen[1][:, eigen[0].argmax()]\n rot = quaternion_transform(r)\n return rot\n",
"def rmsd(V, W):\n \"\"\"\n Calculate Root-mean-square deviation from two sets of vectors V and W.\n\n Parameters\n ----------\n V : array\n (N,D) matrix, where N is points and D is dimension.\n W : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n rmsd : float\n Root-mean-square deviation between the two vectors\n \"\"\"\n D = len(V[0])\n N = len(V)\n result = 0.0\n for v, w in zip(V, W):\n result += sum([(v[i] - w[i])**2.0 for i in range(D)])\n return np.sqrt(result/N)\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# The 6 permutations of the x/y/z axes; each row is used as a column index
# into a coordinate matrix when scanning axis swaps in check_reflections().
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# The 8 axis sign combinations (mirror planes); combined with AXIS_SWAPS this
# enumerates all axis-aligned proper and improper transforms.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized: sqrt of the mean (over points) of the squared
    # point-to-point distance. Equivalent to the original pure-Python
    # double loop, but runs at C speed for large inputs.
    diff = np.asarray(V) - np.asarray(W)
    N = len(V)
    return np.sqrt((diff * diff).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # shift both point sets so their centroids sit at the origin
        Q = Q - centroid(Q)
        P = P - centroid(P)

    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    # optimal rotation mapping P onto Q, applied on the right
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q, centered
    around the centroid. Each vector set is represented as an NxD
    matrix, where D is the the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the paired point sets
    C = np.dot(np.transpose(P), Q)

    # SVD of the covariance matrix. If det(V)*det(W) is negative the
    # candidate transform is an improper rotation (a reflection); flipping
    # the sign of the last singular vector restores a proper, right-handed
    # rotation matrix.
    V, S, W = np.linalg.svd(C)
    improper = np.linalg.det(V) * np.linalg.det(W) < 0.0

    if improper:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]

    # Optimal rotation matrix U
    return np.dot(V, W)
def quaternion_transform(r):
    """
    Get optimal rotation
    note: translation will be zero when the centroids of each molecule are the
    same
    """
    # the 3x3 rotation block of the quaternion product W(r)^T . Q(r)
    rotation = makeW(*r).T.dot(makeQ(*r))
    return rotation[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # 4x4 matrix built from the quaternion components (r1, r2, r3, r4)
    rows = ((r4, r3, -r2, r1),
            (-r3, r4, r1, r2),
            (r2, -r1, r4, r3),
            (-r1, -r2, -r3, r4))
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # 4x4 matrix built from the quaternion components (r1, r2, r3, r4)
    rows = ((r4, -r3, r2, r1),
            (r3, r4, -r1, r2),
            (-r2, r1, r4, r3),
            (-r1, -r2, -r3, r4))
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # quaternion multiplication matrices for each point pair
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # accumulate the 4x4 profile matrix A = sum_k Q_k^T W_k
    # (the original also built an unused W - Q list; that dead work is removed)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    # the optimal quaternion is the eigenvector of A with the largest
    # eigenvalue (eigh returns eigenvalues in ascending order)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    # per-column mean == geometric center of the N points
    return np.mean(X, axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then by
    distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, indexes into q that align its atoms with p, matched per
        element by rank of distance from the origin
    """
    # work one element type at a time; atoms are only matched with atoms of
    # the same type
    elements = np.unique(p_atoms)

    # index map from q onto p, filled per element below
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    for element in elements:
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        p_subset = p_coord[p_idx]
        q_subset = q_coord[q_idx]

        # rank atoms of this element by distance from the origin
        # (coordinates are assumed pre-centered on the centroid)
        p_order = np.argsort(np.linalg.norm(p_subset, axis=1))
        q_order = np.argsort(np.linalg.norm(q_subset, axis=1))

        # compose: project q's distance ranking through p's ordering
        p_rank = np.argsort(p_order)
        matched = q_order[p_rank]

        view_reorder[p_idx] = q_idx[matched]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only
    """
    # pairwise Euclidean distances form the assignment cost matrix
    # (arguably the sets should be Kabsch-aligned first)
    cost = cdist(A, B, 'euclidean')

    # optimal one-to-one assignment between rows of A and rows of B
    row_ind, col_ind = linear_sum_assignment(cost)
    return col_ind
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly;
    # -1 marks slots that are never assigned (element types absent from p)
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1

    for atom in unique_atoms:
        # assignment is solved only within atoms of the same element
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        view = hungarian(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Parameters
    ----------
    elements : list
        list whose first n entries are permuted in place
    n : int
        number of elements to permute (normally len(elements))

    Yields
    ------
    list
        the SAME list object, mutated into each successive permutation;
        a caller that wants to keep a permutation must copy it first.
    """
    # c[i] tracks the loop counter of the (removed) recursion level i
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's rule: swap position depends on the parity of i
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            # restart the scan from the bottom after every swap
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Find the row ordering of B that minimizes the RMSD against A by
    exhaustively trying every permutation of B's rows.

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None

    n_points = A.shape[0]

    # generate_permutations mutates and re-yields the same list, so the
    # winning ordering must be copied before iteration continues.
    for candidate in generate_permutations(list(range(n_points)), n_points):
        permuted = B[candidate]

        # score this ordering by the Kabsch-aligned RMSD against A
        candidate_rmsd = kabsch_rmsd(A, permuted)

        if candidate_rmsd < best_rmsd:
            best_rmsd = candidate_rmsd
            best_view = copy.deepcopy(candidate)

    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly;
    # -1 marks slots that are never assigned (element types absent from p)
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1

    for atom in unique_atoms:
        # brute-force matching is done only within atoms of the same element,
        # so the factorial cost applies per element, not to the whole molecule
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        view = brute_permutation(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension
    reorder_method : callable or None
        atom re-ordering applied to each transformed copy of Q
        (None disables re-ordering)
    rotation_method : callable or None
        RMSD-after-rotation function (None compares coordinates as-is)
    keep_stereo : bool
        if True, skip transforms that mirror the molecule (enantiomers)

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None

    # parity of each axis permutation in AXIS_SWAPS (+1 even, -1 odd) and
    # the determinant sign of each AXIS_REFLECTIONS row; the product i*j is
    # the determinant of the combined transform, so i*j == -1 identifies
    # mirror-image (enantiomer) candidates
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # permute the axes, apply the sign flips, then re-center
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # sanity check: the winning ordering must make the atom labels agree.
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[None] adds a leading axis; the comparison then relies on
    # broadcasting of identical label arrays -- confirm intended for that path.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape

    # one fixed-width field per coordinate, e.g. " {:15.8f}"
    number_fmt = " {:15." + str(decimals) + "f}"
    line_fmt = "{:2s}" + number_fmt * 3

    lines = [str(n_atoms), title]
    for idx, row in enumerate(V):
        label = atoms[idx]
        # capitalize only the first character of the element label
        label = label[0].upper() + label[1:]
        lines.append(line_fmt.format(label, row[0], row[1], row[2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # delegate formatting, then write the whole block at once
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # dispatch table keyed on the file format; unknown formats abort
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # stop at the end of the first chain / model
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype; only single-letter elements plus a
                # hydrogen-with-numeric-prefix case are recognized
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1 -- the element letter comes second
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                except:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

                if x_column == None:
                    try:
                        # look for x column: the first run of three
                        # consecutive tokens that all contain a decimal point
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use hardcoded indices
                    # (fixed PDB columns 31-38, 39-46, 47-54)
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y ,z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # every parsed coordinate row must have a matching element label
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # "with" guarantees the handle is closed even when parsing aborts via
    # exit() below (the original opened/closed manually and leaked the handle
    # on every error path).
    with open(filename, 'r') as f:

        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):

            if lines_read == n_atoms:
                break

            # first alphabetic token is the element label, normalized to upper
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            # floats must contain a decimal point; optional exponent allowed
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are only valid if we obtain at least three;
            # any extra columns (e.g. charges) are ignored
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parses arguments, loads structures A and B, optionally filters and/or
    reorders atoms, then prints either the RMSD between the structures or
    structure B re-aligned onto A in XYZ format.
    """

    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # Called with no arguments at all: show help instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)

    else:
        # Printing the re-aligned structure needs the full atom list, so
        # atom-exclusion filters are incompatible with --output.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()

        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()

        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd

    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd

    elif args.rotation.lower() == "none":
        rotation_method = None

    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None

    # NOTE(review): this chain runs unconditionally, so it overwrites the
    # None assigned just above whenever --reorder-method is valid (its
    # default is "hungarian"); reorder_method is therefore effectively
    # always set. Confirm whether --use-reflections without --reorder is
    # really meant to reorder atoms.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian

    elif args.reorder_method == "brute":
        reorder_method = reorder_brute

    elif args.reorder_method == "distance":
        reorder_method = reorder_distance

    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)

    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)

    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]

        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:

        if args.reorder:

            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()

            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)

    else:
        # NOTE(review): `if result_rmsd:` treats an RMSD of exactly 0.0 as
        # "not computed" and recomputes it (harmless, same value); an
        # `is not None` test would state the intent.
        if result_rmsd:
            pass

        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)

        else:
            result_rmsd = rotation_method(p_coord, q_coord)

        print("{0}".format(result_rmsd))

    return
# Allow the module to be run directly as a script.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | quaternion_transform | python | def quaternion_transform(r):
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3, :3]
return rot | Get optimal rotation
note: translation will be zero when the centroids of each molecule are the
same | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L186-L195 | [
"def makeW(r1, r2, r3, r4=0):\n \"\"\"\n matrix involved in quaternion rotation\n \"\"\"\n W = np.asarray([\n [r4, r3, -r2, r1],\n [-r3, r4, r1, r2],\n [r2, -r1, r4, r3],\n [-r1, -r2, -r3, r4]])\n return W\n",
"def makeQ(r1, r2, r3, r4=0):\n \"\"\"\n matrix involved in quaternion rotation\n \"\"\"\n Q = np.asarray([\n [r4, -r3, r2, r1],\n [r3, r4, -r1, r2],\n [-r2, r1, r4, r3],\n [-r1, -r2, -r3, r4]])\n return Q\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the three coordinate axes, scanned by
# check_reflections() when looking for the best superposition.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# All 8 per-axis sign combinations (mirror reflections), combined with
# AXIS_SWAPS in check_reflections().
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    # Mean of the squared point-wise distances, vectorized in numpy
    # instead of the original Python double loop (same result, runs in C).
    diff = V - W
    return np.sqrt((diff * diff).sum() / len(V))
def kabsch_rmsd(P, Q, translate=False):
    """
    Superimpose P onto Q with the Kabsch algorithm, then return the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their centroids before rotating.
        Q = Q - centroid(Q)
        P = P - centroid(P)

    rotated = kabsch_rotate(P, Q)
    return rmsd(rotated, Q)
def kabsch_rotate(P, Q):
    """
    Rotate the point set P onto Q using the optimal Kabsch rotation.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed already centered on their
    centroids; each is an NxD matrix (D = dimension of the space).

    See http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two (centered) point sets.
    covariance = np.dot(np.transpose(P), Q)

    # SVD of the covariance; the optimal rotation is assembled from the
    # left/right singular vectors.
    V, S, W = np.linalg.svd(covariance)

    # If det(V)*det(W) < 0 the naive product V.W would be an improper
    # rotation (a reflection); flip the last singular direction to stay
    # in a right-handed coordinate system.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] *= -1
        V[:, -1] *= -1

    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P onto Q using the quaternion method and return the RMSD.
    Based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def makeW(r1, r2, r3, r4=0):
    """
    Build the 4x4 'W' matrix of quaternion (r1, r2, r3, r4) used in the
    quaternion rotation formulation.
    """
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Build the 4x4 'Q' matrix of quaternion (r1, r2, r3, r4) used in the
    quaternion rotation formulation.
    """
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that maps X onto Y with the quaternion
    method (doi:10.1016/1049-9660(91)90036-O).

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # A = sum_k Q_k^T W_k; the eigenvector of A with the largest
    # eigenvalue is the optimal rotation quaternion.
    # (The original also built W[k] - Q[k] for every k, but the result
    # was never used -- dropped to avoid the wasted O(N) work.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid: the mean position of all points along every coordinate
    direction, i.e. C = sum(X)/len(X).

    https://en.wikipedia.org/wiki/Centroid

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : array
        centroid, one mean value per coordinate direction
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q so that, element by element, atoms are matched
    by their rank in distance from the origin (the coordinates are assumed
    centered on the centroid).

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    # Handle each element type independently.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        # Rank this element's atoms by distance from the origin.
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Project P's ordering onto Q: the atom with the k-th smallest
        # norm in Q is matched to the k-th smallest in P.
        view = q_rank[np.argsort(p_rank)]
        view_reorder[p_idx] = q_idx[view]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian (linear sum assignment) matching between two coordinate sets
    A and B, assumed to hold atoms of the SAME element only.
    """
    # Pairwise euclidean distances form the assignment cost matrix.
    cost = cdist(A, B, 'euclidean')

    # Optimal assignment between rows of A and rows of B.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q onto P element-by-element using the Hungarian
    (linear sum assignment) method.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (same as zeros()-1).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        match = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[match]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations of a list.

    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Yields a fresh copy of the permutation on every step. The original
    yielded the very list object it keeps mutating, so consumers that
    materialize the generator (e.g. list(...)) got n! references to one
    identical list instead of n! distinct permutations.
    """
    c = [0] * n
    # Yield the initial ordering first.
    yield list(elements)

    i = 0
    while i < n:
        if c[i] < i:
            # Heap's swap rule: position 0 for even i, c[i] for odd i.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield list(elements)
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Find the row ordering of B that minimizes the Kabsch RMSD against A by
    brute force over all row permutations.

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None

    num_atoms = A.shape[0]
    order = list(range(num_atoms))

    # Try every permutation of B's rows and keep the lowest-RMSD one.
    for candidate in generate_permutations(order, num_atoms):
        candidate_rmsd = kabsch_rmsd(A, B[candidate])

        if candidate_rmsd < best_rmsd:
            best_rmsd = candidate_rmsd
            # Snapshot the indices; the generator mutates its list in place.
            best_view = copy.deepcopy(candidate)

    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q onto P by brute-force permutation search,
    element by element.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (same as zeros()-1).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None

    # Parity (determinant sign) of each entry in AXIS_SWAPS /
    # AXIS_REFLECTIONS; the product i*j tells whether a combination
    # preserves (+1) or inverts (-1) chirality.
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply the axis permutation, then the per-axis sign flips,
            # then re-center on the centroid.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # Sanity check: the winning reordering must map Q's atoms onto P's.
    # NOTE(review): with reorder_method=None, min_review stays None and
    # q_atoms[None] adds a leading axis; the broadcasted comparison still
    # works, but confirm this is intentional.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Render coordinates V with corresponding atoms as a string in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")

    # XYZ header: atom count, then a free-form title line.
    lines = [str(n_atoms), title]

    for i in range(n_atoms):
        label = atoms[i]
        # Element symbols start with an upper-case letter.
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[i, 0], V[i, 1], V[i, 2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz = set_coordinates(atoms, V, title=title)
    print(xyz)
def get_coordinates(filename, fmt):
    """
    Read coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table instead of an if/elif chain.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }

    parser = parsers.get(fmt)
    if parser is None:
        exit("Could not recognize file format: {:s}".format(fmt))

    return parser(filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Array of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if line.startswith("TER") or line.startswith("END"):
                break
            if not line.startswith("ATOM"):
                continue

            tokens = line.split()

            # Atom type from the atom-name column: first character for
            # names like "CA"; second for names like "1HD1".
            # (Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            try:
                atom = tokens[2][0]
                if atom not in ("H", "C", "N", "O", "S", "P"):
                    # e.g. 1HD1
                    atom = tokens[2][1]
                    if atom != "H":
                        raise ValueError
                atoms.append(atom)
            except (IndexError, ValueError):
                exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

            # `is None`, not `== None` (PEP 8, and avoids odd __eq__).
            if x_column is None:
                try:
                    # look for x column: the first run of three tokens
                    # that all contain a decimal point.
                    for i, x in enumerate(tokens):
                        if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                            x_column = i
                            break
                except IndexError:
                    exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

            # Try to read the coordinates from the detected columns.
            try:
                V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
            except (ValueError, TypeError):
                # If that doesn't work, use hardcoded indices
                try:
                    x = line[30:38]
                    y = line[38:46]
                    z = line[46:54]
                    V.append(np.asarray([x, y, z], dtype=float))
                except ValueError:
                    exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Array of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()

    # Use a context manager so the file handle is closed even when one of
    # the exit() calls below raises SystemExit (the original leaked it).
    with open(filename, 'r') as f:

        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):

            if lines_read == n_atoms:
                break

            # Element symbol: first alphabetic token on the line.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            # Decimal numbers, optionally with scientific-notation exponent.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parses arguments, loads structures A and B, optionally filters and/or
    reorders atoms, then prints either the RMSD between the structures or
    structure B re-aligned onto A in XYZ format.
    """

    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # Called with no arguments at all: show help instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)

    else:
        # Printing the re-aligned structure needs the full atom list, so
        # atom-exclusion filters are incompatible with --output.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()

        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()

        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd

    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd

    elif args.rotation.lower() == "none":
        rotation_method = None

    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None

    # NOTE(review): this chain runs unconditionally, so it overwrites the
    # None assigned just above whenever --reorder-method is valid (its
    # default is "hungarian"); reorder_method is therefore effectively
    # always set. Confirm whether --use-reflections without --reorder is
    # really meant to reorder atoms.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian

    elif args.reorder_method == "brute":
        reorder_method = reorder_brute

    elif args.reorder_method == "distance":
        reorder_method = reorder_distance

    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)

    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)

    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]

        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:

        if args.reorder:

            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()

            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)

    else:
        # NOTE(review): `if result_rmsd:` treats an RMSD of exactly 0.0 as
        # "not computed" and recomputes it (harmless, same value); an
        # `is not None` test would state the intent.
        if result_rmsd:
            pass

        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)

        else:
            result_rmsd = rotation_method(p_coord, q_coord)

        print("{0}".format(result_rmsd))

    return
# Allow the module to be run directly as a script.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | makeW | python | def makeW(r1, r2, r3, r4=0):
W = np.asarray([
[r4, r3, -r2, r1],
[-r3, r4, r1, r2],
[r2, -r1, r4, r3],
[-r1, -r2, -r3, r4]])
return W | matrix involved in quaternion rotation | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L198-L207 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the three coordinate axes, scanned by
# check_reflections() when looking for the best superposition.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# All 8 per-axis sign combinations (mirror reflections), combined with
# AXIS_SWAPS in check_reflections().
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    # Mean of the squared point-wise distances, vectorized in numpy
    # instead of the original Python double loop (same result, runs in C).
    diff = V - W
    return np.sqrt((diff * diff).sum() / len(V))
def kabsch_rmsd(P, Q, translate=False):
    """
    Superimpose P onto Q with the Kabsch algorithm, then return the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their centroids before rotating.
        Q = Q - centroid(Q)
        P = P - centroid(P)

    rotated = kabsch_rotate(P, Q)
    return rmsd(rotated, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    # Find the optimal rotation and apply it to every point in P.
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q, centered
    around the centroid. Each vector set is represented as an NxD
    matrix, where D is the the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two (pre-centered) point sets.
    covariance = np.dot(np.transpose(P), Q)
    # Solve for the optimal rotation via singular value decomposition.
    V, S, W = np.linalg.svd(covariance)
    # If the proposed transform is improper (negative determinant product),
    # flip the sign of the last singular vector so the result is a proper,
    # right-handed rotation.
    # See http://en.wikipedia.org/wiki/Kabsch_algorithm
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    # The optimal rotation matrix.
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    # Quaternion-based optimal rotation of P onto Q, then residual RMSD.
    rotation = quaternion_rotate(P, Q)
    aligned = np.dot(P, rotation)
    return rmsd(aligned, Q)
def quaternion_transform(r):
    """
    Get optimal rotation
    note: translation will be zero when the centroids of each molecule are the
    same
    """
    # The 3x3 rotation block is the upper-left corner of W(r)^T * Q(r).
    product = np.dot(makeW(*r).T, makeQ(*r))
    return product[:3, :3]
def makeQ(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # 4x4 right-multiplication matrix Q(r) for the quaternion
    # (r1, r2, r3, r4); r4 (scalar part) defaults to zero.
    return np.array([
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def quaternion_rotate(X, Y):
    """
    Calculate the rotation

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Per-point quaternion matrices for both structures.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate Q_k^T W_k over all points; the optimal quaternion is the
    # eigenvector of A with the largest eigenvalue.
    # (The original also built an unused W_minus_Q array; removed.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        centroid (length-D vector)
    """
    # Mean over the rows gives the geometric center of the point set.
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then by
    distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix of indexes into Q, aligning Q's atoms to P's order
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)
    # generate full view from q shape to fill in atom view on the fly
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)
        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]
        # Calculate distance from each atom to centroid
        # NOTE(review): norms are measured from the origin, so callers are
        # expected to have centered the coordinates beforehand — confirm.
        A_norms = np.linalg.norm(A_coord, axis=1)
        B_norms = np.linalg.norm(B_coord, axis=1)
        reorder_indices_A = np.argsort(A_norms)
        reorder_indices_B = np.argsort(B_norms)
        # Project the order of P onto Q
        # (argsort of an argsort inverts the permutation)
        translator = np.argsort(reorder_indices_A)
        view = reorder_indices_B[translator]
        view_reorder[p_atom_idx] = q_atom_idx[view]
    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only
    """
    # Pairwise Euclidean distances between the two coordinate sets.
    # should be kabasch here i think
    cost = cdist(A, B, 'euclidean')
    # Solve the linear-sum assignment problem on the distance matrix;
    # only the column indices (the ordering of B) are needed.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions never assigned (would indicate an element mismatch).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    # Solve the assignment problem independently for each element type.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[assignment]
    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Note: permutations are produced by in-place swaps, so every yielded
    value is the same (mutated) list object — copy it if you keep it.
    """
    # c[i] counts how many swaps have been done at position i
    # (non-recursive formulation of Heap's algorithm).
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Swap partner depends on the parity of i.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Initial ordering of row indices [0, 1, ..., N-1], permuted in place
    # by generate_permutations (brute-force enumeration, N! cost).
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the coordinate matrix rows of B
        coords_ordered = B[reorder_indices]
        # RMSD between A and the permuted B after optimal rotation
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        # Keep the best permutation seen so far. A shallow copy is required
        # because the generator mutates the list in place; list() replaces
        # the original copy.deepcopy, which is needless overhead for a flat
        # list of ints.
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            view_min = list(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)
    # generate full view from q shape to fill in atom view on the fly;
    # -1 marks positions that were never assigned.
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1
    # Permutations are enumerated per element type, so the factorial cost
    # applies per group — only feasible for small groups of same-type atoms.
    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)
        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]
        view = brute_permutation(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]
    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q
    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity (determinant sign) of each axis permutation in AXIS_SWAPS.
    swap_mask = [1,-1,-1,1,-1,1]
    # Sign product (determinant) of each reflection in AXIS_REFLECTIONS.
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            # i * j == -1 means the combined transform is improper (a mirror
            # image), which would flip stereo-chemistry.
            if keep_stereo and i * j == -1: continue # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply the axis swap and the reflection, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # Sanity check: the best reordering must map Q's atoms onto P's.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_points, _ = V.shape
    # One row per atom: 2-char element label plus three fixed-width floats.
    row_fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3
    lines = [str(n_points), title]
    for idx in range(n_points):
        label = atoms[idx]
        # Capitalize only the first character of the element symbol.
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate the formatting to set_coordinates and print the result.
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
    return
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on the file format; abort on anything unsupported.
    if fmt == "xyz":
        return get_coordinates_xyz(filename)
    if fmt == "pdb":
        return get_coordinates_pdb(filename)
    exit("Could not recognize file format: {:s}".format(fmt))
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # Stop at the end of the first chain/model.
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                # NOTE(review): bare except hides unrelated failures;
                # narrowing to IndexError would be safer.
                except:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # NOTE(review): "x_column is None" is the idiomatic test here.
                if x_column == None:
                    try:
                        # look for x column
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use hardcoded indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y ,z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # Every parsed atom label must have a matching coordinate row.
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0
    # Context manager guarantees the file is closed even when exit() raises
    # SystemExit on a parse error (the original leaked the handle there).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # First run of letters is the element symbol; normalize its case.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain exactly three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse arguments, load the two structures,
    optionally filter/reorder atoms, then print the RMSD or the aligned
    structure in XYZ format."""
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view (atom filtering: hydrogens / explicit index lists)
    p_view = None
    q_view = None
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    # NOTE(review): the chain below runs even when --reorder was not given,
    # so the None assigned here is always overwritten (reorder-method
    # defaults to "hungarian"); check_reflections therefore reorders by
    # default — confirm this is intended.
    if not args.reorder:
        reorder_method = None
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
# Standard command-line entry point guard.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | makeQ | python | def makeQ(r1, r2, r3, r4=0):
Q = np.asarray([
[r4, -r3, r2, r1],
[r3, r4, -r1, r2],
[-r2, r1, r4, r3],
[-r1, -r2, -r3, r4]])
return Q | matrix involved in quaternion rotation | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L210-L219 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
"""
Calculate Root-mean-square deviation from two sets of vectors V and W.
Parameters
----------
V : array
(N,D) matrix, where N is points and D is dimension.
W : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
Root-mean-square deviation between the two vectors
"""
D = len(V[0])
N = len(V)
result = 0.0
for v, w in zip(V, W):
result += sum([(v[i] - w[i])**2.0 for i in range(D)])
return np.sqrt(result/N)
def kabsch_rmsd(P, Q, translate=False):
"""
Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
translate : bool
Use centroids to translate vector P and Q unto each other.
Returns
-------
rmsd : float
root-mean squared deviation
"""
if translate:
Q = Q - centroid(Q)
P = P - centroid(P)
P = kabsch_rotate(P, Q)
return rmsd(P, Q)
def kabsch_rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
P : array
(N,D) matrix, where N is points and D is dimension,
rotated
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def kabsch(P, Q):
"""
Using the Kabsch algorithm with two sets of paired point P and Q, centered
around the centroid. Each vector set is represented as an NxD
matrix, where D is the the dimension of the space.
The algorithm works in three steps:
- a centroid translation of P and Q (assumed done before this function
call)
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
U : matrix
Rotation matrix (D,D)
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
def quaternion_rmsd(P, Q):
"""
Rotate matrix P unto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q)
def quaternion_transform(r):
"""
Get optimal rotation
note: translation will be zero when the centroids of each molecule are the
same
"""
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3, :3]
return rot
def makeW(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # 4x4 left-multiplication matrix W(r) for the quaternion
    # (r1, r2, r3, r4); r4 (scalar part) defaults to zero.
    return np.array([
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def quaternion_rotate(X, Y):
"""
Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D)
"""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
def centroid(X):
"""
Centroid is the mean position of all the points in all of the coordinate
directions, from a vectorset X.
https://en.wikipedia.org/wiki/Centroid
C = sum(X)/len(X)
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
C : float
centroid
"""
C = X.mean(axis=0)
return C
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
# Project the order of P onto Q
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def hungarian(A, B):
"""
Hungarian reordering.
Assume A and B are coordinates for atoms of SAME type only
"""
# should be kabasch here i think
distances = cdist(A, B, 'euclidean')
# Perform Hungarian analysis on distance matrix between atoms of 1st
# structure and trial structure
indices_a, indices_b = linear_sum_assignment(distances)
return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using the Hungarian
method (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = hungarian(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def generate_permutations(elements, n):
"""
Heap's algorithm for generating all n! permutations in a list
https://en.wikipedia.org/wiki/Heap%27s_algorithm
"""
c = [0] * n
yield elements
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
elements[0], elements[i] = elements[i], elements[0]
else:
elements[c[i]], elements[i] = elements[i], elements[c[i]]
yield elements
c[i] += 1
i = 0
else:
c[i] = 0
i += 1
def brute_permutation(A, B):
"""
Re-orders the input atom list and xyz coordinates using the brute force
method of permuting all rows of the input coordinates
Parameters
----------
A : array
(N,D) matrix, where N is points and D is dimension
B : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view : array
(N,1) matrix, reordered view of B projected to A
"""
rmsd_min = np.inf
view_min = None
# Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
# brute-force method
num_atoms = A.shape[0]
initial_order = list(range(num_atoms))
for reorder_indices in generate_permutations(initial_order, num_atoms):
# Re-order the atom array and coordinate matrix
coords_ordered = B[reorder_indices]
# Calculate the RMSD between structure 1 and the Hungarian re-ordered
# structure 2
rmsd_temp = kabsch_rmsd(A, coords_ordered)
# Replaces the atoms and coordinates with the current structure if the
# RMSD is lower
if rmsd_temp < rmsd_min:
rmsd_min = rmsd_temp
view_min = copy.deepcopy(reorder_indices)
return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using all permutation of
rows (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = brute_permutation(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension
    reorder_method : callable or None
        atom re-ordering strategy applied to each candidate; None skips it
    rotation_method : callable or None
        RMSD-after-rotation function; None compares coordinates directly
    keep_stereo : bool
        when True, skip swap/reflection combinations that mirror the molecule

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Sign (parity) of each entry in AXIS_SWAPS / AXIS_REFLECTIONS;
    # the product i * j == -1 flags an improper (mirroring) transform.
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply axis permutation, then sign flips, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] indexes with None (numpy inserts an axis); the
    # broadcast comparison still works element-wise but looks accidental —
    # confirm intended behavior.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Note: nothing is written to stdout; the formatted molecule is returned.
    Use print_coordinates() to print it. (The previous docstring incorrectly
    claimed this function prints.)

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    N, D = V.shape

    # Element symbol left-justified in two chars, then three fixed-width floats.
    fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3

    out = [str(N), title]
    for i in range(N):
        atom = atoms[i]
        # Capitalise only the first letter; keep the rest as-is (e.g. "Cl").
        atom = atom[0].upper() + atom[1:]
        out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
    return "\n".join(out)
def print_coordinates(atoms, V, title=""):
    """
    Write the molecule to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz_text = set_coordinates(atoms, V, title=title)
    print(xyz_text)
    return
def get_coordinates(filename, fmt):
    """
    Read coordinates from *filename* in format *fmt*. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table instead of an if/elif chain.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()

    for line in lines:
        # Stop at the end of the first chain / model.
        if line.startswith("TER") or line.startswith("END"):
            break
        if line.startswith("ATOM"):
            tokens = line.split()
            # Try to get the atomtype
            try:
                atom = tokens[2][0]
                if atom in ("H", "C", "N", "O", "S", "P"):
                    atoms.append(atom)
                else:
                    # e.g. 1HD1: names may begin with a digit, so the
                    # element symbol is the second character.
                    atom = tokens[2][1]
                    if atom == "H":
                        atoms.append(atom)
                    else:
                        raise Exception
            except:
                exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

            if x_column == None:
                try:
                    # look for x column: the first run of three consecutive
                    # tokens that all contain a decimal point.
                    for i, x in enumerate(tokens):
                        if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                            x_column = i
                            break
                except IndexError:
                    exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

            # Try to read the coordinates
            try:
                V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
            except:
                # If that doesn't work, use hardcoded indices
                # (the fixed PDB columns 31-38 / 39-46 / 47-54).
                try:
                    x = line[30:38]
                    y = line[38:46]
                    z = line[46:54]
                    V.append(np.asarray([x, y ,z], dtype=float))
                except:
                    exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # Use a context manager so the handle is closed on every exit path
    # (the original opened the file without `with` and leaked it on exit()).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):

            if lines_read == n_atoms:
                break

            # First alphabetic run on the line is taken as the element symbol.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """
    Command-line entry point.

    Parses arguments, reads the two input structures, optionally filters
    atoms (--no-hydrogen / --remove-idx / --add-idx), optionally reorders
    and/or scans reflections, then prints either the RMSD or structure B
    rotated onto structure A in XYZ format (-p / --output).
    """
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.

Use --reorder to align the atoms (can be expensive for large structures).

Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)

    else:
        # A filtered view cannot be reordered/reflected AND printed back,
        # because --output needs the full atom list.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()

        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()

        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd

    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd

    elif args.rotation.lower() == "none":
        rotation_method = None

    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    # NOTE(review): the None assignment below is always overwritten by the
    # following if/elif chain (default --reorder-method is "hungarian"), so
    # reorder_method is never None even without --reorder; the reflection
    # branches below therefore always reorder — confirm this is intended.
    if not args.reorder:
        reorder_method = None

    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian

    elif args.reorder_method == "brute":
        reorder_method = reorder_brute

    elif args.reorder_method == "distance":
        reorder_method = reorder_distance

    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)

    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)

    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]

        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)

    else:
        # result_rmsd is already set when a reflection scan ran above.
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))

    return
# Script entry point: run the command-line interface when executed directly.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | quaternion_rotate | python | def quaternion_rotate(X, Y):
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot | Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D) | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L222-L247 | [
"def quaternion_transform(r):\n \"\"\"\n Get optimal rotation\n note: translation will be zero when the centroids of each molecule are the\n same\n \"\"\"\n Wt_r = makeW(*r).T\n Q_r = makeQ(*r)\n rot = Wt_r.dot(Q_r)[:3, :3]\n return rot\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 3! = 6 permutations of the x/y/z axes, used by check_reflections
# to exchange coordinate columns.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All 2^3 = 8 sign combinations for reflecting the x/y/z axes.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized replacement for the original pure-Python double loop:
    # sqrt( sum over all elements of (V - W)^2 / N ).
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    diff = V - W
    N = diff.shape[0]
    return np.sqrt((diff * diff).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Shift both point sets so their centroids sit at the origin.
        P = P - centroid(P)
        Q = Q - centroid(Q)

    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    # Optimal rotation taking P onto Q, applied as a right multiplication.
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed centered on their centroids
    before this call.

    Steps:
    - build the covariance matrix C = P^T Q
    - take its singular value decomposition
    - correct for an improper rotation (reflection) when det < 0

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two (centered) point sets.
    covariance = np.dot(np.transpose(P), Q)

    # SVD factorization of the covariance matrix.
    left, singular, right = np.linalg.svd(covariance)

    # A negative determinant product means the candidate transform would be
    # a reflection; flip the last singular vector's sign to force a proper,
    # right-handed rotation.
    if np.linalg.det(left) * np.linalg.det(right) < 0.0:
        singular[-1] = -singular[-1]
        left[:, -1] = -left[:, -1]

    return np.dot(left, right)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    rotated = np.dot(P, rotation)
    return rmsd(rotated, Q)
def quaternion_transform(r):
    """
    Build the optimal rotation matrix from quaternion r.

    note: translation will be zero when the centroids of each molecule are
    the same.
    """
    # The rotation is the upper-left 3x3 block of W(r)^T Q(r).
    full = np.dot(makeW(*r).T, makeQ(*r))
    return full[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Auxiliary W matrix used in the quaternion rotation
    (doi:10.1016/1049-9660(91)90036-O).
    """
    return np.asarray([
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def makeQ(r1, r2, r3, r4=0):
    """
    Auxiliary Q matrix used in the quaternion rotation
    (doi:10.1016/1049-9660(91)90036-O).
    """
    return np.asarray([
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ])
def centroid(X):
    """
    Centroid: the mean position of the points in every coordinate
    direction of the vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    return np.mean(X, axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then by
    distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix of indexes into q such that, per atom type, the k-th
        closest-to-origin atom of q is matched to the k-th closest-to-origin
        atom of p. Assumes both coordinate sets are already centered
        (callers center before reordering).
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        # Calculate distance from each atom to centroid
        # (coordinates are centered, so the vector norm is that distance).
        A_norms = np.linalg.norm(A_coord, axis=1)
        B_norms = np.linalg.norm(B_coord, axis=1)

        reorder_indices_A = np.argsort(A_norms)
        reorder_indices_B = np.argsort(B_norms)

        # Project the order of P onto Q: argsort of the argsort gives the
        # rank of each p atom, which then indexes into q's sorted order.
        translator = np.argsort(reorder_indices_A)
        view = reorder_indices_B[translator]
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only.
    """
    # Pairwise Euclidean distances form the assignment cost matrix.
    cost = cdist(A, B, 'euclidean')

    # Solve the linear sum assignment (Hungarian method) between rows of A
    # and rows of B; the column indices are the permutation of B onto A.
    row_indices, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly.
    # Initialise with -1 so an unfilled slot is distinguishable from index 0.
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1

    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        # Optimal assignment between same-type atoms of the two structures.
        view = hungarian(A_coord, B_coord)
        # Map the local (per-type) permutation back to global atom indices.
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: the SAME list object is yielded every time and is permuted in
    place; callers that want to keep a permutation must copy it (see
    brute_permutation, which deep-copies the best candidate).
    """
    # c simulates the loop counters of the recursive formulation.
    c = [0] * n
    yield elements

    i = 0
    while i < n:
        if c[i] < i:
            # The swap partner depends on the parity of i (Heap's rule).
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None

    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))

    # O(N!) over all row permutations — only feasible for small N.
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the atom array and coordinate matrix
        coords_ordered = B[reorder_indices]

        # Calculate the RMSD between structure 1 and the Hungarian re-ordered
        # structure 2
        rmsd_temp = kabsch_rmsd(A, coords_ordered)

        # Replaces the atoms and coordinates with the current structure if the
        # RMSD is lower
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # deepcopy is required: generate_permutations mutates and
            # re-yields the same list object on every iteration.
            view_min = copy.deepcopy(reorder_indices)

    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Permutations are generated independently per element type, so cost is
    # the product of n_i! over atom types instead of N! for the whole set.
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)

    # generate full view from q shape to fill in atom view on the fly.
    # Initialise with -1 so an unfilled slot is distinguishable from index 0.
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1

    for atom in unique_atoms:
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)

        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]

        # Best permutation of B_coord onto A_coord by exhaustive search.
        view = brute_permutation(A_coord, B_coord)
        # Map the local (per-type) permutation back to global atom indices.
        view_reorder[p_atom_idx] = q_atom_idx[view]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension
    reorder_method : callable or None
        atom re-ordering strategy applied to each candidate; None skips it
    rotation_method : callable or None
        RMSD-after-rotation function; None compares coordinates directly
    keep_stereo : bool
        when True, skip swap/reflection combinations that mirror the molecule

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Sign (parity) of each entry in AXIS_SWAPS / AXIS_REFLECTIONS;
    # the product i * j == -1 flags an improper (mirroring) transform.
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply axis permutation, then sign flips, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] indexes with None (numpy inserts an axis); the
    # broadcast comparison still works element-wise but looks accidental —
    # confirm intended behavior.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Note: nothing is written to stdout; the formatted molecule is returned.
    Use print_coordinates() to print it. (The previous docstring incorrectly
    claimed this function prints.)

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    N, D = V.shape

    # Element symbol left-justified in two chars, then three fixed-width floats.
    fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3

    out = [str(N), title]
    for i in range(N):
        atom = atoms[i]
        # Capitalise only the first letter; keep the rest as-is (e.g. "Cl").
        atom = atom[0].upper() + atom[1:]
        out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
    return "\n".join(out)
def print_coordinates(atoms, V, title=""):
    """
    Write the molecule to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz_text = set_coordinates(atoms, V, title=title)
    print(xyz_text)
    return
def get_coordinates(filename, fmt):
    """
    Read coordinates from *filename* in format *fmt*. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table instead of an if/elif chain.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()

    for line in lines:
        # Stop at the end of the first chain / model.
        if line.startswith("TER") or line.startswith("END"):
            break
        if line.startswith("ATOM"):
            tokens = line.split()
            # Try to get the atomtype
            try:
                atom = tokens[2][0]
                if atom in ("H", "C", "N", "O", "S", "P"):
                    atoms.append(atom)
                else:
                    # e.g. 1HD1: names may begin with a digit, so the
                    # element symbol is the second character.
                    atom = tokens[2][1]
                    if atom == "H":
                        atoms.append(atom)
                    else:
                        raise Exception
            except:
                exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

            if x_column == None:
                try:
                    # look for x column: the first run of three consecutive
                    # tokens that all contain a decimal point.
                    for i, x in enumerate(tokens):
                        if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                            x_column = i
                            break
                except IndexError:
                    exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

            # Try to read the coordinates
            try:
                V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
            except:
                # If that doesn't work, use hardcoded indices
                # (the fixed PDB columns 31-38 / 39-46 / 47-54).
                try:
                    x = line[30:38]
                    y = line[38:46]
                    z = line[46:54]
                    V.append(np.asarray([x, y ,z], dtype=float))
                except:
                    exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # Use a context manager so the handle is closed on every exit path
    # (the original opened the file without `with` and leaked it on exit()).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):

            if lines_read == n_atoms:
                break

            # First alphabetic run on the line is taken as the element symbol.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parse arguments, load structures A and B, optionally filter atoms
    (hydrogens / index lists), reorder and/or scan reflections, then print
    either the RMSD or structure B aligned onto A in XYZ format.
    """
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    # No arguments at all: show the full help text instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view
    p_view = None
    q_view = None
    # Three mutually exclusive atom filters: drop hydrogens, drop listed
    # indices, or keep only the listed indices.
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): this chain also runs when --reorder is off, so it
    # overwrites reorder_method and an unknown --reorder-method still
    # aborts — confirm this is intended.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | reorder_distance | python | def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
# Project the order of P onto Q
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder | Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered) | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L273-L319 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the x/y/z axes, scanned by check_reflections().
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All 8 sign combinations (mirror reflections) of the three axes.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    V and W must have the same shape (the previous loop-based version
    silently truncated to the shorter set while still dividing by len(V)).

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    # Sum of squared deviations over all points and dimensions, averaged
    # over the N points — vectorized instead of a Python double loop.
    N = len(V)
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    # Optionally shift both point sets so their centroids sit at the origin.
    if translate:
        Q = Q - centroid(Q)
        P = P - centroid(P)
    # Superimpose P onto Q, then measure the residual deviation.
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    # Compute the optimal rotation, then apply it to every row of P.
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q, centered
    around the centroid. Each vector set is represented as an NxD
    matrix, where D is the the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two (already centered) point sets.
    covariance = np.dot(np.transpose(P), Q)
    # Singular value decomposition of the covariance; the optimal rotation
    # is assembled from the singular vectors.
    V, S, W = np.linalg.svd(covariance)
    # A negative determinant product means V.W would be an improper
    # rotation (reflection); flip the last singular vector so the result
    # stays a right-handed rotation.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    # Find the quaternion-derived optimal rotation, apply it to P, measure.
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Get optimal rotation
    note: translation will be zero when the centroids of each molecule are
    the same
    """
    # Build the 3x3 rotation matrix from quaternion r via the W/Q matrix
    # representation (upper-left block of W^T.Q).
    return makeW(*r).T.dot(makeQ(*r))[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # Row-by-row layout of the quaternion "W" matrix.
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    # Row-by-row layout of the quaternion "Q" matrix.
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    n_points = X.shape[0]
    # Per-point quaternion matrices for the target (W) and source (Q).
    W = np.asarray([makeW(*Y[k]) for k in range(n_points)])
    Q = np.asarray([makeQ(*X[k]) for k in range(n_points)])
    # Accumulate Q^T.W over all points; the eigenvector of the largest
    # eigenvalue is the optimal quaternion.
    # (The original also built W[k] - Q[k] for every point and never used
    # it — that dead computation is removed here.)
    A = np.sum(np.asarray([np.dot(Q[k].T, W[k]) for k in range(n_points)]), axis=0)
    eigenvalues, eigenvectors = np.linalg.eigh(A)
    best = eigenvectors[:, eigenvalues.argmax()]
    return quaternion_transform(best)
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    # Mean over the point axis gives one coordinate per dimension.
    return X.mean(axis=0)
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only
    """
    # Pairwise Euclidean distances between the two coordinate sets.
    cost = cdist(A, B, 'euclidean')
    # Solve the linear assignment problem; the column indices map rows of
    # B onto rows of A at minimal total distance.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from -1 so any unmatched slot is easy to spot.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    # Solve the assignment separately for each element type.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        mapping = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[mapping]
    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list

    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Note: the same list object is yielded every time, mutated in place;
    consume or copy each permutation before advancing the generator.
    """
    counters = [0] * n
    yield elements
    idx = 0
    while idx < n:
        if counters[idx] < idx:
            # Even index swaps with slot 0, odd index with its counter slot.
            other = 0 if idx % 2 == 0 else counters[idx]
            elements[other], elements[idx] = elements[idx], elements[other]
            yield elements
            counters[idx] += 1
            idx = 0
        else:
            counters[idx] = 0
            idx += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None
    # Try every row permutation of B and keep the one that gives the lowest
    # Kabsch-aligned RMSD against A.
    n = A.shape[0]
    for candidate in generate_permutations(list(range(n)), n):
        trial_rmsd = kabsch_rmsd(A, B[candidate])
        if trial_rmsd < best_rmsd:
            best_rmsd = trial_rmsd
            # The generator mutates its list in place, so store a copy.
            best_view = copy.deepcopy(candidate)
    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks slots that were never matched.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    # Brute-force the optimal permutation independently per element type.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        mapping = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[mapping]
    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity of each axis permutation / reflection in AXIS_SWAPS and
    # AXIS_REFLECTIONS; the product i*j == -1 means the combination is an
    # improper transform (mirror image).
    swap_mask = [1, -1, -1, 1, -1, 1]
    reflection_mask = [1, -1, -1, -1, 1, 1, 1, -1]
    # Exhaustively try every axis swap x reflection of Q.
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Permute axes, apply the sign flips, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # Sanity check: the best reordering must map q's atoms onto p's.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    row_fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3
    # XYZ header: atom count on the first line, title on the second.
    lines = [str(n_atoms), title]
    for i in range(n_atoms):
        # Capitalize only the first letter of the element symbol.
        symbol = atoms[i]
        symbol = symbol[0].upper() + symbol[1:]
        lines.append(row_fmt.format(symbol, V[i, 0], V[i, 1], V[i, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate the formatting, then write the XYZ text to stdout.
    xyz_text = set_coordinates(atoms, V, title=title)
    print(xyz_text)
    return
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on the requested file format.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # First chain only: stop at the chain/file terminator.
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are not swallowed.
                except Exception:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # "is None" instead of "== None" (PEP 8 identity test).
                if x_column is None:
                    try:
                        # look for x column
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except Exception:
                    # If that doesn't work, use hardcoded indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except Exception:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # Every parsed atom must have exactly one coordinate row.
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0
    # Use a context manager so the file is closed even when exit() aborts
    # parsing mid-file (the original leaked the handle in that case).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # First alphabetic token on the line is taken as the element symbol.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Signed decimals, optionally with an exponent (e.g. 1.0E-3).
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parse arguments, load structures A and B, optionally filter atoms
    (hydrogens / index lists), reorder and/or scan reflections, then print
    either the RMSD or structure B aligned onto A in XYZ format.
    """
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    # No arguments at all: show the full help text instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view
    p_view = None
    q_view = None
    # Three mutually exclusive atom filters: drop hydrogens, drop listed
    # indices, or keep only the listed indices.
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): this chain also runs when --reorder is off, so it
    # overwrites reorder_method and an unknown --reorder-method still
    # aborts — confirm this is intended.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | hungarian | python | def hungarian(A, B):
# should be kabasch here i think
distances = cdist(A, B, 'euclidean')
# Perform Hungarian analysis on distance matrix between atoms of 1st
# structure and trial structure
indices_a, indices_b = linear_sum_assignment(distances)
return indices_b | Hungarian reordering.
Assume A and B are coordinates for atoms of SAME type only | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L322-L336 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the x/y/z axes, scanned by check_reflections().
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All 8 sign combinations (mirror reflections) of the three axes.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    V and W must have the same shape (the previous loop-based version
    silently truncated to the shorter set while still dividing by len(V)).

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    # Sum of squared deviations over all points and dimensions, averaged
    # over the N points — vectorized instead of a Python double loop.
    N = len(V)
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    # Optionally shift both point sets so their centroids sit at the origin.
    if translate:
        Q = Q - centroid(Q)
        P = P - centroid(P)
    # Superimpose P onto Q, then measure the residual deviation.
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Return P rotated onto Q by the optimal Kabsch rotation matrix.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix of P after rotation.
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed to already be centered on their
    centroids; each is an (N, D) matrix.

    See http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two (already centered) point sets.
    cov = np.dot(np.transpose(P), Q)
    # The SVD of the covariance matrix yields the rotation factors.
    left, spectrum, right = np.linalg.svd(cov)
    # A negative determinant product means the best orthogonal map is a
    # reflection; flip the last singular value / column to force a proper,
    # right-handed rotation.
    if (np.linalg.det(left) * np.linalg.det(right)) < 0.0:
        spectrum[-1] = -spectrum[-1]
        left[:, -1] = -left[:, -1]
    return np.dot(left, right)
def quaternion_rmsd(P, Q):
    """
    Rotate P onto Q with the quaternion method and return the RMSD.
    Based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotated = np.dot(P, quaternion_rotate(P, Q))
    return rmsd(rotated, Q)
def quaternion_transform(r):
    """
    Convert a quaternion r into the corresponding 3x3 rotation matrix.
    note: translation will be zero when the centroids of each molecule are
    the same.
    """
    # W(r)^T Q(r) embeds the rotation in a 4x4 matrix; its upper-left
    # 3x3 block is the rotation itself.
    full = makeW(*r).T.dot(makeQ(*r))
    return full[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Auxiliary W matrix involved in the quaternion rotation fit.
    """
    rows = (
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Auxiliary Q matrix involved in the quaternion rotation fit.
    """
    rows = (
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation that maps X onto Y via the quaternion method.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Quaternion helper matrices for every paired point.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate Q^T W over all points; the eigenvector of the largest
    # eigenvalue is the optimal rotation quaternion.
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    # (The original also built an unused `W_minus_Q = W - Q` array here;
    # removed as dead work.)
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        (D,) centroid of the points. (The original docstring claimed
        "float"; the mean over axis 0 is a D-vector.)
    """
    # np.asarray generalizes the input: plain nested lists are now accepted
    # as well as ndarrays; ndarray behavior is unchanged.
    return np.asarray(X).mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q so that, element by element, their ranking by
    distance from the origin matches that of P.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    q_review = np.zeros(q_atoms.shape, dtype=int)
    # Atoms may only be matched within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        # Rank this element's atoms by distance from the origin.
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project Q's distance ordering back onto P's original ordering.
        projected = q_rank[np.argsort(p_rank)]
        q_review[p_idx] = q_idx[projected]
    return q_review
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P using the Hungarian assignment
    within each element.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from an all -1 view so unmatched slots are easy to spot.
    q_review = np.full(q_atoms.shape, -1, dtype=int)
    # Atoms may only be matched within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        q_review[p_idx] = q_idx[assignment]
    return q_review
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm
    """
    # NOTE(review): permutations are produced by mutating `elements` in
    # place and the SAME list object is yielded every time -- callers must
    # consume (or copy) each permutation before advancing the generator.
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's rule: swap position i with 0 (even i) or c[i] (odd i).
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    # O(N!) scan over every row ordering of B.
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the atom array and coordinate matrix
        coords_ordered = B[reorder_indices]
        # Calculate the RMSD between structure 1 and the Hungarian re-ordered
        # structure 2
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        # Replaces the atoms and coordinates with the current structure if the
        # RMSD is lower
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # deepcopy is required: generate_permutations yields the SAME
            # list object and keeps mutating it in place.
            view_min = copy.deepcopy(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P by exhaustively trying every
    permutation of same-element atoms.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from an all -1 view so unmatched slots are easy to spot.
    q_review = np.full(q_atoms.shape, -1, dtype=int)
    # Atoms may only be permuted within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        q_review[p_idx] = q_idx[best]
    return q_review
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q
    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # NOTE(review): these appear to be the parities (signs) of the
    # corresponding AXIS_SWAPS permutations and AXIS_REFLECTIONS sign
    # patterns; i * j == -1 then marks an overall improper (mirroring)
    # transform -- confirm against AXIS_* before relying on this.
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    # Try every combination of axis relabelling and axis sign flip on Q.
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # Sanity check that the winning reordering maps element labels onto
    # each other. NOTE(review): with reorder_method=None, min_review stays
    # None and q_atoms[None] adds an axis -- presumably only exercised when
    # atoms are already aligned; confirm.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")
    lines = [str(n_atoms), title]
    for idx in range(n_atoms):
        label = atoms[idx]
        # Capitalize only the first character of the element label.
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    print(set_coordinates(atoms, V, title=title))
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table instead of an if/elif chain.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Atom types (single-letter element symbols)
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
    for line in lines:
        # Only the first chain/model is read.
        if line.startswith("TER") or line.startswith("END"):
            break
        if line.startswith("ATOM"):
            tokens = line.split()
            # Try to get the atomtype
            try:
                atom = tokens[2][0]
                if atom in ("H", "C", "N", "O", "S", "P"):
                    atoms.append(atom)
                else:
                    # e.g. 1HD1
                    atom = tokens[2][1]
                    if atom == "H":
                        atoms.append(atom)
                    else:
                        raise Exception
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed
            except Exception:
                exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
            if x_column is None:  # was `== None`
                try:
                    # look for x column
                    for i, x in enumerate(tokens):
                        if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                            x_column = i
                            break
                except IndexError:
                    exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
            # Try to read the coordinates
            try:
                V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
            except Exception:
                # If that doesn't work, use hardcoded indices
                try:
                    x = line[30:38]
                    y = line[38:46]
                    z = line[46:54]
                    V.append(np.asarray([x, y, z], dtype=float))
                except Exception:
                    exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Atom types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0
    # `with` guarantees the handle is closed on every path; the original
    # leaked the handle when exit() fired on a malformed file.
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse arguments, load both structures,
    optionally filter/reorder/reflect, and print the RMSD (or the rotated
    structure B in XYZ format)."""
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    # With no arguments at all, show usage instead of failing on the two
    # required positionals.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view
    p_view = None
    q_view = None
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # Reordering/reflections operate on the filtered subset only, so a
        # full printable structure cannot be reconstructed afterwards.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below runs unconditionally, so reorder_method
    # is immediately reset to reorder_hungarian (the --reorder-method
    # default) even when --reorder was NOT given; --use-reflections will
    # then reorder anyway. Looks unintended -- confirm before changing.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # A reflection scan already produced result_rmsd; otherwise compute
        # it here with the chosen rotation method.
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
# Allow use both as an importable library and as a command-line script.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | generate_permutations | python | def generate_permutations(elements, n):
c = [0] * n
yield elements
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
elements[0], elements[i] = elements[i], elements[0]
else:
elements[c[i]], elements[i] = elements[i], elements[c[i]]
yield elements
c[i] += 1
i = 0
else:
c[i] = 0
i += 1 | Heap's algorithm for generating all n! permutations in a list
https://en.wikipedia.org/wiki/Heap%27s_algorithm | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L383-L403 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# The six permutations of the three coordinate axes; check_reflections
# iterates over these to try every axis relabelling of molecule Q.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# The eight sign patterns (mirror/reflection combinations) applied to the
# axes, also consumed by check_reflections.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized replacement for the original per-point Python loop:
    # sum of squared coordinate differences over all points, averaged
    # over the number of points. np.asarray keeps list input working.
    V = np.asarray(V)
    W = np.asarray(W)
    N = len(V)
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate P onto Q with the Kabsch algorithm and return the resulting RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Center both point sets on their centroids before rotating.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        P = P - centroid(P)
        Q = Q - centroid(Q)
    return rmsd(kabsch_rotate(P, Q), Q)
def kabsch_rotate(P, Q):
    """
    Return P rotated onto Q by the optimal Kabsch rotation matrix.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix of P after rotation.
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed to already be centered on their
    centroids; each is an (N, D) matrix.

    See http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two (already centered) point sets.
    cov = np.dot(np.transpose(P), Q)
    # The SVD of the covariance matrix yields the rotation factors.
    left, spectrum, right = np.linalg.svd(cov)
    # A negative determinant product means the best orthogonal map is a
    # reflection; flip the last singular value / column to force a proper,
    # right-handed rotation.
    if (np.linalg.det(left) * np.linalg.det(right)) < 0.0:
        spectrum[-1] = -spectrum[-1]
        left[:, -1] = -left[:, -1]
    return np.dot(left, right)
def quaternion_rmsd(P, Q):
    """
    Rotate P onto Q with the quaternion method and return the RMSD.
    Based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotated = np.dot(P, quaternion_rotate(P, Q))
    return rmsd(rotated, Q)
def quaternion_transform(r):
    """
    Convert a quaternion r into the corresponding 3x3 rotation matrix.
    note: translation will be zero when the centroids of each molecule are
    the same.
    """
    # W(r)^T Q(r) embeds the rotation in a 4x4 matrix; its upper-left
    # 3x3 block is the rotation itself.
    full = makeW(*r).T.dot(makeQ(*r))
    return full[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Auxiliary W matrix involved in the quaternion rotation fit.
    """
    rows = (
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Auxiliary Q matrix involved in the quaternion rotation fit.
    """
    rows = (
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation that maps X onto Y via the quaternion method.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Quaternion helper matrices for every paired point.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate Q^T W over all points; the eigenvector of the largest
    # eigenvalue is the optimal rotation quaternion.
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    # (The original also built an unused `W_minus_Q = W - Q` array here;
    # removed as dead work.)
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        (D,) centroid of the points. (The original docstring claimed
        "float"; the mean over axis 0 is a D-vector.)
    """
    # np.asarray generalizes the input: plain nested lists are now accepted
    # as well as ndarrays; ndarray behavior is unchanged.
    return np.asarray(X).mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q so that, element by element, their ranking by
    distance from the origin matches that of P.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    q_review = np.zeros(q_atoms.shape, dtype=int)
    # Atoms may only be matched within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        # Rank this element's atoms by distance from the origin.
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project Q's distance ordering back onto P's original ordering.
        projected = q_rank[np.argsort(p_rank)]
        q_review[p_idx] = q_idx[projected]
    return q_review
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only.
    """
    # Pairwise Euclidean distances form the assignment cost matrix.
    # (original note kept: should be kabasch here i think)
    cost = cdist(A, B, 'euclidean')
    # Optimal one-to-one matching between the rows of A and B.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P using the Hungarian assignment
    within each element.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from an all -1 view so unmatched slots are easy to spot.
    q_review = np.full(q_atoms.shape, -1, dtype=int)
    # Atoms may only be matched within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        q_review[p_idx] = q_idx[assignment]
    return q_review
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    # O(N!) scan over every row ordering of B.
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the atom array and coordinate matrix
        coords_ordered = B[reorder_indices]
        # Calculate the RMSD between structure 1 and the Hungarian re-ordered
        # structure 2
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        # Replaces the atoms and coordinates with the current structure if the
        # RMSD is lower
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # deepcopy is required: generate_permutations yields the SAME
            # list object and keeps mutating it in place.
            view_min = copy.deepcopy(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P by exhaustively trying every
    permutation of same-element atoms.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from an all -1 view so unmatched slots are easy to spot.
    q_review = np.full(q_atoms.shape, -1, dtype=int)
    # Atoms may only be permuted within their own element.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        q_review[p_idx] = q_idx[best]
    return q_review
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q
    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity (determinant sign) of each entry in AXIS_SWAPS and
    # AXIS_REFLECTIONS; the product i*j tells whether the combined
    # swap+reflection inverts chirality (an improper transform).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    # Exhaustively try every axis permutation combined with every sign flip
    # of Q's coordinates, keeping the combination with the lowest RMSD.
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply axis permutation, then sign flips, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # NOTE(review): if reorder_method is None, min_review stays None and
    # q_atoms[min_review] indexes with None (numpy adds an axis) — the
    # comparison still broadcasts, but confirm this path is intended.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as a string in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")

    # XYZ header: atom count, then the title line.
    lines = [str(n_atoms), title]
    for idx in range(n_atoms):
        label = atoms[idx]
        # Capitalize the element symbol (e.g. "h" -> "H", "cl" -> "Cl").
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate formatting to set_coordinates; this function only prints.
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on format; unknown formats terminate with a message.
    if fmt == "pdb":
        parser = get_coordinates_pdb
    elif fmt == "xyz":
        parser = get_coordinates_xyz
    else:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parser(filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype; narrow excepts replace the
                # original bare "except:" which also swallowed
                # KeyboardInterrupt/SystemExit.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise ValueError("unrecognized atom name")
                except (IndexError, ValueError):
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # "is None", not "== None" (identity test for the sentinel).
                if x_column is None:
                    try:
                        # look for x column: first run of three tokens that
                        # all contain a decimal point
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    # TypeError covers x_column still being None here.
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except (ValueError, TypeError):
                    # If that doesn't work, use hardcoded indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except ValueError:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()

    # "with" guarantees the handle is closed even when parsing bails out via
    # exit(); the original opened the file and never closed it on that path.
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # Element symbol: first alphabetic token on the line.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Floats with an optional exponent, e.g. -1.0 or 2.5E-3.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain exactly three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse arguments, load structures A and B,
    optionally filter and/or reorder atoms, then print either the RMSD or
    the rotated structure B in XYZ format."""
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}

See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show help instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    # Atoms differ and no reorder requested: bail out with guidance.
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.

Use --reorder to align the atoms (can be expensive for large structures).

Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # With a partial atom view, reorder/reflection results cannot be
        # mapped back onto the full structure for printing.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below runs unconditionally and overwrites the
    # None assigned above (args.reorder_method defaults to "hungarian"), so
    # reorder_method is never None when check_reflections is reached —
    # confirm whether these branches were meant to be guarded by
    # "args.reorder".
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # NOTE(review): truthiness test — a result_rmsd of exactly 0.0 falls
        # through and is recomputed below (harmless, same value).
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))

    return
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | brute_permutation | python | def brute_permutation(A, B):
rmsd_min = np.inf
view_min = None
# Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
# brute-force method
num_atoms = A.shape[0]
initial_order = list(range(num_atoms))
for reorder_indices in generate_permutations(initial_order, num_atoms):
# Re-order the atom array and coordinate matrix
coords_ordered = B[reorder_indices]
# Calculate the RMSD between structure 1 and the Hungarian re-ordered
# structure 2
rmsd_temp = kabsch_rmsd(A, coords_ordered)
# Replaces the atoms and coordinates with the current structure if the
# RMSD is lower
if rmsd_temp < rmsd_min:
rmsd_min = rmsd_temp
view_min = copy.deepcopy(reorder_indices)
return view_min | Re-orders the input atom list and xyz coordinates using the brute force
method of permuting all rows of the input coordinates
Parameters
----------
A : array
(N,D) matrix, where N is points and D is dimension
B : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view : array
(N,1) matrix, reordered view of B projected to A | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L406-L448 | [
"def generate_permutations(elements, n):\n \"\"\"\n Heap's algorithm for generating all n! permutations in a list\n https://en.wikipedia.org/wiki/Heap%27s_algorithm\n\n \"\"\"\n c = [0] * n\n yield elements\n i = 0\n while i < n:\n if c[i] < i:\n if i % 2 == 0:\n elements[0], elements[i] = elements[i], elements[0]\n else:\n elements[c[i]], elements[i] = elements[i], elements[c[i]]\n yield elements\n c[i] += 1\n i = 0\n else:\n c[i] = 0\n i += 1\n",
"def kabsch_rmsd(P, Q, translate=False):\n \"\"\"\n Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.\n\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n translate : bool\n Use centroids to translate vector P and Q unto each other.\n\n Returns\n -------\n rmsd : float\n root-mean squared deviation\n \"\"\"\n if translate:\n Q = Q - centroid(Q)\n P = P - centroid(P)\n\n P = kabsch_rotate(P, Q)\n return rmsd(P, Q)\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# The 6 permutations of the x/y/z axes, scanned by check_reflections.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# The 8 sign combinations (mirror reflections) of the x/y/z axes.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Accept plain nested lists as well as ndarrays, like the original.
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    # Vectorized replacement for the original per-component Python loop:
    # sqrt( sum_i |v_i - w_i|^2 / N )
    return np.sqrt(np.sum((V - W) ** 2) / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Shift both point sets onto their own centroids first.
        P = P - centroid(P)
        Q = Q - centroid(Q)

    rotated = kabsch_rotate(P, Q)
    return rmsd(rotated, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    # Apply the optimal rotation to every point in P.
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q, centered
    around the centroid. Each vector set is represented as an NxD
    matrix, where D is the the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two (already centered) point sets.
    C = np.dot(np.transpose(P), Q)

    # The optimal rotation is built from the singular vectors of C.
    V, S, W = np.linalg.svd(C)

    # If det(V)*det(W) < 0 the raw product V.W would be an improper rotation
    # (a reflection); flip the sign of the last singular vector to keep a
    # right-handed coordinate system.
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]

    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    aligned = np.dot(P, rotation)
    return rmsd(aligned, Q)
def quaternion_transform(r):
    """
    Get optimal rotation

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    # Build the 3x3 rotation from quaternion r via W(r)^T . Q(r), keeping
    # only the rotational (upper-left) part of the 4x4 product.
    rotation = makeW(*r).T.dot(makeQ(*r))[:3, :3]
    return rotation
def makeW(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    upper = [[r4, r3, -r2, r1],
             [-r3, r4, r1, r2]]
    lower = [[r2, -r1, r4, r3],
             [-r1, -r2, -r3, r4]]
    return np.asarray(upper + lower)
def makeQ(r1, r2, r3, r4=0):
    """
    matrix involved in quaternion rotation
    """
    upper = [[r4, -r3, r2, r1],
             [r3, r4, -r1, r2]]
    lower = [[-r2, r1, r4, r3],
             [-r1, -r2, -r3, r4]]
    return np.asarray(upper + lower)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Per-point quaternion matrices for both structures.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate A = sum_k Q_k^T W_k.  (The original also computed an
    # unused W-Q array here; that dead work is removed.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    # The eigenvector of A with the largest eigenvalue is the optimal
    # rotation quaternion.
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    # Mean over the point axis gives the geometric center.
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then by
    distance of each atom from the centroid.

    Parameters
    ----------
    atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    atoms_reordered : array
        (N,1) matrix, where N is points holding the ordered atoms' names
    coords_reordered : array
        (N,D) matrix, where N is points and D is dimension (rows re-ordered)
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    # Treat each element type independently.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        # Rank both subsets by distance from the origin.
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Project Q's distance ordering onto P's ordering.
        view = q_rank[np.argsort(p_rank)]
        view_reorder[p_idx] = q_idx[view]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only
    """
    # Pairwise Euclidean distances between the two point sets.
    # (should be kabsch here i think)
    cost = cdist(A, B, 'euclidean')

    # Optimal assignment minimizing the total distance; we only need the
    # column (B-side) indices.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks entries not yet assigned.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    # Solve an independent assignment problem per element type.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[assignment]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations of a list.

    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Parameters
    ----------
    elements : list
        List to permute; used as in-place scratch space.
    n : int
        Number of leading elements to permute.

    Yields
    ------
    list
        A fresh list per permutation.  The original yielded the SAME
        (mutated) list object every time, so collecting the results
        (e.g. list(generate_permutations(...))) produced n! aliases of one
        list; yielding copies fixes that while remaining compatible with
        callers that consume each permutation immediately.
    """
    c = [0] * n
    yield list(elements)
    i = 0
    while i < n:
        if c[i] < i:
            # Even i: swap with the first element; odd i: swap with c[i].
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield list(elements)
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q onto P by brute-force enumeration of all row
    permutations within each element type (see brute_permutation).

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start with every slot marked unassigned (-1), then fill per element type.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    # Solve the assignment independently for each element type present in P.
    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)
        best_view = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best_view]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q
    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity (determinant sign) of each entry in AXIS_SWAPS and
    # AXIS_REFLECTIONS; the product i*j tells whether the combined
    # swap+reflection inverts chirality (an improper transform).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    # Exhaustively try every axis permutation combined with every sign flip
    # of Q's coordinates, keeping the combination with the lowest RMSD.
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply axis permutation, then sign flips, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # NOTE(review): if reorder_method is None, min_review stays None and
    # q_atoms[min_review] indexes with None (numpy adds an axis) — the
    # comparison still broadcasts, but confirm this path is intended.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as a string in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")

    # XYZ header: atom count, then the title line.
    lines = [str(n_atoms), title]
    for idx in range(n_atoms):
        label = atoms[idx]
        # Capitalize the element symbol (e.g. "h" -> "H", "cl" -> "Cl").
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate formatting to set_coordinates; this function only prints.
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on format; unknown formats terminate with a message.
    if fmt == "pdb":
        parser = get_coordinates_pdb
    elif fmt == "xyz":
        parser = get_coordinates_xyz
    else:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parser(filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype; narrow excepts replace the
                # original bare "except:" which also swallowed
                # KeyboardInterrupt/SystemExit.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise ValueError("unrecognized atom name")
                except (IndexError, ValueError):
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # "is None", not "== None" (identity test for the sentinel).
                if x_column is None:
                    try:
                        # look for x column: first run of three tokens that
                        # all contain a decimal point
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    # TypeError covers x_column still being None here.
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except (ValueError, TypeError):
                    # If that doesn't work, use hardcoded indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except ValueError:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()

    # "with" guarantees the handle is closed even when parsing bails out via
    # exit(); the original opened the file and never closed it on that path.
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # Element symbol: first alphabetic token on the line.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Floats with an optional exponent, e.g. -1.0 or 2.5E-3.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain exactly three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
import argparse
import sys
description = __doc__
version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
version_msg = version_msg.format(__version__)
epilog = """
"""
parser = argparse.ArgumentParser(
usage='calculate_rmsd [options] FILE_A FILE_B',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
# Input structures
parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
parser.add_argument('structure_b', metavar='FILE_B', type=str)
# Admin
parser.add_argument('-v', '--version', action='version', version=version_msg)
# Rotation
parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
# Reorder arguments
parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
# Filter
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
# format and print
parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# As default, load the extension as format
if args.format is None:
args.format = args.structure_a.split('.')[-1]
p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
p_size = p_all.shape[0]
q_size = q_all.shape[0]
if not p_size == q_size:
print("error: Structures not same size")
quit()
if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
print(msg)
exit()
# Set local view
p_view = None
q_view = None
if args.no_hydrogen:
p_view = np.where(p_all_atoms != 'H')
q_view = np.where(q_all_atoms != 'H')
elif args.remove_idx:
index = range(p_size)
index = set(index) - set(args.remove_idx)
index = list(index)
p_view = index
q_view = index
elif args.add_idx:
p_view = args.add_idx
q_view = args.add_idx
# Set local view
if p_view is None:
p_coord = copy.deepcopy(p_all)
q_coord = copy.deepcopy(q_all)
p_atoms = copy.deepcopy(p_all_atoms)
q_atoms = copy.deepcopy(q_all_atoms)
else:
if args.reorder and args.output:
print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
quit()
if args.use_reflections and args.output:
print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
quit()
p_coord = copy.deepcopy(p_all[p_view])
q_coord = copy.deepcopy(q_all[q_view])
p_atoms = copy.deepcopy(p_all_atoms[p_view])
q_atoms = copy.deepcopy(q_all_atoms[q_view])
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
p_cent = centroid(p_coord)
q_cent = centroid(q_coord)
p_coord -= p_cent
q_coord -= q_cent
# set rotation method
if args.rotation.lower() == "kabsch":
rotation_method = kabsch_rmsd
elif args.rotation.lower() == "quaternion":
rotation_method = quaternion_rmsd
elif args.rotation.lower() == "none":
rotation_method = None
else:
print("error: Unknown rotation method:", args.rotation)
quit()
# set reorder method
if not args.reorder:
reorder_method = None
if args.reorder_method == "hungarian":
reorder_method = reorder_hungarian
elif args.reorder_method == "brute":
reorder_method = reorder_brute
elif args.reorder_method == "distance":
reorder_method = reorder_distance
else:
print("error: Unknown reorder method:", args.reorder_method)
quit()
# Save the resulting RMSD
result_rmsd = None
if args.use_reflections:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method)
elif args.use_reflections_keep_stereo:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method,
keep_stereo=True)
elif args.reorder:
q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
q_coord = q_coord[q_review]
q_atoms = q_atoms[q_review]
if not all(p_atoms == q_atoms):
print("error: Structure not aligned")
quit()
# print result
if args.output:
if args.reorder:
if q_review.shape[0] != q_all.shape[0]:
print("error: Reorder length error. Full atom list needed for --print")
quit()
q_all = q_all[q_review]
q_all_atoms = q_all_atoms[q_review]
# Get rotation matrix
U = kabsch(q_coord, p_coord)
# recenter all atoms and rotate all atoms
q_all -= q_cent
q_all = np.dot(q_all, U)
# center q on p's original coordinates
q_all += p_cent
# done and done
xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
print(xyz)
else:
if result_rmsd:
pass
elif rotation_method is None:
result_rmsd = rmsd(p_coord, q_coord)
else:
result_rmsd = rotation_method(p_coord, q_coord)
print("{0}".format(result_rmsd))
return
if __name__ == "__main__":
main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | reorder_brute | python | def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = brute_permutation(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder | Re-orders the input atom list and xyz coordinates using all permutation of
rows (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L451-L492 | [
"def brute_permutation(A, B):\n \"\"\"\n Re-orders the input atom list and xyz coordinates using the brute force\n method of permuting all rows of the input coordinates\n\n Parameters\n ----------\n A : array\n (N,D) matrix, where N is points and D is dimension\n B : array\n (N,D) matrix, where N is points and D is dimension\n\n Returns\n -------\n view : array\n (N,1) matrix, reordered view of B projected to A\n \"\"\"\n\n rmsd_min = np.inf\n view_min = None\n\n # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in\n # brute-force method\n\n num_atoms = A.shape[0]\n initial_order = list(range(num_atoms))\n\n for reorder_indices in generate_permutations(initial_order, num_atoms):\n\n # Re-order the atom array and coordinate matrix\n coords_ordered = B[reorder_indices]\n\n # Calculate the RMSD between structure 1 and the Hungarian re-ordered\n # structure 2\n rmsd_temp = kabsch_rmsd(A, coords_ordered)\n\n # Replaces the atoms and coordinates with the current structure if the\n # RMSD is lower\n if rmsd_temp < rmsd_min:\n rmsd_min = rmsd_temp\n view_min = copy.deepcopy(reorder_indices)\n\n return view_min\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized replacement for the original per-element Python loops:
    # sqrt of the mean (over the N points) of the squared point-to-point
    # distances. np.asarray makes plain lists work exactly as before.
    diff = np.asarray(V) - np.asarray(W)
    N = len(V)
    return np.sqrt((diff * diff).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P onto Q using the Kabsch algorithm and return the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their centroids first.
        Q = Q - centroid(Q)
        P = P - centroid(P)
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P onto matrix Q using the Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    # Apply the optimal rotation to P.
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q with the Kabsch
    algorithm. Both point sets are NxD matrices assumed already centered on
    their centroids.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the paired point sets.
    covariance = np.dot(np.transpose(P), Q)
    # SVD of the covariance gives the rotation up to a possible improper
    # (left-handed) component.
    V, S, W = np.linalg.svd(covariance)
    improper = np.linalg.det(V) * np.linalg.det(W) < 0.0
    if improper:
        # Flip the sign of the last singular vector to keep a proper,
        # right-handed rotation.
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P onto Q using quaternion-based rotation and calculate
    the RMSD, based on doi:10.1016/1049-9660(91)90036-O.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    aligned = np.dot(P, rotation)
    return rmsd(aligned, Q)
def quaternion_transform(r):
    """
    Build the optimal 3x3 rotation matrix from quaternion ``r``.

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    # W(r)^T Q(r) is a 4x4 matrix whose upper-left 3x3 block is the
    # rotation encoded by the quaternion.
    product = np.dot(makeW(*r).T, makeQ(*r))
    return product[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Build the 4x4 left-multiplication matrix W involved in quaternion
    rotation, from quaternion components (r1, r2, r3, r4).
    """
    rows = [
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Build the 4x4 right-multiplication matrix Q involved in quaternion
    rotation, from quaternion components (r1, r2, r3, r4).
    """
    rows = [
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4],
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that rotates X onto Y using quaternions.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Accumulate the 4x4 quaternion cost matrix A = sum_k Q(X_k)^T W(Y_k).
    # The original also computed W[k] - Q[k] for every point but never used
    # the result; that dead work is dropped here.
    Qt_dot_W = np.asarray([np.dot(makeQ(*X[k]).T, makeW(*Y[k])) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    # The optimal quaternion is the eigenvector of A with the largest
    # eigenvalue (eigh returns eigenvalues in ascending order).
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Centroid: the mean position of all the points, per coordinate
    direction, of the vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P, atom type by atom type, by sorting
    each type's atoms on their distance from the origin (the structures are
    expected to be centered beforehand).

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    for atom_type in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == atom_type)
        q_idx, = np.where(q_atoms == atom_type)
        # Rank both subsets by distance from the centroid/origin.
        p_order = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_order = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project P's ordering onto Q: invert P's permutation, then apply it
        # to Q's sorted order.
        view = q_order[np.argsort(p_order)]
        view_reorder[p_idx] = q_idx[view]
    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering: optimal assignment of the rows of B to the rows
    of A, minimizing total euclidean distance.

    Assume A and B are coordinates for atoms of SAME type only.
    """
    # Pairwise distance matrix between the two point sets.
    cost = cdist(A, B, 'euclidean')
    # Optimal assignment on the cost matrix; only the column permutation
    # is needed (rows come back in sorted order).
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q to match P using the Hungarian assignment,
    solved independently for each atom type.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Start from -1 so unassigned slots are detectable.
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    for atom_type in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == atom_type)
        q_idx, = np.where(q_atoms == atom_type)
        view = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[view]
    return view_reorder
def generate_permutations(elements, n):
    """
    Yield all n! permutations of ``elements`` in place using Heap's
    algorithm (https://en.wikipedia.org/wiki/Heap%27s_algorithm).

    Note: the SAME list object is yielded each time and mutated between
    yields; callers must copy a permutation to keep it.
    """
    counters = [0] * n
    yield elements
    idx = 0
    while idx < n:
        if counters[idx] >= idx:
            counters[idx] = 0
            idx += 1
            continue
        # Even levels swap with position 0, odd levels with counters[idx].
        swap = 0 if idx % 2 == 0 else counters[idx]
        elements[swap], elements[idx] = elements[idx], elements[swap]
        yield elements
        counters[idx] += 1
        idx = 0
def brute_permutation(A, B):
    """
    Find the row ordering of B that best matches A by exhaustively trying
    every permutation and keeping the one with the lowest Kabsch RMSD.

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None
    # Initial ordering [0, 1, ..., N-1]; the generator permutes it in place.
    order = list(range(A.shape[0]))
    for candidate in generate_permutations(order, len(order)):
        score = kabsch_rmsd(A, B[candidate])
        if score < best_rmsd:
            best_rmsd = score
            # The generator mutates `candidate` between yields: snapshot it.
            best_view = copy.deepcopy(candidate)
    return best_view
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity of each axis permutation / reflection in AXIS_SWAPS and
    # AXIS_REFLECTIONS; the product i * j == -1 marks a combination that
    # flips chirality (an enantiomer).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            # Work on copies so the caller's q_atoms/q_coord are untouched
            # between trials.
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Permute the axes, mirror them, then re-center on the centroid.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # Sanity check: the winning reordering must map Q's atoms onto P's.
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] adds an axis before the broadcasted comparison;
    # it still evaluates, but this looks accidental - confirm intent.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as a string in XYZ
    format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms = V.shape[0]
    # 2-char element label followed by three fixed-width float columns.
    line_fmt = "{{:2s}} {{:15.{0}f}} {{:15.{0}f}} {{:15.{0}f}}".format(decimals)
    lines = [str(n_atoms), title]
    for i in range(n_atoms):
        label = atoms[i]
        label = label[0].upper() + label[1:]
        lines.append(line_fmt.format(label, V[i, 0], V[i, 1], V[i, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz_block = set_coordinates(atoms, V, title=title)
    print(xyz_block)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on the declared format; abort on anything unrecognized.
    if fmt == "xyz":
        reader = get_coordinates_xyz
    elif fmt == "pdb":
        reader = get_coordinates_pdb
    else:
        exit("Could not recognize file format: {:s}".format(fmt))
    return reader(filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Array of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # Stop at the end of the first chain/model.
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                # Idiom fix: was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.
                except Exception:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # Idiom fix: was `x_column == None`.
                if x_column is None:
                    try:
                        # look for x column: first token followed by two more
                        # tokens that all contain a decimal point.
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except Exception:
                    # If that doesn't work, use hardcoded indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except Exception:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # One coordinate row per parsed atom label.
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        Array of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    # Fix: use a context manager so the handle is closed even when a parse
    # failure calls exit() (the original leaked the open file on every
    # error path, since exit() raises SystemExit before f.close()).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # Element symbol: the first alphabetic token on the line.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Floats, with optional scientific notation.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain exacly three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """
    Command-line entry point.

    Parses arguments, loads two structures (xyz or pdb), optionally filters
    and/or reorders atoms, then prints either the RMSD or (with --print)
    structure B rotated onto structure A in XYZ format.
    """
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    # No arguments at all: show help and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    # Mismatched atom labels require --reorder.
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view
    p_view = None
    q_view = None
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # A filtered view cannot be combined with --print, because the full
        # atom list would be needed to emit a complete structure.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below overrides the None set above whenever
    # --reorder-method is valid (its default is "hungarian"), so a reorder
    # method is always selected even without --reorder - confirm intent.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # RMSD may already be set by the reflection scan; otherwise compute
        # it with the selected rotation method (or none).
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
if __name__ == "__main__":
main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | check_reflections | python | def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
reorder_method=reorder_hungarian,
rotation_method=kabsch_rmsd,
keep_stereo=False):
min_rmsd = np.inf
min_swap = None
min_reflection = None
min_review = None
tmp_review = None
swap_mask = [1,-1,-1,1,-1,1]
reflection_mask = [1,-1,-1,-1,1,1,1,-1]
for swap, i in zip(AXIS_SWAPS, swap_mask):
for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
if keep_stereo and i * j == -1: continue # skip enantiomers
tmp_atoms = copy.copy(q_atoms)
tmp_coord = copy.deepcopy(q_coord)
tmp_coord = tmp_coord[:, swap]
tmp_coord = np.dot(tmp_coord, np.diag(reflection))
tmp_coord -= centroid(tmp_coord)
# Reorder
if reorder_method is not None:
tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
tmp_coord = tmp_coord[tmp_review]
tmp_atoms = tmp_atoms[tmp_review]
# Rotation
if rotation_method is None:
this_rmsd = rmsd(p_coord, tmp_coord)
else:
this_rmsd = rotation_method(p_coord, tmp_coord)
if this_rmsd < min_rmsd:
min_rmsd = this_rmsd
min_swap = swap
min_reflection = reflection
min_review = tmp_review
if not (p_atoms == q_atoms[min_review]).all():
print("error: Not aligned")
quit()
return min_rmsd, min_swap, min_reflection, min_review | Minimize RMSD using reflection planes for molecule P and Q
Warning: This will affect stereo-chemistry
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
min_rmsd
min_swap
min_reflection
min_review | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L495-L564 | [
"def centroid(X):\n \"\"\"\n Centroid is the mean position of all the points in all of the coordinate\n directions, from a vectorset X.\n\n https://en.wikipedia.org/wiki/Centroid\n\n C = sum(X)/len(X)\n\n Parameters\n ----------\n X : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n C : float\n centroid\n \"\"\"\n C = X.mean(axis=0)\n return C\n",
"def kabsch_rmsd(P, Q, translate=False):\n \"\"\"\n Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.\n\n Parameters\n ----------\n P : array\n (N,D) matrix, where N is points and D is dimension.\n Q : array\n (N,D) matrix, where N is points and D is dimension.\n translate : bool\n Use centroids to translate vector P and Q unto each other.\n\n Returns\n -------\n rmsd : float\n root-mean squared deviation\n \"\"\"\n if translate:\n Q = Q - centroid(Q)\n P = P - centroid(P)\n\n P = kabsch_rotate(P, Q)\n return rmsd(P, Q)\n",
"def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):\n \"\"\"\n Re-orders the input atom list and xyz coordinates using the Hungarian\n method (using optimized column results)\n\n Parameters\n ----------\n p_atoms : array\n (N,1) matrix, where N is points holding the atoms' names\n p_atoms : array\n (N,1) matrix, where N is points holding the atoms' names\n p_coord : array\n (N,D) matrix, where N is points and D is dimension\n q_coord : array\n (N,D) matrix, where N is points and D is dimension\n\n Returns\n -------\n view_reorder : array\n (N,1) matrix, reordered indexes of atom alignment based on the\n coordinates of the atoms\n\n \"\"\"\n\n # Find unique atoms\n unique_atoms = np.unique(p_atoms)\n\n # generate full view from q shape to fill in atom view on the fly\n view_reorder = np.zeros(q_atoms.shape, dtype=int)\n view_reorder -= 1\n\n for atom in unique_atoms:\n p_atom_idx, = np.where(p_atoms == atom)\n q_atom_idx, = np.where(q_atoms == atom)\n\n A_coord = p_coord[p_atom_idx]\n B_coord = q_coord[q_atom_idx]\n\n view = hungarian(A_coord, B_coord)\n view_reorder[p_atom_idx] = q_atom_idx[view]\n\n return view_reorder\n",
"def rmsd(V, W):\n \"\"\"\n Calculate Root-mean-square deviation from two sets of vectors V and W.\n\n Parameters\n ----------\n V : array\n (N,D) matrix, where N is points and D is dimension.\n W : array\n (N,D) matrix, where N is points and D is dimension.\n\n Returns\n -------\n rmsd : float\n Root-mean-square deviation between the two vectors\n \"\"\"\n D = len(V[0])\n N = len(V)\n result = 0.0\n for v, w in zip(V, W):\n result += sum([(v[i] - w[i])**2.0 for i in range(D)])\n return np.sqrt(result/N)\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the three coordinate axes; used by check_reflections
# to scan axis re-orderings of structure Q.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All 8 sign combinations for mirroring coordinates in the axis planes;
# used by check_reflections together with AXIS_SWAPS.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    # Vectorized replacement of the original per-point Python loop:
    # sum of squared component differences over all points, averaged over
    # the number of points N (not N*D), exactly as before.
    return np.sqrt(((V - W) ** 2).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their centroids before rotating.
        P = P - centroid(P)
        Q = Q - centroid(Q)
    rotated = kabsch_rotate(P, Q)
    return rmsd(rotated, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q, centered
    around the centroid. Each vector set is represented as an NxD
    matrix, where D is the the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two paired point sets.
    C = np.dot(np.transpose(P), Q)
    # SVD of the covariance matrix yields the optimal rotation.
    V, S, W = np.linalg.svd(C)
    # If det(V)*det(W) < 0 the naive product would be a reflection, not a
    # proper rotation; flip the last singular direction to restore a
    # right-handed coordinate system.
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Convert a quaternion r into its 3x3 rotation matrix.

    note: translation will be zero when the centroids of each molecule are the
    same
    """
    left = makeW(*r).T
    right = makeQ(*r)
    # The upper-left 3x3 block of W^T Q is the rotation matrix.
    return left.dot(right)[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation (left-multiplication form).
    """
    row0 = [r4, r3, -r2, r1]
    row1 = [-r3, r4, r1, r2]
    row2 = [r2, -r1, r4, r3]
    row3 = [-r1, -r2, -r3, r4]
    return np.asarray([row0, row1, row2, row3])
def makeQ(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation (right-multiplication form).
    """
    row0 = [r4, -r3, r2, r1]
    row1 = [r3, r4, -r1, r2]
    row2 = [-r2, r1, r4, r3]
    row3 = [-r1, -r2, -r3, r4]
    return np.asarray([row0, row1, row2, row3])
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that best maps X onto Y using the
    quaternion eigenvalue method.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # A = sum_k Q_k^T W_k; the eigenvector of A with the largest eigenvalue
    # is the optimal rotation quaternion.
    # (The original also built an unused W_minus_Q array; removed as dead code.)
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    A = np.sum(Qt_dot_W, axis=0)
    eigenvalues, eigenvectors = np.linalg.eigh(A)
    r = eigenvectors[:, eigenvalues.argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid
    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        (D,) centroid, the mean over the point axis
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then by
    distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix of indexes into Q that aligns Q's atoms to P's,
        matched per element by distance rank from the origin
        (structures are assumed pre-centered on their centroids)
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    for atom in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == atom)
        q_idx, = np.where(q_atoms == atom)
        # Rank each subset by distance from the origin.
        order_p = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        order_q = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project Q's distance ordering onto P's ordering.
        view = order_q[np.argsort(order_p)]
        view_reorder[p_idx] = q_idx[view]
    return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only.

    Parameters
    ----------
    A : array
        (N,D) coordinates for one atom type
    B : array
        (N,D) coordinates for the same atom type

    Returns
    -------
    indices_b : array
        (N,) permutation of B's rows minimizing total pairwise distance
    """
    distances = cdist(A, B, 'euclidean')
    # Optimal assignment on the distance matrix; only the column permutation
    # is needed (row indices come back sorted), so discard the row indices.
    _, indices_b = linear_sum_assignment(distances)
    return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 sentinel marks positions not assigned (should never survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    for atom in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == atom)
        q_idx, = np.where(q_atoms == atom)
        # Optimal assignment within this element type.
        mapping = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[mapping]
    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: every permutation is produced by mutating and re-yielding the
    *same* list object, so callers that keep results must copy each one.
    """
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's swap rule: swap with position 0 for even i,
            # with position c[i] for odd i.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Initial row ordering [0, 1, ..., N-1]; Heap's generator permutes it
    # in place.
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the coordinate matrix and score the candidate alignment.
        coords_ordered = B[reorder_indices]
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # Snapshot the current permutation: the generator mutates the
            # list in place. A shallow copy suffices for a flat list of
            # ints (the original used copy.deepcopy needlessly).
            view_min = list(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation of
    rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 sentinel marks positions not assigned (should never survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    for atom in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == atom)
        q_idx, = np.where(q_atoms == atom)
        # Exhaustive permutation search within this element type.
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best]
    return view_reorder
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ-format string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms, _ = V.shape
    line_fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3
    # XYZ header: atom count, then the title line.
    lines = [str(n_atoms), title]
    for idx in range(n_atoms):
        label = atoms[idx]
        # Capitalize only the first character (e.g. "cl" -> "Cl").
        label = label[0].upper() + label[1:]
        lines.append(line_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))
    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate formatting; this wrapper only writes to stdout.
    print(set_coordinates(atoms, V, title=title))
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table of supported parsers.
    parsers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in parsers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return parsers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : ndarray
        Element symbols (one character each)
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
    for line in lines:
        # Stop at the end of the first chain/model.
        if line.startswith("TER") or line.startswith("END"):
            break
        if line.startswith("ATOM"):
            tokens = line.split()
            # Try to get the atomtype from the first character of the atom
            # name; fall back to the second character for names like "1HD1".
            try:
                atom = tokens[2][0]
                if atom in ("H", "C", "N", "O", "S", "P"):
                    atoms.append(atom)
                else:
                    # e.g. 1HD1
                    atom = tokens[2][1]
                    if atom == "H":
                        atoms.append(atom)
                    else:
                        raise Exception
            except:
                # NOTE(review): bare except is the deliberate error funnel
                # here (the inner raise lands in it), but it also hides
                # unrelated failures such as short token lists.
                exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
            # Locate the x column once, from the first ATOM record: the
            # first of three consecutive tokens containing a decimal point.
            # NOTE(review): `== None` should be `is None` (PEP 8).
            if x_column == None:
                try:
                    # look for x column
                    for i, x in enumerate(tokens):
                        if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                            x_column = i
                            break
                except IndexError:
                    exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
            # Try to read the coordinates using the detected column.
            try:
                V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
            except:
                # If that doesn't work, use hardcoded indices
                # (the fixed-width PDB column layout).
                try:
                    x = line[30:38]
                    y = line[38:46]
                    z = line[46:54]
                    V.append(np.asarray([x, y ,z], dtype=float))
                except:
                    exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # One coordinate triple per parsed atom, or the file was inconsistent.
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : ndarray
        Atomic symbols, upper-cased
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0
    # Context manager guarantees the file is closed even when exit()
    # raises SystemExit on malformed input (the original leaked the
    # handle on those paths).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # First alphabetic token is the element symbol.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Decimal numbers, with optional scientific-notation exponent.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are not valid unless we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    return np.array(atoms), np.array(V)
def main():
    """Command-line entry point: parse arguments, load both structures,
    optionally filter/reorder atoms, then print an RMSD or the rotated
    structure B in XYZ format."""
    import argparse
    import sys
    description = __doc__
    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)
    epilog = """
"""
    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)
    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)
    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)
    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]
    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
    p_size = p_all.shape[0]
    q_size = q_all.shape[0]
    # RMSD is only defined for equally-sized structures.
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()
    # Set local view (atom-index filter selected by the mutually
    # exclusive --no-hydrogen / --remove-idx / --add-idx options).
    p_view = None
    q_view = None
    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx
    # Apply the view: work on (deep) copies so p_all/q_all stay intact
    # for the optional --print output below.
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # --print needs the full atom list, which filtering destroys.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])
    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent
    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()
    # set reorder method
    # NOTE(review): this None assignment is immediately overwritten by the
    # chain below (default --reorder-method is "hungarian"), so a reorder
    # method is passed to check_reflections even without --reorder —
    # confirm that is the intended behavior.
    if not args.reorder:
        reorder_method = None
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()
    # Save the resulting RMSD
    result_rmsd = None
    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()
    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # NOTE(review): truthiness test — a legitimate 0.0 RMSD from
        # check_reflections is treated as unset and recomputed here
        # (benign for the value printed, but `is not None` would be
        # clearer and cheaper).
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))
    return
# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | set_coordinates | python | def set_coordinates(atoms, V, title="", decimals=8):
N, D = V.shape
fmt = "{:2s}" + (" {:15."+str(decimals)+"f}")*3
out = list()
out += [str(N)]
out += [title]
for i in range(N):
atom = atoms[i]
atom = atom[0].upper() + atom[1:]
out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
return "\n".join(out) | Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of atomic types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
decimals : int (optional)
number of decimals for the coordinates
Return
------
output : str
Molecule in XYZ format | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L567-L600 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
"""
Calculate Root-mean-square deviation from two sets of vectors V and W.
Parameters
----------
V : array
(N,D) matrix, where N is points and D is dimension.
W : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
Root-mean-square deviation between the two vectors
"""
D = len(V[0])
N = len(V)
result = 0.0
for v, w in zip(V, W):
result += sum([(v[i] - w[i])**2.0 for i in range(D)])
return np.sqrt(result/N)
def kabsch_rmsd(P, Q, translate=False):
"""
Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
translate : bool
Use centroids to translate vector P and Q unto each other.
Returns
-------
rmsd : float
root-mean squared deviation
"""
if translate:
Q = Q - centroid(Q)
P = P - centroid(P)
P = kabsch_rotate(P, Q)
return rmsd(P, Q)
def kabsch_rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
P : array
(N,D) matrix, where N is points and D is dimension,
rotated
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def kabsch(P, Q):
"""
Using the Kabsch algorithm with two sets of paired point P and Q, centered
around the centroid. Each vector set is represented as an NxD
matrix, where D is the the dimension of the space.
The algorithm works in three steps:
- a centroid translation of P and Q (assumed done before this function
call)
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
U : matrix
Rotation matrix (D,D)
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of the det(V)*(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
def quaternion_rmsd(P, Q):
"""
Rotate matrix P unto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q)
def quaternion_transform(r):
    """
    Get optimal rotation (3x3 matrix) from quaternion r.

    note: translation will be zero when the centroids of each molecule are the
    same
    """
    Wt_r = makeW(*r).T
    Q_r = makeQ(*r)
    # Upper-left 3x3 block of W^T Q is the rotation matrix.
    rot = Wt_r.dot(Q_r)[:3, :3]
    return rot
def makeW(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation (left-multiplication form).
    """
    W = np.asarray([
        [r4, r3, -r2, r1],
        [-r3, r4, r1, r2],
        [r2, -r1, r4, r3],
        [-r1, -r2, -r3, r4]])
    return W
def makeQ(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation (right-multiplication form).
    """
    Q = np.asarray([
        [r4, -r3, r2, r1],
        [r3, r4, -r1, r2],
        [-r2, r1, r4, r3],
        [-r1, -r2, -r3, r4]])
    return Q
def quaternion_rotate(X, Y):
"""
Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D)
"""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid
    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        (D,) centroid, the mean over the point axis
    """
    C = X.mean(axis=0)
    return C
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
atoms : array
(N,1) matrix, where N is points holding the atoms' names
coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A = np.argsort(A_norms)
reorder_indices_B = np.argsort(B_norms)
# Project the order of P onto Q
translator = np.argsort(reorder_indices_A)
view = reorder_indices_B[translator]
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def hungarian(A, B):
    """
    Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only.
    Returns the permutation of B's rows minimizing total pairwise distance.
    """
    # should be kabasch here i think
    distances = cdist(A, B, 'euclidean')
    # Perform Hungarian analysis on distance matrix between atoms of 1st
    # structure and trial structure
    # NOTE(review): indices_a is unused (rows come back sorted).
    indices_a, indices_b = linear_sum_assignment(distances)
    return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using the Hungarian
method (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = hungarian(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def generate_permutations(elements, n):
"""
Heap's algorithm for generating all n! permutations in a list
https://en.wikipedia.org/wiki/Heap%27s_algorithm
"""
c = [0] * n
yield elements
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
elements[0], elements[i] = elements[i], elements[0]
else:
elements[c[i]], elements[i] = elements[i], elements[c[i]]
yield elements
c[i] += 1
i = 0
else:
c[i] = 0
i += 1
def brute_permutation(A, B):
"""
Re-orders the input atom list and xyz coordinates using the brute force
method of permuting all rows of the input coordinates
Parameters
----------
A : array
(N,D) matrix, where N is points and D is dimension
B : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view : array
(N,1) matrix, reordered view of B projected to A
"""
rmsd_min = np.inf
view_min = None
# Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
# brute-force method
num_atoms = A.shape[0]
initial_order = list(range(num_atoms))
for reorder_indices in generate_permutations(initial_order, num_atoms):
# Re-order the atom array and coordinate matrix
coords_ordered = B[reorder_indices]
# Calculate the RMSD between structure 1 and the Hungarian re-ordered
# structure 2
rmsd_temp = kabsch_rmsd(A, coords_ordered)
# Replaces the atoms and coordinates with the current structure if the
# RMSD is lower
if rmsd_temp < rmsd_min:
rmsd_min = rmsd_temp
view_min = copy.deepcopy(reorder_indices)
return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates using all permutation of
rows (using optimized column results)
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
view_reorder : array
(N,1) matrix, reordered indexes of atom alignment based on the
coordinates of the atoms
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate full view from q shape to fill in atom view on the fly
view_reorder = np.zeros(q_atoms.shape, dtype=int)
view_reorder -= 1
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
view = brute_permutation(A_coord, B_coord)
view_reorder[p_atom_idx] = q_atom_idx[view]
return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd : float
        lowest RMSD found over all swap/reflection combinations
    min_swap : array
        axis permutation that produced the minimum
    min_reflection : array
        axis sign flips that produced the minimum
    min_review : array or None
        atom reordering used at the minimum (None if no reordering was done)
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parities of the entries of AXIS_SWAPS / AXIS_REFLECTIONS; the product
    # i * j == -1 identifies combinations that invert chirality.
    swap_mask = [1, -1, -1, 1, -1, 1]
    reflection_mask = [1, -1, -1, -1, 1, 1, 1, -1]
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            # Work on copies so the caller's Q is never modified
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] inserts a new axis (shape (1, N)), so this compares
    # (N,) against (1, N) — confirm the no-reorder case is intended here.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    # Delegate the formatting, then emit the rendered text in one go.
    xyz_text = set_coordinates(atoms, V, title=title)
    print(xyz_text)
    return
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch table keyed on the file format.
    readers = {
        "xyz": get_coordinates_xyz,
        "pdb": get_coordinates_pdb,
    }
    if fmt not in readers:
        exit("Could not recognize file format: {:s}".format(fmt))
    return readers[fmt](filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        List of atomic types (single-letter element symbols)
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # Only the first chain/model is read: stop at TER or END.
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype from the first character of the
                # atom-name field; fall back to the second character for
                # names like "1HD1". Bare except is deliberate best-effort.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                except:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # Locate the x column once: first run of three consecutive
                # tokens that all contain a decimal point.
                if x_column == None:
                    try:
                        # look for x column
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates from the detected columns
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use hardcoded fixed-width PDB
                    # column indices as a fallback
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # Every parsed ATOM record must have produced one coordinate triple
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        (N,) array of atomic types (upper-cased element symbols)
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0
    # Use a context manager so the handle is closed on every path; the
    # previous version leaked the file descriptor whenever exit() fired.
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")
        # Skip the title line
        f.readline()
        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            # Element symbol: first run of letters on the line
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            # Decimal numbers, with optional scientific-notation exponent
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # A line is only valid if it holds at least three numbers;
            # extra columns (e.g. charges) are ignored.
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse arguments, load structures A and B,
    optionally filter/reorder atoms, and print either the RMSD or the
    aligned structure B in XYZ format."""
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter (mutually exclusive atom-selection options)
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show usage and bail out
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    # RMSD is only defined for equally-sized, paired structures
    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view (None means "use all atoms")
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # Printing a full structure is incompatible with a partial atom view
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    # NOTE(review): this None assignment appears to be overwritten
    # unconditionally by the chain below (default reorder-method is
    # "hungarian"), so reorder_method is never None here — confirm whether
    # the chain was meant to be guarded by args.reorder.
    if not args.reorder:
        reorder_method = None

    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            # --print needs the reorder to cover the complete atom list
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]
        # Get rotation matrix
        U = kabsch(q_coord, p_coord)
        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)
        # center q on p's original coordinates
        q_all += p_cent
        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        if result_rmsd:
            # RMSD already computed by the reflection scan
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))

    return


if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | print_coordinates | python | def print_coordinates(atoms, V, title=""):
print(set_coordinates(atoms, V, title=title))
return | Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of element types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L603-L620 | [
"def set_coordinates(atoms, V, title=\"\", decimals=8):\n \"\"\"\n Print coordinates V with corresponding atoms to stdout in XYZ format.\n Parameters\n ----------\n atoms : list\n List of atomic types\n V : array\n (N,3) matrix of atomic coordinates\n title : string (optional)\n Title of molecule\n decimals : int (optional)\n number of decimals for the coordinates\n\n Return\n ------\n output : str\n Molecule in XYZ format\n\n \"\"\"\n N, D = V.shape\n\n fmt = \"{:2s}\" + (\" {:15.\"+str(decimals)+\"f}\")*3\n\n out = list()\n out += [str(N)]\n out += [title]\n\n for i in range(N):\n atom = atoms[i]\n atom = atom[0].upper() + atom[1:]\n out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]\n\n return \"\\n\".join(out)\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 6 permutations of the three coordinate axes, used to scan axis swaps
# in check_reflections().
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# All 8 sign combinations of the three axes, used to scan mirror
# reflections in check_reflections().
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized replacement for the original per-element Python loops:
    # sqrt of the mean (over points) squared distance between paired points.
    # Accepts plain lists as well as ndarrays, like the original.
    N = len(V)
    diff = np.asarray(V) - np.asarray(W)
    return np.sqrt((diff * diff).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Shift both point sets so their centroids sit at the origin.
        P = P - centroid(P)
        Q = Q - centroid(Q)
    rotated_P = kabsch_rotate(P, Q)
    return rmsd(rotated_P, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix rotated onto Q.
    """
    # Find the optimal rotation, then apply it to P.
    rotation_matrix = kabsch(P, Q)
    return np.dot(P, rotation_matrix)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix mapping P onto Q with the Kabsch
    algorithm. Both point sets are assumed to be centered on the origin
    (centroid translation done by the caller). Each set is an NxD matrix,
    where D is the dimension of the space.

    Steps: build the covariance matrix C, take its SVD, and correct for a
    possible reflection so the result is a proper rotation.
    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two (centered) point sets
    covariance = np.dot(P.T, Q)

    # SVD of the covariance matrix
    left, singular, right = np.linalg.svd(covariance)

    # If det(left)*det(right) < 0 the SVD encodes a reflection; flip the
    # last singular direction to force a right-handed coordinate system.
    if np.linalg.det(left) * np.linalg.det(right) < 0.0:
        singular[-1] = -singular[-1]
        left[:, -1] = -left[:, -1]

    # The optimal rotation
    return np.dot(left, right)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q with the quaternion method and calculate the RMSD.
    Based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    aligned = np.dot(P, rotation)
    return rmsd(aligned, Q)
def quaternion_transform(r):
    """
    Build the rotation matrix encoded by quaternion r.

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    # The upper-left 3x3 corner of W(r)^T . Q(r) is the rotation matrix.
    Wt = makeW(*r).T
    Qm = makeQ(*r)
    return Wt.dot(Qm)[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation
    """
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Matrix involved in quaternion rotation
    """
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation mapping X onto Y via the quaternion method.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
        # assumes D == 3 (makeW/makeQ take three components plus r4=0)
        # — TODO confirm
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Quaternion helper matrices for every paired point
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    # NOTE(review): W_minus_Q is computed but never used below — confirm it
    # can be removed.
    W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
    # The optimal quaternion is the eigenvector of A belonging to the
    # largest eigenvalue (A is symmetric, so eigh applies).
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Centroid: the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : array
        centroid
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q so that, within each element type, atoms are
    matched to P by their rank of distance from the origin.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    for element in np.unique(p_atoms):
        # Positions of this element in each structure
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        # Rank atoms of this element by distance from the origin
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))
        # Project P's original ordering onto Q's distance-sorted ordering
        mapping = q_rank[np.argsort(p_rank)]
        view_reorder[p_idx] = q_idx[mapping]
    return view_reorder
def hungarian(A, B):
    """
    Hungarian (linear sum assignment) matching between two coordinate sets.
    Assume A and B are coordinates for atoms of SAME type only.
    """
    # Pairwise Euclidean distance matrix between the two structures
    cost = cdist(A, B, 'euclidean')
    # Optimal one-to-one assignment minimizing total distance
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # Find unique atoms
    unique_atoms = np.unique(p_atoms)
    # generate full view from q shape to fill in atom view on the fly;
    # -1 marks entries not yet assigned
    view_reorder = np.zeros(q_atoms.shape, dtype=int)
    view_reorder -= 1
    for atom in unique_atoms:
        # Solve the assignment per element type so only like atoms are paired
        p_atom_idx, = np.where(p_atoms == atom)
        q_atom_idx, = np.where(q_atoms == atom)
        A_coord = p_coord[p_atom_idx]
        B_coord = q_coord[q_atom_idx]
        view = hungarian(A_coord, B_coord)
        view_reorder[p_atom_idx] = q_atom_idx[view]
    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    Warning: yields the SAME list object every time, mutated in place —
    callers must copy a yielded permutation if they need to keep it
    (brute_permutation does a deepcopy for this reason).
    """
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Swap position depends on parity of i (Heap's algorithm)
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None
    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    # O(N!) scan over every row permutation of B
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the atom array and coordinate matrix
        coords_ordered = B[reorder_indices]
        # Calculate the RMSD between structure 1 and the re-ordered
        # structure 2
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        # Replaces the atoms and coordinates with the current structure if the
        # RMSD is lower
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # deepcopy is required: the generator mutates and re-yields the
            # same list object
            view_min = copy.deepcopy(reorder_indices)
    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-order the atoms of Q onto P by exhaustively trying every row
    permutation within each element type (brute force).

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 sentinel marks entries that have not been assigned yet
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)
    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        # O(n!) permutation search restricted to atoms of this element
        perm = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[perm]
    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd : float
        lowest RMSD found over all swap/reflection combinations
    min_swap : array
        axis permutation that produced the minimum
    min_reflection : array
        axis sign flips that produced the minimum
    min_review : array or None
        atom reordering used at the minimum (None if no reordering was done)
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parities of AXIS_SWAPS / AXIS_REFLECTIONS entries; a product of -1
    # marks a combination that inverts chirality (an enantiomer).
    swap_mask = [1, -1, -1, 1, -1, 1]
    reflection_mask = [1, -1, -1, -1, 1, 1, 1, -1]
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            # Copies so the caller's Q is left untouched
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # NOTE(review): with reorder_method=None, min_review stays None and
    # q_atoms[min_review] adds an axis ((N,) vs (1,N) comparison) — confirm
    # this sanity check is intended for the no-reorder case.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Render atoms with coordinates V as a string in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    N, D = V.shape
    # One atom label (2-wide) followed by three fixed-width floats
    row_fmt = "{:2s}" + (" {:15." + str(decimals) + "f}") * 3

    lines = [str(N), title]
    for i in range(N):
        # Capitalize only the first character of the element symbol
        label = atoms[i][0].upper() + atoms[i][1:]
        lines.append(row_fmt.format(label, V[i, 0], V[i, 1], V[i, 2]))
    return "\n".join(lines)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Select the format-specific reader; abort on anything else
    if fmt == "xyz":
        get_func = get_coordinates_xyz
    elif fmt == "pdb":
        get_func = get_coordinates_pdb
    else:
        exit("Could not recognize file format: {:s}".format(fmt))
    return get_func(filename)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        List of atomic types (single-letter element symbols)
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()
    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()
    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            # Only the first chain/model is read: stop at TER or END
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype: first character of the atom-name
                # field, falling back to the second character for names such
                # as "1HD1". The bare except is a deliberate catch-all.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise Exception
                except:
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
                # Detect the x column once: the first run of three
                # consecutive tokens all containing a decimal point
                if x_column == None:
                    try:
                        # look for x column
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
                # Try to read the coordinates from the detected columns
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except:
                    # If that doesn't work, use the hardcoded fixed-width
                    # PDB column indices
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))
    V = np.asarray(V)
    atoms = np.asarray(atoms)
    # Every ATOM record must have produced exactly one coordinate triple
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : array
        List of atomic types (upper-cased element symbols)
    V : array
        (N,3) where N is number of atoms
    """
    # NOTE(review): f is never closed on the exit() error paths below —
    # consider a with-block.
    f = open(filename, 'r')
    V = list()
    atoms = list()
    n_atoms = 0

    # Read the first line to obtain the number of atoms to read
    try:
        n_atoms = int(f.readline())
    except ValueError:
        exit("error: Could not obtain the number of atoms in the .xyz file.")

    # Skip the title line
    f.readline()

    # Use the number of atoms to not read beyond the end of a file
    for lines_read, line in enumerate(f):
        if lines_read == n_atoms:
            break
        # Element symbol: first run of letters on the line
        atom = re.findall(r'[a-zA-Z]+', line)[0]
        atom = atom.upper()
        # Decimal numbers, optionally with a scientific-notation exponent
        numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
        numbers = [float(number) for number in numbers]
        # A line is valid when it holds at least three numbers; only the
        # first three are kept (extra columns are ignored)
        if len(numbers) >= 3:
            V.append(np.array(numbers)[:3])
            atoms.append(atom)
        else:
            exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
    f.close()
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parses arguments, loads structures A and B, optionally filters atoms
    (hydrogens / index lists), optionally reorders atoms and scans axis
    swaps/reflections, then prints either the minimal RMSD or structure B
    rotated onto A in XYZ format.
    """
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view: optionally restrict the comparison to a subset of atoms.
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        # No filtering: work on deep copies of the full structures.
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # --print needs the full atom list, so it cannot be combined with
        # atom filtering plus reorder/reflection scans.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below runs unconditionally and overwrites the
    # None assigned above, so reorder_method is non-None even without
    # --reorder; the reflection scans therefore always reorder. Confirm
    # against upstream whether this is intended.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # NOTE(review): `if result_rmsd:` treats an exact 0.0 RMSD as falsy
        # and recomputes it below; harmless but redundant.
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)

        print("{0}".format(result_rmsd))

    return
if __name__ == "__main__":
    # Allow the module to be used both as a library and as a script.
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | get_coordinates | python | def get_coordinates(filename, fmt):
if fmt == "xyz":
get_func = get_coordinates_xyz
elif fmt == "pdb":
get_func = get_coordinates_pdb
else:
exit("Could not recognize file format: {:s}".format(fmt))
return get_func(filename) | Get coordinates from filename in format fmt. Supports XYZ and PDB.
Parameters
----------
filename : string
Filename to read
fmt : string
Format of filename. Either xyz or pdb.
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L623-L646 | [
"def get_coordinates_pdb(filename):\n \"\"\"\n Get coordinates from the first chain in a pdb file\n and return a vectorset with all the coordinates.\n\n Parameters\n ----------\n filename : string\n Filename to read\n\n Returns\n -------\n atoms : list\n List of atomic types\n V : array\n (N,3) where N is number of atoms\n \"\"\"\n\n # PDB files tend to be a bit of a mess. The x, y and z coordinates\n # are supposed to be in column 31-38, 39-46 and 47-54, but this is\n # not always the case.\n # Because of this the three first columns containing a decimal is used.\n # Since the format doesn't require a space between columns, we use the\n # above column indices as a fallback.\n\n x_column = None\n V = list()\n\n # Same with atoms and atom naming.\n # The most robust way to do this is probably\n # to assume that the atomtype is given in column 3.\n\n atoms = list()\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line.startswith(\"TER\") or line.startswith(\"END\"):\n break\n if line.startswith(\"ATOM\"):\n tokens = line.split()\n # Try to get the atomtype\n try:\n atom = tokens[2][0]\n if atom in (\"H\", \"C\", \"N\", \"O\", \"S\", \"P\"):\n atoms.append(atom)\n else:\n # e.g. 
1HD1\n atom = tokens[2][1]\n if atom == \"H\":\n atoms.append(atom)\n else:\n raise Exception\n except:\n exit(\"error: Parsing atomtype for the following line: \\n{0:s}\".format(line))\n\n if x_column == None:\n try:\n # look for x column\n for i, x in enumerate(tokens):\n if \".\" in x and \".\" in tokens[i + 1] and \".\" in tokens[i + 2]:\n x_column = i\n break\n except IndexError:\n exit(\"error: Parsing coordinates for the following line: \\n{0:s}\".format(line))\n # Try to read the coordinates\n try:\n V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))\n except:\n # If that doesn't work, use hardcoded indices\n try:\n x = line[30:38]\n y = line[38:46]\n z = line[46:54]\n V.append(np.asarray([x, y ,z], dtype=float))\n except:\n exit(\"error: Parsing input for the following line: \\n{0:s}\".format(line))\n\n\n V = np.asarray(V)\n atoms = np.asarray(atoms)\n\n assert V.shape[0] == atoms.size\n\n return atoms, V\n",
"def get_coordinates_xyz(filename):\n \"\"\"\n Get coordinates from filename and return a vectorset with all the\n coordinates, in XYZ format.\n\n Parameters\n ----------\n filename : string\n Filename to read\n\n Returns\n -------\n atoms : list\n List of atomic types\n V : array\n (N,3) where N is number of atoms\n \"\"\"\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n exit(\"error: Could not obtain the number of atoms in the .xyz file.\")\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) >= 3:\n V.append(np.array(numbers)[:3])\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V\n"
] | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All 3! permutations of the x/y/z axes; used by check_reflections to scan
# axis exchanges.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])

# All 2^3 per-axis sign combinations; used by check_reflections to scan
# mirror reflections.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    # Vectorized replacement for the original per-element Python double loop:
    # sqrt of the mean (over points) of the squared distances between pairs.
    diff = np.asarray(V) - np.asarray(W)
    N = len(V)
    return np.sqrt((diff * diff).sum() / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    RMSD between P and Q after optimally rotating P onto Q (Kabsch).

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    # Optionally center both point sets on their centroids first.
    if translate:
        P = P - centroid(P)
        Q = Q - centroid(Q)
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P onto matrix Q using the Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U mapping P onto Q (Kabsch).

    Both point sets are assumed to be centered on their centroids before
    this call. The steps are: build the covariance matrix C = P^T Q,
    take its SVD, and correct the sign so the result is a proper
    (right-handed) rotation.

    See http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the paired point sets.
    covariance = np.dot(P.T, Q)

    V, S, W = np.linalg.svd(covariance)

    # det(V)*det(W) < 0 means the naive product would be an improper
    # rotation (a reflection); flip the last singular direction to fix it.
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]

    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    RMSD between P and Q after quaternion-based optimal rotation of P.

    Based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Build the 3x3 rotation matrix for quaternion r.

    Translation is zero when the centroids of each molecule coincide.
    """
    # The upper-left 3x3 block of W(r)^T Q(r) is the rotation.
    product = np.dot(makeW(*r).T, makeQ(*r))
    return product[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    4x4 matrix involved in quaternion rotation (W-form).
    """
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    4x4 matrix involved in quaternion rotation (Q-form).
    """
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y: array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    # Per-point quaternion multiplication matrices.
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
    # NOTE(review): W_minus_Q is computed but never used below.
    W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
    # Accumulate into the 4x4 profile matrix; the eigenvector of its
    # largest eigenvalue is the optimal rotation quaternion.
    A = np.sum(Qt_dot_W, axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    rot = quaternion_transform(r)
    return rot
def centroid(X):
    """
    Return the centroid (mean position over all coordinate directions)
    of the vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : float
        centroid
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Reorder Q's atoms onto P by element type and distance from the origin.

    For each element, atoms in both structures are ranked by their norm
    (the caller is expected to have centered the coordinates on the
    centroid), and Q's ranking is projected onto P's ordering.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) index array such that q_coord[view_reorder] aligns with P
    """
    result_view = np.zeros(q_atoms.shape, dtype=int)

    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        # Rank this element's atoms by distance from the origin.
        p_rank = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        q_rank = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Map Q's ranking onto P's original ordering.
        result_view[p_idx] = q_idx[q_rank[np.argsort(p_rank)]]

    return result_view
def hungarian(A, B):
    """
    Hungarian (linear sum assignment) matching on Euclidean distances.

    A and B must hold coordinates for atoms of the SAME type only.
    Returns the row indices of B matched to the rows of A, in A's order.
    """
    cost = cdist(A, B, 'euclidean')
    # Optimal one-to-one assignment minimizing total distance.
    _, col_indices = linear_sum_assignment(cost)
    return col_indices
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Reorder Q's atoms onto P per element using the Hungarian method.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) index array mapping Q's rows onto P's ordering
    """
    # -1 marks unassigned slots (should all be overwritten below).
    result_view = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        match = hungarian(p_coord[p_idx], q_coord[q_idx])
        result_view[p_idx] = q_idx[match]

    return result_view
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: permutes ``elements`` in place and yields the SAME list object
    every time; callers must copy a yielded value to keep it.
    """
    # c plays the role of the per-level loop counter in the recursive
    # formulation of Heap's algorithm.
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Heap's swap rule: position 0 for even i, position c[i] for odd i.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    rmsd_min = np.inf
    view_min = None

    # Sets initial ordering for row indices to [0, 1, 2, ..., len(A)], used in
    # brute-force method
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))

    # O(N!) scan over all row permutations of B; scored by RMSD after
    # optimal Kabsch rotation.
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        # Re-order the atom array and coordinate matrix
        coords_ordered = B[reorder_indices]

        # Calculate the RMSD between structure 1 and the re-ordered
        # structure 2
        rmsd_temp = kabsch_rmsd(A, coords_ordered)

        # Replaces the atoms and coordinates with the current structure if the
        # RMSD is lower
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            # deepcopy is required: generate_permutations mutates and
            # yields the same list object on every iteration.
            view_min = copy.deepcopy(reorder_indices)

    return view_min
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Reorder Q's atoms onto P per element by brute-force permutation search.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) index array mapping Q's rows onto P's ordering
    """
    # -1 marks unassigned slots (should all be overwritten below).
    result_view = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx, = np.where(p_atoms == element)
        q_idx, = np.where(q_atoms == element)

        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        result_view[p_idx] = q_idx[best]

    return result_view
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parities of the corresponding AXIS_SWAPS / AXIS_REFLECTIONS entries;
    # a combined parity i*j == -1 means the transform mirrors the molecule
    # (produces the enantiomer).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]

    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers

            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply the axis permutation, then the per-axis sign flips,
            # then re-center on the centroid.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)

            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]

            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)

            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review

    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] indexes with None (adds an axis); the broadcast
    # comparison still works, but this looks accidental -- confirm upstream.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()

    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Format coordinates V with corresponding atoms as an XYZ string.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_points, _ = V.shape
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")

    # XYZ header: atom count, then the title line.
    lines = [str(n_points), title]
    for idx in range(n_points):
        label = atoms[idx]
        # Normalize element capitalization (first letter upper-case).
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz = set_coordinates(atoms, V, title=title)
    print(xyz)
def get_coordinates_pdb(filename):
    """
    Get coordinates from the first chain in a pdb file
    and return a vectorset with all the coordinates.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # PDB files tend to be a bit of a mess. The x, y and z coordinates
    # are supposed to be in column 31-38, 39-46 and 47-54, but this is
    # not always the case.
    # Because of this the three first columns containing a decimal is used.
    # Since the format doesn't require a space between columns, we use the
    # above column indices as a fallback.
    x_column = None
    V = list()

    # Same with atoms and atom naming.
    # The most robust way to do this is probably
    # to assume that the atomtype is given in column 3.
    atoms = list()

    with open(filename, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if line.startswith("TER") or line.startswith("END"):
                break
            if line.startswith("ATOM"):
                tokens = line.split()
                # Try to get the atomtype. The original used bare "except:";
                # narrowed so SystemExit/KeyboardInterrupt are not swallowed.
                try:
                    atom = tokens[2][0]
                    if atom in ("H", "C", "N", "O", "S", "P"):
                        atoms.append(atom)
                    else:
                        # e.g. 1HD1 -- hydrogen names may start with a digit
                        atom = tokens[2][1]
                        if atom == "H":
                            atoms.append(atom)
                        else:
                            raise ValueError
                except (IndexError, ValueError):
                    exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))

                if x_column is None:
                    # Locate the first run of three consecutive tokens that
                    # contain a decimal point; that is taken as x/y/z.
                    try:
                        for i, x in enumerate(tokens):
                            if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
                                x_column = i
                                break
                    except IndexError:
                        exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))

                # Try to read the coordinates from the detected columns.
                # TypeError covers x_column still being None (slice with None).
                try:
                    V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
                except (ValueError, TypeError):
                    # If that doesn't work, use hardcoded PDB column indices.
                    try:
                        x = line[30:38]
                        y = line[38:46]
                        z = line[46:54]
                        V.append(np.asarray([x, y, z], dtype=float))
                    except ValueError:
                        exit("error: Parsing input for the following line: \n{0:s}".format(line))

    V = np.asarray(V)
    atoms = np.asarray(atoms)
    assert V.shape[0] == atoms.size
    return atoms, V
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()
    n_atoms = 0

    # "with" guarantees the file handle is closed even when exit() raises
    # SystemExit on a parse error (the original leaked the handle there).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break

            # Element symbol is the first alphabetic run on the line.
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()

            # Floats (optionally with an exponent) on the line.
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]

            # The numbers are not valid unless we obtain exactly three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point.

    Parses arguments, loads structures A and B, optionally filters atoms
    (hydrogens / index lists), optionally reorders atoms and scans axis
    swaps/reflections, then prints either the minimal RMSD or structure B
    rotated onto A in XYZ format.
    """
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view: optionally restrict the comparison to a subset of atoms.
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')
    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index
    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        # No filtering: work on deep copies of the full structures.
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)
    else:
        # --print needs the full atom list, so it cannot be combined with
        # atom filtering plus reorder/reflection scans.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()
        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()
        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below runs unconditionally and overwrites the
    # None assigned above, so reorder_method is non-None even without
    # --reorder; the reflection scans therefore always reorder. Confirm
    # against upstream whether this is intended.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)
    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)
    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)
    else:
        # NOTE(review): `if result_rmsd:` treats an exact 0.0 RMSD as falsy
        # and recomputes it below; harmless but redundant.
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)

        print("{0}".format(result_rmsd))

    return
if __name__ == "__main__":
    # Allow the module to be used both as a library and as a script.
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | get_coordinates_pdb | python | def get_coordinates_pdb(filename):
# PDB files tend to be a bit of a mess. The x, y and z coordinates
# are supposed to be in column 31-38, 39-46 and 47-54, but this is
# not always the case.
# Because of this the three first columns containing a decimal is used.
# Since the format doesn't require a space between columns, we use the
# above column indices as a fallback.
x_column = None
V = list()
# Same with atoms and atom naming.
# The most robust way to do this is probably
# to assume that the atomtype is given in column 3.
atoms = list()
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith("TER") or line.startswith("END"):
break
if line.startswith("ATOM"):
tokens = line.split()
# Try to get the atomtype
try:
atom = tokens[2][0]
if atom in ("H", "C", "N", "O", "S", "P"):
atoms.append(atom)
else:
# e.g. 1HD1
atom = tokens[2][1]
if atom == "H":
atoms.append(atom)
else:
raise Exception
except:
exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
if x_column == None:
try:
# look for x column
for i, x in enumerate(tokens):
if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
x_column = i
break
except IndexError:
exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
# Try to read the coordinates
try:
V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
except:
# If that doesn't work, use hardcoded indices
try:
x = line[30:38]
y = line[38:46]
z = line[46:54]
V.append(np.asarray([x, y ,z], dtype=float))
except:
exit("error: Parsing input for the following line: \n{0:s}".format(line))
V = np.asarray(V)
atoms = np.asarray(atoms)
assert V.shape[0] == atoms.size
return atoms, V | Get coordinates from the first chain in a pdb file
and return a vectorset with all the coordinates.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L649-L733 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All six permutations of the three Cartesian axes; used by
# check_reflections to swap coordinate columns.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All eight sign combinations of the three axes; used by
# check_reflections as mirror reflections of the coordinates.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    # Vectorized equivalent of the original per-coordinate Python loop:
    # sum of squared differences over all N*D entries, normalized by N
    # (NOT by N*D -- this matches the original normalization).
    return np.sqrt(np.sum((V - W) ** 2) / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their respective centroids first.
        P, Q = P - centroid(P), Q - centroid(Q)
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, rotated onto Q.
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q,
    centered around the centroid. Each vector set is represented as an NxD
    matrix, where D is the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two (pre-centered) point sets.
    covariance = np.dot(np.transpose(P), Q)

    # Optimal rotation via singular value decomposition of the covariance.
    u, s, vt = np.linalg.svd(covariance)

    # A negative determinant product means the naive result would be an
    # improper rotation (a reflection); flip the last singular direction
    # so the result is a right-handed rotation matrix.
    if np.linalg.det(u) * np.linalg.det(vt) < 0.0:
        s[-1] = -s[-1]
        u[:, -1] = -u[:, -1]

    return np.dot(u, vt)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD,
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Get the optimal 3x3 rotation matrix encoded by the quaternion r.

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    return makeW(*r).T.dot(makeQ(*r))[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Build the 4x4 W matrix involved in quaternion rotation.
    """
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Build the 4x4 Q matrix involved in quaternion rotation.
    """
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation that maps X onto Y (quaternion formulation).

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate A = sum_k Q_k^T W_k; the eigenvector of A with the largest
    # eigenvalue is the optimal rotation quaternion.
    # (The original also built an unused W_minus_Q array -- dead code removed.)
    A = np.sum(np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]), axis=0)
    eigenvalues, eigenvectors = np.linalg.eigh(A)
    r = eigenvectors[:, eigenvalues.argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        Mean of the rows of X (length-D vector).
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then
    by distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix of indices mapping each P position to a Q atom.
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]

        # Rank both subsets by distance from the origin (coordinates are
        # assumed already centroid-translated by the caller).
        order_p = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        order_q = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Match the i-th closest Q atom to the i-th closest P atom.
        view_reorder[p_idx] = q_idx[order_q[np.argsort(order_p)]]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian (optimal assignment) reordering.

    Assume A and B are coordinates for atoms of the SAME element only.

    Parameters
    ----------
    A : array
        (N,D) coordinates of one element subset.
    B : array
        (N,D) coordinates of the same element subset in the other molecule.

    Returns
    -------
    indices_b : array
        Column indices assigning each row of A to a row of B.
    """
    distances = cdist(A, B, 'euclidean')

    # Minimum-cost assignment between rows of A and rows of B. The row
    # indices come back sorted, so only the column permutation is needed
    # (the original bound them to an unused variable).
    _, indices_b = linear_sum_assignment(distances)
    return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (should not survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[assignment]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: `elements` is permuted in place and the SAME list object is
    yielded every time; a caller that needs to keep a permutation must
    copy it (see brute_permutation).
    """
    # c[i] is Heap's algorithm loop counter at recursion depth i
    # (the iterative encoding of the recursive formulation).
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Which element is swapped with position i depends on the
            # parity of i -- this is Heap's swap rule.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None

    num_atoms = A.shape[0]
    order = list(range(num_atoms))

    for permutation in generate_permutations(order, num_atoms):
        # Kabsch RMSD between A and this row permutation of B.
        candidate = kabsch_rmsd(A, B[permutation])
        if candidate < best_rmsd:
            best_rmsd = candidate
            # generate_permutations mutates and re-yields the same list,
            # so snapshot it. A shallow copy suffices for a flat list of
            # ints (the original used an unnecessary copy.deepcopy).
            best_view = list(permutation)

    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation
    of rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (should not survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
                      reorder_method=reorder_hungarian,
                      rotation_method=kabsch_rmsd,
                      keep_stereo=False):
    """
    Minimize RMSD using reflection planes for molecule P and Q

    Warning: This will affect stereo-chemistry

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    min_rmsd
    min_swap
    min_reflection
    min_review
    """
    min_rmsd = np.inf
    min_swap = None
    min_reflection = None
    min_review = None
    tmp_review = None
    # Parity of each axis permutation / sign flip; the product i*j tells
    # whether the combined transform is proper (+1) or improper (-1, i.e.
    # it would produce the enantiomer).
    swap_mask = [1,-1,-1,1,-1,1]
    reflection_mask = [1,-1,-1,-1,1,1,1,-1]
    for swap, i in zip(AXIS_SWAPS, swap_mask):
        for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
            if keep_stereo and i * j == -1: continue  # skip enantiomers
            tmp_atoms = copy.copy(q_atoms)
            tmp_coord = copy.deepcopy(q_coord)
            # Apply the axis swap, then the mirror, then re-center.
            tmp_coord = tmp_coord[:, swap]
            tmp_coord = np.dot(tmp_coord, np.diag(reflection))
            tmp_coord -= centroid(tmp_coord)
            # Reorder
            if reorder_method is not None:
                tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
                tmp_coord = tmp_coord[tmp_review]
                tmp_atoms = tmp_atoms[tmp_review]
            # Rotation
            if rotation_method is None:
                this_rmsd = rmsd(p_coord, tmp_coord)
            else:
                this_rmsd = rotation_method(p_coord, tmp_coord)
            if this_rmsd < min_rmsd:
                min_rmsd = this_rmsd
                min_swap = swap
                min_reflection = reflection
                min_review = tmp_review
    # NOTE(review): when reorder_method is None, min_review stays None and
    # q_atoms[min_review] below indexes with None (adds a new axis); the
    # broadcast comparison happens to pass, but this looks accidental --
    # confirm intended behavior for the no-reorder case.
    if not (p_atoms == q_atoms[min_review]).all():
        print("error: Not aligned")
        quit()
    return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Return coordinates V with corresponding atoms as a string in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates

    Return
    ------
    output : str
        Molecule in XYZ format
    """
    n_atoms = V.shape[0]
    row_fmt = "{:2s}" + 3 * (" {:15." + str(decimals) + "f}")

    # XYZ header: atom count, then the title line.
    lines = [str(n_atoms), title]
    for label, row in zip(atoms, V):
        # Capitalize only the leading character of the element symbol.
        label = label[0].upper() + label[1:]
        lines.append(row_fmt.format(label, row[0], row[1], row[2]))

    return "\n".join(lines)
def print_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of element types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    xyz = set_coordinates(atoms, V, title=title)
    print(xyz)
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    if fmt == "xyz":
        return get_coordinates_xyz(filename)
    if fmt == "pdb":
        return get_coordinates_pdb(filename)
    exit("Could not recognize file format: {:s}".format(fmt))
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    V = list()
    atoms = list()

    # Context manager guarantees the handle is closed even when exit()
    # raises SystemExit on malformed input (the original leaked the file).
    with open(filename, 'r') as f:
        # Read the first line to obtain the number of atoms to read
        try:
            n_atoms = int(f.readline())
        except ValueError:
            exit("error: Could not obtain the number of atoms in the .xyz file.")

        # Skip the title line
        f.readline()

        # Use the number of atoms to not read beyond the end of a file
        for lines_read, line in enumerate(f):
            if lines_read == n_atoms:
                break
            atom = re.findall(r'[a-zA-Z]+', line)[0]
            atom = atom.upper()
            numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
            numbers = [float(number) for number in numbers]
            # The numbers are only valid if we obtain at least three
            if len(numbers) >= 3:
                V.append(np.array(numbers)[:3])
                atoms.append(atom)
            else:
                exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
def main():
    """Command-line entry point: parse options, load structures A and B,
    optionally filter/reorder/reflect the atoms, then print either the RMSD
    or structure B aligned onto A in XYZ format."""
    import argparse
    import sys

    description = __doc__

    version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
    version_msg = version_msg.format(__version__)

    epilog = """
"""

    parser = argparse.ArgumentParser(
        usage='calculate_rmsd [options] FILE_A FILE_B',
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog)

    # Input structures
    parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
    parser.add_argument('structure_b', metavar='FILE_B', type=str)

    # Admin
    parser.add_argument('-v', '--version', action='version', version=version_msg)

    # Rotation
    parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")

    # Reorder arguments
    parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
    parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
    parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
    parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')

    # Filter (the three index selections are mutually exclusive)
    index_group = parser.add_mutually_exclusive_group()
    index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
    index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
    index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')

    # format and print
    parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
    parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')

    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    # As default, load the extension as format
    if args.format is None:
        args.format = args.structure_a.split('.')[-1]

    p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
    q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)

    p_size = p_all.shape[0]
    q_size = q_all.shape[0]

    if not p_size == q_size:
        print("error: Structures not same size")
        quit()

    # Without --reorder the atom sequences must already match one-to-one.
    if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
        msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
        print(msg)
        exit()

    # Set local view
    p_view = None
    q_view = None

    if args.no_hydrogen:
        p_view = np.where(p_all_atoms != 'H')
        q_view = np.where(q_all_atoms != 'H')

    elif args.remove_idx:
        index = range(p_size)
        index = set(index) - set(args.remove_idx)
        index = list(index)
        p_view = index
        q_view = index

    elif args.add_idx:
        p_view = args.add_idx
        q_view = args.add_idx

    # Set local view
    if p_view is None:
        p_coord = copy.deepcopy(p_all)
        q_coord = copy.deepcopy(q_all)
        p_atoms = copy.deepcopy(p_all_atoms)
        q_atoms = copy.deepcopy(q_all_atoms)

    else:
        # Printing the aligned structure needs the full atom list, which is
        # incompatible with atom filtering.
        if args.reorder and args.output:
            print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
            quit()

        if args.use_reflections and args.output:
            print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
            quit()

        p_coord = copy.deepcopy(p_all[p_view])
        q_coord = copy.deepcopy(q_all[q_view])
        p_atoms = copy.deepcopy(p_all_atoms[p_view])
        q_atoms = copy.deepcopy(q_all_atoms[q_view])

    # Create the centroid of P and Q which is the geometric center of a
    # N-dimensional region and translate P and Q onto that center.
    # http://en.wikipedia.org/wiki/Centroid
    p_cent = centroid(p_coord)
    q_cent = centroid(q_coord)
    p_coord -= p_cent
    q_coord -= q_cent

    # set rotation method
    if args.rotation.lower() == "kabsch":
        rotation_method = kabsch_rmsd
    elif args.rotation.lower() == "quaternion":
        rotation_method = quaternion_rmsd
    elif args.rotation.lower() == "none":
        rotation_method = None
    else:
        print("error: Unknown rotation method:", args.rotation)
        quit()

    # set reorder method
    if not args.reorder:
        reorder_method = None
    # NOTE(review): the chain below unconditionally overwrites the None set
    # above, because --reorder-method defaults to "hungarian"; reorder_method
    # is therefore never None here. Confirm whether these branches should be
    # guarded by args.reorder.
    if args.reorder_method == "hungarian":
        reorder_method = reorder_hungarian
    elif args.reorder_method == "brute":
        reorder_method = reorder_brute
    elif args.reorder_method == "distance":
        reorder_method = reorder_distance
    else:
        print("error: Unknown reorder method:", args.reorder_method)
        quit()

    # Save the resulting RMSD
    result_rmsd = None

    if args.use_reflections:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method)

    elif args.use_reflections_keep_stereo:
        result_rmsd, q_swap, q_reflection, q_review = check_reflections(
            p_atoms,
            q_atoms,
            p_coord,
            q_coord,
            reorder_method=reorder_method,
            rotation_method=rotation_method,
            keep_stereo=True)

    elif args.reorder:
        q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
        q_coord = q_coord[q_review]
        q_atoms = q_atoms[q_review]
        if not all(p_atoms == q_atoms):
            print("error: Structure not aligned")
            quit()

    # print result
    if args.output:
        if args.reorder:
            if q_review.shape[0] != q_all.shape[0]:
                print("error: Reorder length error. Full atom list needed for --print")
                quit()
            q_all = q_all[q_review]
            q_all_atoms = q_all_atoms[q_review]

        # Get rotation matrix
        U = kabsch(q_coord, p_coord)

        # recenter all atoms and rotate all atoms
        q_all -= q_cent
        q_all = np.dot(q_all, U)

        # center q on p's original coordinates
        q_all += p_cent

        # done and done
        xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
        print(xyz)

    else:
        # result_rmsd is already set when reflections were scanned.
        if result_rmsd:
            pass
        elif rotation_method is None:
            result_rmsd = rmsd(p_coord, q_coord)
        else:
            result_rmsd = rotation_method(p_coord, q_coord)
        print("{0}".format(result_rmsd))

    return
# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()
|
charnley/rmsd | rmsd/calculate_rmsd.py | get_coordinates_xyz | python | def get_coordinates_xyz(filename):
f = open(filename, 'r')
V = list()
atoms = list()
n_atoms = 0
# Read the first line to obtain the number of atoms to read
try:
n_atoms = int(f.readline())
except ValueError:
exit("error: Could not obtain the number of atoms in the .xyz file.")
# Skip the title line
f.readline()
# Use the number of atoms to not read beyond the end of a file
for lines_read, line in enumerate(f):
if lines_read == n_atoms:
break
atom = re.findall(r'[a-zA-Z]+', line)[0]
atom = atom.upper()
numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
numbers = [float(number) for number in numbers]
# The numbers are not valid unless we obtain exacly three
if len(numbers) >= 3:
V.append(np.array(numbers)[:3])
atoms.append(atom)
else:
exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))
f.close()
atoms = np.array(atoms)
V = np.array(V)
return atoms, V | Get coordinates from filename and return a vectorset with all the
coordinates, in XYZ format.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms | train | https://github.com/charnley/rmsd/blob/cd8af499fb63529a1b5b1f880fdb2dab2731544a/rmsd/calculate_rmsd.py#L736-L790 | null | #!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
# All six permutations of the three Cartesian axes; used by
# check_reflections to swap coordinate columns.
AXIS_SWAPS = np.array([
    [0, 1, 2],
    [0, 2, 1],
    [1, 0, 2],
    [1, 2, 0],
    [2, 1, 0],
    [2, 0, 1]])
# All eight sign combinations of the three axes; used by
# check_reflections as mirror reflections of the coordinates.
AXIS_REFLECTIONS = np.array([
    [1, 1, 1],
    [-1, 1, 1],
    [1, -1, 1],
    [1, 1, -1],
    [-1, -1, 1],
    [-1, 1, -1],
    [1, -1, -1],
    [-1, -1, -1]])
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation between the two vectors
    """
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    # Vectorized equivalent of the original per-coordinate Python loop:
    # sum of squared differences over all N*D entries, normalized by N
    # (NOT by N*D -- this matches the original normalization).
    return np.sqrt(np.sum((V - W) ** 2) / N)
def kabsch_rmsd(P, Q, translate=False):
    """
    Rotate matrix P unto Q using Kabsch algorithm and calculate the RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.
    translate : bool
        Use centroids to translate vector P and Q unto each other.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    if translate:
        # Center both point sets on their respective centroids first.
        P, Q = P - centroid(P), Q - centroid(Q)
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, rotated onto Q.
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Using the Kabsch algorithm with two sets of paired point P and Q,
    centered around the centroid. Each vector set is represented as an NxD
    matrix, where D is the dimension of the space.

    The algorithm works in three steps:
    - a centroid translation of P and Q (assumed done before this function
      call)
    - the computation of a covariance matrix C
    - computation of the optimal rotation matrix U

    For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance matrix between the two (pre-centered) point sets.
    covariance = np.dot(np.transpose(P), Q)

    # Optimal rotation via singular value decomposition of the covariance.
    u, s, vt = np.linalg.svd(covariance)

    # A negative determinant product means the naive result would be an
    # improper rotation (a reflection); flip the last singular direction
    # so the result is a right-handed rotation matrix.
    if np.linalg.det(u) * np.linalg.det(vt) < 0.0:
        s[-1] = -s[-1]
        u[:, -1] = -u[:, -1]

    return np.dot(u, vt)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD,
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rotation = quaternion_rotate(P, Q)
    return rmsd(np.dot(P, rotation), Q)
def quaternion_transform(r):
    """
    Get the optimal 3x3 rotation matrix encoded by the quaternion r.

    note: translation will be zero when the centroids of each molecule are
    the same
    """
    return makeW(*r).T.dot(makeQ(*r))[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Build the 4x4 W matrix involved in quaternion rotation.
    """
    rows = [
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Build the 4x4 Q matrix involved in quaternion rotation.
    """
    rows = [
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    ]
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation that maps X onto Y (quaternion formulation).

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # Accumulate A = sum_k Q_k^T W_k; the eigenvector of A with the largest
    # eigenvalue is the optimal rotation quaternion.
    # (The original also built an unused W_minus_Q array -- dead code removed.)
    A = np.sum(np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]), axis=0)
    eigenvalues, eigenvectors = np.linalg.eigh(A)
    r = eigenvectors[:, eigenvalues.argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Centroid is the mean position of all the points in all of the coordinate
    directions, from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid

    C = sum(X)/len(X)

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        Mean of the rows of X (length-D vector).
    """
    return X.mean(axis=0)
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates by atom type and then
    by distance of each atom from the centroid.

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix of indices mapping each P position to a Q atom.
    """
    view_reorder = np.zeros(q_atoms.shape, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]

        # Rank both subsets by distance from the origin (coordinates are
        # assumed already centroid-translated by the caller).
        order_p = np.argsort(np.linalg.norm(p_coord[p_idx], axis=1))
        order_q = np.argsort(np.linalg.norm(q_coord[q_idx], axis=1))

        # Match the i-th closest Q atom to the i-th closest P atom.
        view_reorder[p_idx] = q_idx[order_q[np.argsort(order_p)]]

    return view_reorder
def hungarian(A, B):
    """
    Hungarian (optimal assignment) reordering.

    Assume A and B are coordinates for atoms of the SAME element only.

    Parameters
    ----------
    A : array
        (N,D) coordinates of one element subset.
    B : array
        (N,D) coordinates of the same element subset in the other molecule.

    Returns
    -------
    indices_b : array
        Column indices assigning each row of A to a row of B.
    """
    distances = cdist(A, B, 'euclidean')

    # Minimum-cost assignment between rows of A and rows of B. The row
    # indices come back sorted, so only the column permutation is needed
    # (the original bound them to an unused variable).
    _, indices_b = linear_sum_assignment(distances)
    return indices_b
def reorder_hungarian(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using the Hungarian
    method (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (should not survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        assignment = hungarian(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[assignment]

    return view_reorder
def generate_permutations(elements, n):
    """
    Heap's algorithm for generating all n! permutations in a list
    https://en.wikipedia.org/wiki/Heap%27s_algorithm

    NOTE: `elements` is permuted in place and the SAME list object is
    yielded every time; a caller that needs to keep a permutation must
    copy it (see brute_permutation).
    """
    # c[i] is Heap's algorithm loop counter at recursion depth i
    # (the iterative encoding of the recursive formulation).
    c = [0] * n
    yield elements
    i = 0
    while i < n:
        if c[i] < i:
            # Which element is swapped with position i depends on the
            # parity of i -- this is Heap's swap rule.
            if i % 2 == 0:
                elements[0], elements[i] = elements[i], elements[0]
            else:
                elements[c[i]], elements[i] = elements[i], elements[c[i]]
            yield elements
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
def brute_permutation(A, B):
    """
    Re-orders the input atom list and xyz coordinates using the brute force
    method of permuting all rows of the input coordinates

    Parameters
    ----------
    A : array
        (N,D) matrix, where N is points and D is dimension
    B : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view : array
        (N,1) matrix, reordered view of B projected to A
    """
    best_rmsd = np.inf
    best_view = None

    num_atoms = A.shape[0]
    order = list(range(num_atoms))

    for permutation in generate_permutations(order, num_atoms):
        # Kabsch RMSD between A and this row permutation of B.
        candidate = kabsch_rmsd(A, B[permutation])
        if candidate < best_rmsd:
            best_rmsd = candidate
            # generate_permutations mutates and re-yields the same list,
            # so snapshot it. A shallow copy suffices for a flat list of
            # ints (the original used an unnecessary copy.deepcopy).
            best_view = list(permutation)

    return best_view
def reorder_brute(p_atoms, q_atoms, p_coord, q_coord):
    """
    Re-orders the input atom list and xyz coordinates using all permutation
    of rows (using optimized column results)

    Parameters
    ----------
    p_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    q_atoms : array
        (N,1) matrix, where N is points holding the atoms' names
    p_coord : array
        (N,D) matrix, where N is points and D is dimension
    q_coord : array
        (N,D) matrix, where N is points and D is dimension

    Returns
    -------
    view_reorder : array
        (N,1) matrix, reordered indexes of atom alignment based on the
        coordinates of the atoms
    """
    # -1 marks positions that were never assigned (should not survive).
    view_reorder = np.full(q_atoms.shape, -1, dtype=int)

    for element in np.unique(p_atoms):
        p_idx = np.where(p_atoms == element)[0]
        q_idx = np.where(q_atoms == element)[0]
        best = brute_permutation(p_coord[p_idx], q_coord[q_idx])
        view_reorder[p_idx] = q_idx[best]

    return view_reorder
def check_reflections(p_atoms, q_atoms, p_coord, q_coord,
reorder_method=reorder_hungarian,
rotation_method=kabsch_rmsd,
keep_stereo=False):
"""
Minimize RMSD using reflection planes for molecule P and Q
Warning: This will affect stereo-chemistry
Parameters
----------
p_atoms : array
(N,1) matrix, where N is points holding the atoms' names
q_atoms : array
(N,1) matrix, where N is points holding the atoms' names
p_coord : array
(N,D) matrix, where N is points and D is dimension
q_coord : array
(N,D) matrix, where N is points and D is dimension
Returns
-------
min_rmsd
min_swap
min_reflection
min_review
"""
min_rmsd = np.inf
min_swap = None
min_reflection = None
min_review = None
tmp_review = None
swap_mask = [1,-1,-1,1,-1,1]
reflection_mask = [1,-1,-1,-1,1,1,1,-1]
for swap, i in zip(AXIS_SWAPS, swap_mask):
for reflection, j in zip(AXIS_REFLECTIONS, reflection_mask):
if keep_stereo and i * j == -1: continue # skip enantiomers
tmp_atoms = copy.copy(q_atoms)
tmp_coord = copy.deepcopy(q_coord)
tmp_coord = tmp_coord[:, swap]
tmp_coord = np.dot(tmp_coord, np.diag(reflection))
tmp_coord -= centroid(tmp_coord)
# Reorder
if reorder_method is not None:
tmp_review = reorder_method(p_atoms, tmp_atoms, p_coord, tmp_coord)
tmp_coord = tmp_coord[tmp_review]
tmp_atoms = tmp_atoms[tmp_review]
# Rotation
if rotation_method is None:
this_rmsd = rmsd(p_coord, tmp_coord)
else:
this_rmsd = rotation_method(p_coord, tmp_coord)
if this_rmsd < min_rmsd:
min_rmsd = this_rmsd
min_swap = swap
min_reflection = reflection
min_review = tmp_review
if not (p_atoms == q_atoms[min_review]).all():
print("error: Not aligned")
quit()
return min_rmsd, min_swap, min_reflection, min_review
def set_coordinates(atoms, V, title="", decimals=8):
"""
Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of atomic types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
decimals : int (optional)
number of decimals for the coordinates
Return
------
output : str
Molecule in XYZ format
"""
N, D = V.shape
fmt = "{:2s}" + (" {:15."+str(decimals)+"f}")*3
out = list()
out += [str(N)]
out += [title]
for i in range(N):
atom = atoms[i]
atom = atom[0].upper() + atom[1:]
out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
return "\n".join(out)
def print_coordinates(atoms, V, title=""):
"""
Print coordinates V with corresponding atoms to stdout in XYZ format.
Parameters
----------
atoms : list
List of element types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
"""
print(set_coordinates(atoms, V, title=title))
return
def get_coordinates(filename, fmt):
"""
Get coordinates from filename in format fmt. Supports XYZ and PDB.
Parameters
----------
filename : string
Filename to read
fmt : string
Format of filename. Either xyz or pdb.
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
if fmt == "xyz":
get_func = get_coordinates_xyz
elif fmt == "pdb":
get_func = get_coordinates_pdb
else:
exit("Could not recognize file format: {:s}".format(fmt))
return get_func(filename)
def get_coordinates_pdb(filename):
"""
Get coordinates from the first chain in a pdb file
and return a vectorset with all the coordinates.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
# PDB files tend to be a bit of a mess. The x, y and z coordinates
# are supposed to be in column 31-38, 39-46 and 47-54, but this is
# not always the case.
# Because of this the three first columns containing a decimal is used.
# Since the format doesn't require a space between columns, we use the
# above column indices as a fallback.
x_column = None
V = list()
# Same with atoms and atom naming.
# The most robust way to do this is probably
# to assume that the atomtype is given in column 3.
atoms = list()
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith("TER") or line.startswith("END"):
break
if line.startswith("ATOM"):
tokens = line.split()
# Try to get the atomtype
try:
atom = tokens[2][0]
if atom in ("H", "C", "N", "O", "S", "P"):
atoms.append(atom)
else:
# e.g. 1HD1
atom = tokens[2][1]
if atom == "H":
atoms.append(atom)
else:
raise Exception
except:
exit("error: Parsing atomtype for the following line: \n{0:s}".format(line))
if x_column == None:
try:
# look for x column
for i, x in enumerate(tokens):
if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
x_column = i
break
except IndexError:
exit("error: Parsing coordinates for the following line: \n{0:s}".format(line))
# Try to read the coordinates
try:
V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
except:
# If that doesn't work, use hardcoded indices
try:
x = line[30:38]
y = line[38:46]
z = line[46:54]
V.append(np.asarray([x, y ,z], dtype=float))
except:
exit("error: Parsing input for the following line: \n{0:s}".format(line))
V = np.asarray(V)
atoms = np.asarray(atoms)
assert V.shape[0] == atoms.size
return atoms, V
def main():
import argparse
import sys
description = __doc__
version_msg = """
rmsd {}
See https://github.com/charnley/rmsd for citation information
"""
version_msg = version_msg.format(__version__)
epilog = """
"""
parser = argparse.ArgumentParser(
usage='calculate_rmsd [options] FILE_A FILE_B',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
# Input structures
parser.add_argument('structure_a', metavar='FILE_A', type=str, help='structures in .xyz or .pdb format')
parser.add_argument('structure_b', metavar='FILE_B', type=str)
# Admin
parser.add_argument('-v', '--version', action='version', version=version_msg)
# Rotation
parser.add_argument('-r', '--rotation', action='store', default="kabsch", help='select rotation method. "kabsch" (default), "quaternion" or "none"', metavar="METHOD")
# Reorder arguments
parser.add_argument('-e', '--reorder', action='store_true', help='align the atoms of molecules (default: Hungarian)')
parser.add_argument('--reorder-method', action='store', default="hungarian", metavar="METHOD", help='select which reorder method to use; hungarian (default), brute, distance')
parser.add_argument('--use-reflections', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). This will affect stereo-chemistry.')
parser.add_argument('--use-reflections-keep-stereo', action='store_true', help='scan through reflections in planes (eg Y transformed to -Y -> X, -Y, Z) and axis changes, (eg X and Z coords exchanged -> Z, Y, X). Stereo-chemistry will be kept.')
# Filter
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-nh', '--no-hydrogen', action='store_true', help='ignore hydrogens when calculating RMSD')
index_group.add_argument('--remove-idx', nargs='+', type=int, help='index list of atoms NOT to consider', metavar='IDX')
index_group.add_argument('--add-idx', nargs='+', type=int, help='index list of atoms to consider', metavar='IDX')
# format and print
parser.add_argument('--format', action='store', help='format of input files. valid format are xyz and pdb', metavar='FMT')
parser.add_argument('-p', '--output', '--print', action='store_true', help='print out structure B, centered and rotated unto structure A\'s coordinates in XYZ format')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# As default, load the extension as format
if args.format is None:
args.format = args.structure_a.split('.')[-1]
p_all_atoms, p_all = get_coordinates(args.structure_a, args.format)
q_all_atoms, q_all = get_coordinates(args.structure_b, args.format)
p_size = p_all.shape[0]
q_size = q_all.shape[0]
if not p_size == q_size:
print("error: Structures not same size")
quit()
if np.count_nonzero(p_all_atoms != q_all_atoms) and not args.reorder:
msg = """
error: Atoms are not in the same order.
Use --reorder to align the atoms (can be expensive for large structures).
Please see --help or documentation for more information or
https://github.com/charnley/rmsd for further examples.
"""
print(msg)
exit()
# Set local view
p_view = None
q_view = None
if args.no_hydrogen:
p_view = np.where(p_all_atoms != 'H')
q_view = np.where(q_all_atoms != 'H')
elif args.remove_idx:
index = range(p_size)
index = set(index) - set(args.remove_idx)
index = list(index)
p_view = index
q_view = index
elif args.add_idx:
p_view = args.add_idx
q_view = args.add_idx
# Set local view
if p_view is None:
p_coord = copy.deepcopy(p_all)
q_coord = copy.deepcopy(q_all)
p_atoms = copy.deepcopy(p_all_atoms)
q_atoms = copy.deepcopy(q_all_atoms)
else:
if args.reorder and args.output:
print("error: Cannot reorder atoms and print structure, when excluding atoms (such as --no-hydrogen)")
quit()
if args.use_reflections and args.output:
print("error: Cannot use reflections on atoms and print, when excluding atoms (such as --no-hydrogen)")
quit()
p_coord = copy.deepcopy(p_all[p_view])
q_coord = copy.deepcopy(q_all[q_view])
p_atoms = copy.deepcopy(p_all_atoms[p_view])
q_atoms = copy.deepcopy(q_all_atoms[q_view])
# Create the centroid of P and Q which is the geometric center of a
# N-dimensional region and translate P and Q onto that center.
# http://en.wikipedia.org/wiki/Centroid
p_cent = centroid(p_coord)
q_cent = centroid(q_coord)
p_coord -= p_cent
q_coord -= q_cent
# set rotation method
if args.rotation.lower() == "kabsch":
rotation_method = kabsch_rmsd
elif args.rotation.lower() == "quaternion":
rotation_method = quaternion_rmsd
elif args.rotation.lower() == "none":
rotation_method = None
else:
print("error: Unknown rotation method:", args.rotation)
quit()
# set reorder method
if not args.reorder:
reorder_method = None
if args.reorder_method == "hungarian":
reorder_method = reorder_hungarian
elif args.reorder_method == "brute":
reorder_method = reorder_brute
elif args.reorder_method == "distance":
reorder_method = reorder_distance
else:
print("error: Unknown reorder method:", args.reorder_method)
quit()
# Save the resulting RMSD
result_rmsd = None
if args.use_reflections:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method)
elif args.use_reflections_keep_stereo:
result_rmsd, q_swap, q_reflection, q_review = check_reflections(
p_atoms,
q_atoms,
p_coord,
q_coord,
reorder_method=reorder_method,
rotation_method=rotation_method,
keep_stereo=True)
elif args.reorder:
q_review = reorder_method(p_atoms, q_atoms, p_coord, q_coord)
q_coord = q_coord[q_review]
q_atoms = q_atoms[q_review]
if not all(p_atoms == q_atoms):
print("error: Structure not aligned")
quit()
# print result
if args.output:
if args.reorder:
if q_review.shape[0] != q_all.shape[0]:
print("error: Reorder length error. Full atom list needed for --print")
quit()
q_all = q_all[q_review]
q_all_atoms = q_all_atoms[q_review]
# Get rotation matrix
U = kabsch(q_coord, p_coord)
# recenter all atoms and rotate all atoms
q_all -= q_cent
q_all = np.dot(q_all, U)
# center q on p's original coordinates
q_all += p_cent
# done and done
xyz = set_coordinates(q_all_atoms, q_all, title="{} - modified".format(args.structure_b))
print(xyz)
else:
if result_rmsd:
pass
elif rotation_method is None:
result_rmsd = rmsd(p_coord, q_coord)
else:
result_rmsd = rotation_method(p_coord, q_coord)
print("{0}".format(result_rmsd))
return
if __name__ == "__main__":
main()
|
rckclmbr/pyportify | pyportify/gpsoauth/__init__.py | perform_master_login | python | def perform_master_login(email, password, android_id,
service='ac2dm', device_country='us',
operatorCountry='us', lang='en', sdk_version=17):
data = {
'accountType': 'HOSTED_OR_GOOGLE',
'Email': email,
'has_permission': 1,
'add_account': 1,
'EncryptedPasswd':
google.signature(email, password, android_key_7_3_29),
'service': service,
'source': 'android',
'androidId': android_id,
'device_country': device_country,
'operatorCountry': device_country,
'lang': lang,
'sdk_version': sdk_version,
}
return _perform_auth_request(data) | Perform a master login, which is what Android does when you first add a
Google account.
Return a dict, eg::
{
'Auth': '...',
'Email': 'email@gmail.com',
'GooglePlusUpgrade': '1',
'LSID': '...',
'PicasaUser': 'My Name',
'RopRevision': '1',
'RopText': ' ',
'SID': '...',
'Token': 'oauth2rt_1/...',
'firstName': 'My',
'lastName': 'Name',
'services': 'hist,mail,googleme,...'
} | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/gpsoauth/__init__.py#L26-L67 | [
"def signature(email, password, key):\n signature = bytearray(b'\\x00')\n\n struct = key_to_struct(key)\n signature.extend(hashlib.sha1(struct).digest()[:4])\n\n message = (email + u'\\x00' + password).encode('utf-8')\n encrypted_login = rsaes_oaep.encrypt(key, message)\n\n signature.extend(encrypted_login)\n\n return base64.urlsafe_b64encode(signature)\n",
"def _perform_auth_request(data):\n res = requests.post(auth_url, data,\n headers={'User-Agent': useragent})\n\n return google.parse_auth_response(res.text)\n"
] | import requests
from . import google
# The key is distirbuted with Google Play Services.
# This one is from version 7.3.29.
b64_key_7_3_29 = (b"AAAAgMom/1a/v0lblO2Ubrt60J2gcuXSljGFQXgcyZWveWLEwo6prwgi3"
b"iJIZdodyhKZQrNWp5nKJ3srRXcUW+F1BD3baEVGcmEgqaLZUNBjm057pK"
b"RI16kB0YppeGx5qIQ5QjKzsR8ETQbKLNWgRY0QRNVz34kMJR3P/LgHax/"
b"6rmf5AAAAAwEAAQ==")
android_key_7_3_29 = google.key_from_b64(b64_key_7_3_29)
auth_url = 'https://android.clients.google.com/auth'
useragent = 'gpsoauth-portify/1.0'
def _perform_auth_request(data):
res = requests.post(auth_url, data,
headers={'User-Agent': useragent})
return google.parse_auth_response(res.text)
def perform_oauth(email, master_token, android_id, service, app, client_sig,
device_country='us', operatorCountry='us', lang='en',
sdk_version=17):
"""
Use a master token from master_login to perform OAuth to a specific Google
service.
Return a dict, eg::
{
'Auth': '...',
'LSID': '...',
'SID': '..',
'issueAdvice': 'auto',
'services': 'hist,mail,googleme,...'
}
To authenticate requests to this service, include a header
``Authorization: GoogleLogin auth=res['Auth']``.
"""
data = {
'accountType': 'HOSTED_OR_GOOGLE',
'Email': email,
'has_permission': 1,
'EncryptedPasswd': master_token,
'service': service,
'source': 'android',
'androidId': android_id,
'app': app,
'client_sig': client_sig,
'device_country': device_country,
'operatorCountry': device_country,
'lang': lang,
'sdk_version': sdk_version
}
return _perform_auth_request(data)
|
rckclmbr/pyportify | pyportify/gpsoauth/__init__.py | perform_oauth | python | def perform_oauth(email, master_token, android_id, service, app, client_sig,
device_country='us', operatorCountry='us', lang='en',
sdk_version=17):
data = {
'accountType': 'HOSTED_OR_GOOGLE',
'Email': email,
'has_permission': 1,
'EncryptedPasswd': master_token,
'service': service,
'source': 'android',
'androidId': android_id,
'app': app,
'client_sig': client_sig,
'device_country': device_country,
'operatorCountry': device_country,
'lang': lang,
'sdk_version': sdk_version
}
return _perform_auth_request(data) | Use a master token from master_login to perform OAuth to a specific Google
service.
Return a dict, eg::
{
'Auth': '...',
'LSID': '...',
'SID': '..',
'issueAdvice': 'auto',
'services': 'hist,mail,googleme,...'
}
To authenticate requests to this service, include a header
``Authorization: GoogleLogin auth=res['Auth']``. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/gpsoauth/__init__.py#L70-L107 | [
"def _perform_auth_request(data):\n res = requests.post(auth_url, data,\n headers={'User-Agent': useragent})\n\n return google.parse_auth_response(res.text)\n"
] | import requests
from . import google
# The key is distirbuted with Google Play Services.
# This one is from version 7.3.29.
b64_key_7_3_29 = (b"AAAAgMom/1a/v0lblO2Ubrt60J2gcuXSljGFQXgcyZWveWLEwo6prwgi3"
b"iJIZdodyhKZQrNWp5nKJ3srRXcUW+F1BD3baEVGcmEgqaLZUNBjm057pK"
b"RI16kB0YppeGx5qIQ5QjKzsR8ETQbKLNWgRY0QRNVz34kMJR3P/LgHax/"
b"6rmf5AAAAAwEAAQ==")
android_key_7_3_29 = google.key_from_b64(b64_key_7_3_29)
auth_url = 'https://android.clients.google.com/auth'
useragent = 'gpsoauth-portify/1.0'
def _perform_auth_request(data):
res = requests.post(auth_url, data,
headers={'User-Agent': useragent})
return google.parse_auth_response(res.text)
def perform_master_login(email, password, android_id,
service='ac2dm', device_country='us',
operatorCountry='us', lang='en', sdk_version=17):
"""
Perform a master login, which is what Android does when you first add a
Google account.
Return a dict, eg::
{
'Auth': '...',
'Email': 'email@gmail.com',
'GooglePlusUpgrade': '1',
'LSID': '...',
'PicasaUser': 'My Name',
'RopRevision': '1',
'RopText': ' ',
'SID': '...',
'Token': 'oauth2rt_1/...',
'firstName': 'My',
'lastName': 'Name',
'services': 'hist,mail,googleme,...'
}
"""
data = {
'accountType': 'HOSTED_OR_GOOGLE',
'Email': email,
'has_permission': 1,
'add_account': 1,
'EncryptedPasswd':
google.signature(email, password, android_key_7_3_29),
'service': service,
'source': 'android',
'androidId': android_id,
'device_country': device_country,
'operatorCountry': device_country,
'lang': lang,
'sdk_version': sdk_version,
}
return _perform_auth_request(data)
|
rckclmbr/pyportify | pyportify/pkcs1/primes.py | is_prime | python | def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise.
'''
if algorithm is None:
algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
if not gmpy:
raise NotImplementedError
return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
# miller rabin probability of primality is 1/4**k
return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
# for jacobi it's 1/2**k
return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
raise NotImplementedError | Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L19-L47 | [
"def miller_rabin(n, k, rnd=default_pseudo_random):\n '''\n Pure python implementation of the Miller-Rabin algorithm.\n\n n - the integer number to test,\n k - the number of iteration, the probability of n being prime if the\n algorithm returns True is 1/2**k,\n rnd - a random generator\n '''\n s = 0\n d = n-1\n # Find nearest power of 2\n s = primitives.integer_bit_size(n)\n # Find greatest factor which is a power of 2\n s = fractions.gcd(2**s, n-1)\n d = (n-1) // s\n s = primitives.integer_bit_size(s) - 1\n while k:\n k = k - 1\n a = rnd.randint(2, n-2)\n x = pow(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for r in range(1, s-1):\n x = pow(x, 2, n)\n if x == 1:\n return False\n if x == n - 1:\n break\n else:\n return False\n return True\n",
"def randomized_primality_testing(n, rnd=default_crypto_random,\n k=DEFAULT_ITERATION):\n '''Calculates whether n is composite (which is always correct) or\n prime (which is incorrect with error probability 2**-k)\n\n Returns False if the number is composite, and True if it's\n probably prime.\n '''\n\n # 50% of Jacobi-witnesses can report compositness of non-prime numbers\n\n # The implemented algorithm using the Jacobi witness function has error\n # probability q <= 0.5, according to Goodrich et. al\n #\n # q = 0.5\n # t = int(math.ceil(k / log(1 / q, 2)))\n # So t = k / log(2, 2) = k / 1 = k\n # this means we can use range(k) rather than range(t)\n\n for _ in range(k):\n x = rnd.randint(0, n-1)\n if jacobi_witness(x, n):\n return False\n return True\n"
] | import fractions
from . import primitives
from .defaults import default_pseudo_random, default_crypto_random
PRIME_ALGO = 'miller-rabin'
gmpy = None
try:
import gmpy
PRIME_ALGO = 'gmpy-miller-rabin'
except ImportError:
pass
DEFAULT_ITERATION = 1000
USE_MILLER_RABIN = True
def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer
'''
while True:
n = rnd.getrandbits(size-2)
n = 2 ** (size-1) + n * 2 + 1
if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
return n
if algorithm == 'gmpy-miller-rabin':
return gmpy.next_prime(n)
def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0:
return 0
return result
def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f
def randomized_primality_testing(n, rnd=default_crypto_random,
k=DEFAULT_ITERATION):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
# 50% of Jacobi-witnesses can report compositness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rnd.randint(0, n-1)
if jacobi_witness(x, n):
return False
return True
def miller_rabin(n, k, rnd=default_pseudo_random):
'''
Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
k = k - 1
a = rnd.randint(2, n-2)
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for r in range(1, s-1):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
break
else:
return False
return True
|
rckclmbr/pyportify | pyportify/pkcs1/primes.py | get_prime | python | def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer
'''
while True:
n = rnd.getrandbits(size-2)
n = 2 ** (size-1) + n * 2 + 1
if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
return n
if algorithm == 'gmpy-miller-rabin':
return gmpy.next_prime(n) | Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L50-L70 | [
"def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,\n algorithm=None):\n '''Test if n is a prime number\n\n m - the integer to test\n rnd - the random number generator to use for the probalistic primality\n algorithms,\n k - the number of iterations to use for the probabilistic primality\n algorithms,\n algorithm - the primality algorithm to use, default is Miller-Rabin. The\n gmpy implementation is used if gmpy is installed.\n\n Return value: True is n seems prime, False otherwise.\n '''\n\n if algorithm is None:\n algorithm = PRIME_ALGO\n if algorithm == 'gmpy-miller-rabin':\n if not gmpy:\n raise NotImplementedError\n return gmpy.is_prime(n, k)\n elif algorithm == 'miller-rabin':\n # miller rabin probability of primality is 1/4**k\n return miller_rabin(n, k, rnd=rnd)\n elif algorithm == 'solovay-strassen':\n # for jacobi it's 1/2**k\n return randomized_primality_testing(n, rnd=rnd, k=k*2)\n else:\n raise NotImplementedError\n"
] | import fractions
from . import primitives
from .defaults import default_pseudo_random, default_crypto_random
PRIME_ALGO = 'miller-rabin'
gmpy = None
try:
import gmpy
PRIME_ALGO = 'gmpy-miller-rabin'
except ImportError:
pass
DEFAULT_ITERATION = 1000
USE_MILLER_RABIN = True
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise.
'''
if algorithm is None:
algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
if not gmpy:
raise NotImplementedError
return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
# miller rabin probability of primality is 1/4**k
return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
# for jacobi it's 1/2**k
return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
raise NotImplementedError
def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0:
return 0
return result
def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f
def randomized_primality_testing(n, rnd=default_crypto_random,
k=DEFAULT_ITERATION):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
# 50% of Jacobi-witnesses can report compositness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rnd.randint(0, n-1)
if jacobi_witness(x, n):
return False
return True
def miller_rabin(n, k, rnd=default_pseudo_random):
'''
Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
k = k - 1
a = rnd.randint(2, n-2)
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for r in range(1, s-1):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
break
else:
return False
return True
|
rckclmbr/pyportify | pyportify/pkcs1/primes.py | jacobi | python | def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0:
return 0
return result | Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1 | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L73-L97 | null | import fractions
from . import primitives
from .defaults import default_pseudo_random, default_crypto_random
PRIME_ALGO = 'miller-rabin'
gmpy = None
try:
import gmpy
PRIME_ALGO = 'gmpy-miller-rabin'
except ImportError:
pass
DEFAULT_ITERATION = 1000
USE_MILLER_RABIN = True
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise.
'''
if algorithm is None:
algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
if not gmpy:
raise NotImplementedError
return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
# miller rabin probability of primality is 1/4**k
return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
# for jacobi it's 1/2**k
return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
raise NotImplementedError
def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer
'''
while True:
n = rnd.getrandbits(size-2)
n = 2 ** (size-1) + n * 2 + 1
if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
return n
if algorithm == 'gmpy-miller-rabin':
return gmpy.next_prime(n)
def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f
def randomized_primality_testing(n, rnd=default_crypto_random,
k=DEFAULT_ITERATION):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
# 50% of Jacobi-witnesses can report compositness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rnd.randint(0, n-1)
if jacobi_witness(x, n):
return False
return True
def miller_rabin(n, k, rnd=default_pseudo_random):
'''
Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
k = k - 1
a = rnd.randint(2, n-2)
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for r in range(1, s-1):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
break
else:
return False
return True
|
rckclmbr/pyportify | pyportify/pkcs1/primes.py | jacobi_witness | python | def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f | Returns False if n is an Euler pseudo-prime with base x, and
True otherwise. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L100-L106 | [
"def jacobi(a, b):\n '''Calculates the value of the Jacobi symbol (a/b) where both a and b are\n positive integers, and b is odd\n\n :returns: -1, 0 or 1\n '''\n\n assert a > 0\n assert b > 0\n\n if a == 0:\n return 0\n result = 1\n while a > 1:\n if a & 1:\n if ((a-1)*(b-1) >> 2) & 1:\n result = -result\n a, b = b % a, a\n else:\n if (((b * b) - 1) >> 3) & 1:\n result = -result\n a >>= 1\n if a == 0:\n return 0\n return result\n"
] | import fractions
from . import primitives
from .defaults import default_pseudo_random, default_crypto_random
PRIME_ALGO = 'miller-rabin'
gmpy = None
try:
import gmpy
PRIME_ALGO = 'gmpy-miller-rabin'
except ImportError:
pass
DEFAULT_ITERATION = 1000
USE_MILLER_RABIN = True
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise.
'''
if algorithm is None:
algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
if not gmpy:
raise NotImplementedError
return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
# miller rabin probability of primality is 1/4**k
return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
# for jacobi it's 1/2**k
return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
raise NotImplementedError
def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer
'''
while True:
n = rnd.getrandbits(size-2)
n = 2 ** (size-1) + n * 2 + 1
if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
return n
if algorithm == 'gmpy-miller-rabin':
return gmpy.next_prime(n)
def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0:
return 0
return result
def randomized_primality_testing(n, rnd=default_crypto_random,
k=DEFAULT_ITERATION):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
# 50% of Jacobi-witnesses can report compositness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rnd.randint(0, n-1)
if jacobi_witness(x, n):
return False
return True
def miller_rabin(n, k, rnd=default_pseudo_random):
'''
Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
k = k - 1
a = rnd.randint(2, n-2)
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for r in range(1, s-1):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
break
else:
return False
return True
|
rckclmbr/pyportify | pyportify/pkcs1/primes.py | miller_rabin | python | def miller_rabin(n, k, rnd=default_pseudo_random):
'''
Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
k = k - 1
a = rnd.randint(2, n-2)
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for r in range(1, s-1):
x = pow(x, 2, n)
if x == 1:
return False
if x == n - 1:
break
else:
return False
return True | Pure python implementation of the Miller-Rabin algorithm.
n - the integer number to test,
k - the number of iteration, the probability of n being prime if the
algorithm returns True is 1/2**k,
rnd - a random generator | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L135-L166 | [
"def integer_bit_size(n):\n '''Returns the number of bits necessary to store the integer n.'''\n if n == 0:\n return 1\n s = 0\n while n:\n s += 1\n n >>= 1\n return s\n"
] | import fractions
from . import primitives
from .defaults import default_pseudo_random, default_crypto_random
PRIME_ALGO = 'miller-rabin'
gmpy = None
try:
import gmpy
PRIME_ALGO = 'gmpy-miller-rabin'
except ImportError:
pass
DEFAULT_ITERATION = 1000
USE_MILLER_RABIN = True
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Test if n is a prime number
m - the integer to test
rnd - the random number generator to use for the probalistic primality
algorithms,
k - the number of iterations to use for the probabilistic primality
algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The
gmpy implementation is used if gmpy is installed.
Return value: True is n seems prime, False otherwise.
'''
if algorithm is None:
algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
if not gmpy:
raise NotImplementedError
return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
# miller rabin probability of primality is 1/4**k
return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
# for jacobi it's 1/2**k
return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
raise NotImplementedError
def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,
algorithm=None):
'''Generate a prime number of the giver size using the is_prime() helper
function.
size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iteration to use for the probabilistic primality
algorithms.
algorithm - the name of the primality algorithm to use, default is the
probabilistic Miller-Rabin algorithm.
Return value: a prime number, as a long integer
'''
while True:
n = rnd.getrandbits(size-2)
n = 2 ** (size-1) + n * 2 + 1
if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
return n
if algorithm == 'gmpy-miller-rabin':
return gmpy.next_prime(n)
def jacobi(a, b):
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd
:returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0:
return 0
return result
def jacobi_witness(x, n):
'''Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f
def randomized_primality_testing(n, rnd=default_crypto_random,
k=DEFAULT_ITERATION):
'''Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
'''
# 50% of Jacobi-witnesses can report compositness of non-prime numbers
# The implemented algorithm using the Jacobi witness function has error
# probability q <= 0.5, according to Goodrich et. al
#
# q = 0.5
# t = int(math.ceil(k / log(1 / q, 2)))
# So t = k / log(2, 2) = k / 1 = k
# this means we can use range(k) rather than range(t)
for _ in range(k):
x = rnd.randint(0, n-1)
if jacobi_witness(x, n):
return False
return True
|
rckclmbr/pyportify | pyportify/gpsoauth/util.py | long_to_bytes | python | def long_to_bytes(lnum, padmultiple=1):
# source: http://stackoverflow.com/a/14527004/1231454
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s | Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/gpsoauth/util.py#L11-L33 | null | import binascii
import sys
PY3 = sys.version[0] == '3'
def bytes_to_long(s):
return int.from_bytes(s, "big")
|
rckclmbr/pyportify | pyportify/util.py | find_closest_match | python | def find_closest_match(target_track, tracks):
track = None
# Get a list of (track, artist match ratio, name match ratio)
tracks_with_match_ratio = [(
track,
get_similarity(target_track.artist, track.artist),
get_similarity(target_track.name, track.name),
) for track in tracks]
# Sort by artist then by title
sorted_tracks = sorted(
tracks_with_match_ratio,
key=lambda t: (t[1], t[2]),
reverse=True # Descending, highest match ratio first
)
if sorted_tracks:
track = sorted_tracks[0][0] # Closest match to query
return track | Return closest match to target track | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/util.py#L35-L54 | null | import itertools
import sys
from difflib import SequenceMatcher as SM
def uprint(*objects, sep=' ', end='\n', file=sys.stdout):
enc = file.encoding
if enc == 'UTF-8':
print(*objects, sep=sep, end=end, file=file)
else:
def f(obj):
return str(obj) \
.encode(enc, errors='backslashreplace') \
.decode(enc)
print(*map(f, objects), sep=sep, end=end, file=file)
def grouper(iterable, n):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
def get_similarity(s1, s2):
"""
Return similarity of both strings as a float between 0 and 1
"""
return SM(None, s1, s2).ratio()
|
rckclmbr/pyportify | pyportify/pkcs1/mgf.py | mgf1 | python | def mgf1(mgf_seed, mask_len, hash_class=hashlib.sha1):
'''
Mask Generation Function v1 from the PKCS#1 v2.0 standard.
mgs_seed - the seed, a byte string
mask_len - the length of the mask to generate
hash_class - the digest algorithm to use, default is SHA1
Return value: a pseudo-random mask, as a byte string
'''
h_len = hash_class().digest_size
if mask_len > 0x10000:
raise ValueError('mask too long')
T = b''
for i in range(0, integer_ceil(mask_len, h_len)):
C = i2osp(i, 4)
T = T + hash_class(mgf_seed + C).digest()
return T[:mask_len] | Mask Generation Function v1 from the PKCS#1 v2.0 standard.
mgs_seed - the seed, a byte string
mask_len - the length of the mask to generate
hash_class - the digest algorithm to use, default is SHA1
Return value: a pseudo-random mask, as a byte string | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/mgf.py#L6-L23 | [
"def integer_ceil(a, b):\n '''Return the ceil integer of a div b.'''\n quanta, mod = divmod(a, b)\n if mod:\n quanta += 1\n return quanta\n",
"def i2osp(x, x_len):\n '''Converts the integer x to its big-endian representation of length\n x_len.\n '''\n if x > 256**x_len:\n raise exceptions.IntegerTooLarge\n h = hex(x)[2:]\n if h[-1] == 'L':\n h = h[:-1]\n if len(h) & 1 == 1:\n h = '0%s' % h\n x = binascii.unhexlify(h)\n return b'\\x00' * int(x_len-len(x)) + x\n"
] | import hashlib
from .primitives import integer_ceil, i2osp
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | integer_ceil | python | def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta | Return the ceil integer of a div b. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L14-L19 | null | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b))
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length]
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | integer_byte_size | python | def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta | Returns the number of bytes necessary to store the integer n. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L22-L27 | [
"def integer_bit_size(n):\n '''Returns the number of bits necessary to store the integer n.'''\n if n == 0:\n return 1\n s = 0\n while n:\n s += 1\n n >>= 1\n return s\n"
] | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b))
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length]
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | bezout | python | def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a | Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L41-L64 | null | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b))
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length]
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | i2osp | python | def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x | Converts the integer x to its big-endian representation of length
x_len. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L67-L79 | null | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b))
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length]
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | string_xor | python | def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b)) | Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L90-L97 | null | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length]
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | get_nonzero_random_bytes | python | def get_nonzero_random_bytes(length, rnd=default_crypto_random):
'''
Accumulate random bit string and remove \0 bytes until the needed length
is obtained.
'''
result = []
i = 0
while i < length:
rnd = rnd.getrandbits(12*length)
s = i2osp(rnd, 3*length)
s = s.replace('\x00', '')
result.append(s)
i += len(s)
return (''.join(result))[:length] | Accumulate random bit string and remove \0 bytes until the needed length
is obtained. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L105-L118 | [
"def i2osp(x, x_len):\n '''Converts the integer x to its big-endian representation of length\n x_len.\n '''\n if x > 256**x_len:\n raise exceptions.IntegerTooLarge\n h = hex(x)[2:]\n if h[-1] == 'L':\n h = h[:-1]\n if len(h) & 1 == 1:\n h = '0%s' % h\n x = binascii.unhexlify(h)\n return b'\\x00' * int(x_len-len(x)) + x\n"
] | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
'''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as:
p = GCD(a,b)
a * u + b * v = p
Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html.
'''
u = 1
v = 0
s = 0
t = 1
while b > 0:
q = a // b
r = a % b
a = b
b = r
tmp = s
s = u - q * s
u = tmp
tmp = t
t = v - q * t
v = tmp
return u, v, a
def i2osp(x, x_len):
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise exceptions.IntegerTooLarge
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = binascii.unhexlify(h)
return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = binascii.hexlify(x)
return int(h, 16)
def string_xor(a, b):
'''Computes the XOR operator between two byte strings. If the strings are
of different lengths, the result string is as long as the shorter.
'''
if sys.version_info[0] < 3:
return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
else:
return bytes(x ^ y for (x, y) in zip(a, b))
def product(*args):
'''Computes the product of its arguments.'''
return reduce(operator.__mul__, args)
def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result
|
rckclmbr/pyportify | pyportify/pkcs1/primitives.py | constant_time_cmp | python | def constant_time_cmp(a, b):
'''Compare two strings using constant time.'''
result = True
for x, y in zip(a, b):
result &= (x == y)
return result | Compare two strings using constant time. | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L121-L126 | null | import binascii
import operator
import sys
from functools import reduce
from .defaults import default_crypto_random
from . import exceptions
'''Primitive functions extracted from the PKCS1 RFC'''
def integer_ceil(a, b):
'''Return the ceil integer of a div b.'''
quanta, mod = divmod(a, b)
if mod:
quanta += 1
return quanta
def integer_byte_size(n):
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
quanta += 1
return quanta
def integer_bit_size(n):
'''Returns the number of bits necessary to store the integer n.'''
if n == 0:
return 1
s = 0
while n:
s += 1
n >>= 1
return s
def bezout(a, b):
    '''Extended Euclidean algorithm.

    Return (u, v, p) such that p = GCD(a, b) and a * u + b * v == p.
    '''
    u, v = 1, 0
    s, t = 0, 1
    while b > 0:
        q, r = divmod(a, b)
        a, b = b, r
        u, s = s, u - q * s
        v, t = t, v - q * t
    return u, v, a
def i2osp(x, x_len):
    '''Convert the nonnegative integer x to a big-endian byte string of
    exactly x_len bytes (the I2OSP primitive from PKCS#1).

    Raises exceptions.IntegerTooLarge when x does not fit in x_len bytes.
    '''
    # BUG FIX: the largest value representable in x_len bytes is
    # 256**x_len - 1, so equality must also be rejected.  The original
    # strict '>' let x == 256**x_len through and returned x_len + 1 bytes.
    if x >= 256 ** x_len:
        raise exceptions.IntegerTooLarge
    h = hex(x)[2:]
    if h[-1] == 'L':  # Python 2 long literals carry a trailing 'L'
        h = h[:-1]
    if len(h) & 1 == 1:  # unhexlify needs an even number of hex digits
        h = '0%s' % h
    x = binascii.unhexlify(h)
    return b'\x00' * int(x_len-len(x)) + x
def os2ip(x):
    '''OS2IP primitive: interpret the big-endian byte string x as an
    unsigned integer and return it.
    '''
    return int(binascii.hexlify(x), 16)
def string_xor(a, b):
    '''XOR two byte strings element-wise; the result has the length of
    the shorter input.
    '''
    zipped = zip(a, b)
    if sys.version_info[0] >= 3:
        return bytes(left ^ right for left, right in zipped)
    return ''.join(chr(ord(left) ^ ord(right)) for left, right in zipped)
def product(*args):
    '''Multiply all arguments together and return the result.'''
    return reduce(operator.mul, args)
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
    '''
    Return *length* random non-zero bytes.

    Random material is drawn in oversized chunks, zero bytes are stripped
    out, and the remainder is accumulated until enough bytes are
    available.
    '''
    # BUG FIX: the original rebound the name ``rnd`` to the drawn integer,
    # so a second loop iteration called ``getrandbits`` on an int and
    # crashed.  It also stripped a *str* NUL ('\x00') from the *bytes*
    # returned by i2osp, a TypeError on Python 3, and joined with ''.
    result = []
    i = 0
    while i < length:
        bits = rnd.getrandbits(12 * length)
        s = i2osp(bits, 3 * length)
        s = s.replace(b'\x00', b'')
        result.append(s)
        i += len(s)
    return (b''.join(result))[:length]
|
rckclmbr/pyportify | pyportify/pkcs1/rsaes_oaep.py | encrypt | python | def encrypt(public_key, message, label=b'', hash_class=hashlib.sha1,
mgf=mgf.mgf1, seed=None, rnd=default_crypto_random):
'''Encrypt a byte message using a RSA public key and the OAEP wrapping
algorithm,
Parameters:
public_key - an RSA public key
message - a byte string
label - a label a per-se PKCS#1 standard
hash_class - a Python class for a message digest algorithme respecting
the hashlib interface
mgf1 - a mask generation function
seed - a seed to use instead of generating it using a random generator
rnd - a random generator class, respecting the random generator
interface from the random module, if seed is None, it is used to
generate it.
Return value:
the encrypted string of the same length as the public key
'''
hash = hash_class()
h_len = hash.digest_size
k = public_key.byte_size
max_message_length = k - 2 * h_len - 2
if len(message) > max_message_length:
raise exceptions.MessageTooLong
hash.update(label)
label_hash = hash.digest()
ps = b'\0' * int(max_message_length - len(message))
db = b''.join((label_hash, ps, b'\x01', message))
if not seed:
seed = primitives.i2osp(rnd.getrandbits(h_len*8), h_len)
db_mask = mgf(seed, k - h_len - 1, hash_class=hash_class)
masked_db = primitives.string_xor(db, db_mask)
seed_mask = mgf(masked_db, h_len, hash_class=hash_class)
masked_seed = primitives.string_xor(seed, seed_mask)
em = b''.join((b'\x00', masked_seed, masked_db))
m = primitives.os2ip(em)
c = public_key.rsaep(m)
output = primitives.i2osp(c, k)
return output | Encrypt a byte message using a RSA public key and the OAEP wrapping
algorithm,
Parameters:
public_key - an RSA public key
message - a byte string
label - a label a per-se PKCS#1 standard
hash_class - a Python class for a message digest algorithme respecting
the hashlib interface
mgf1 - a mask generation function
seed - a seed to use instead of generating it using a random generator
rnd - a random generator class, respecting the random generator
interface from the random module, if seed is None, it is used to
generate it.
Return value:
the encrypted string of the same length as the public key | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/rsaes_oaep.py#L9-L50 | [
"def i2osp(x, x_len):\n '''Converts the integer x to its big-endian representation of length\n x_len.\n '''\n if x > 256**x_len:\n raise exceptions.IntegerTooLarge\n h = hex(x)[2:]\n if h[-1] == 'L':\n h = h[:-1]\n if len(h) & 1 == 1:\n h = '0%s' % h\n x = binascii.unhexlify(h)\n return b'\\x00' * int(x_len-len(x)) + x\n",
"def mgf1(mgf_seed, mask_len, hash_class=hashlib.sha1):\n '''\n Mask Generation Function v1 from the PKCS#1 v2.0 standard.\n\n mgs_seed - the seed, a byte string\n mask_len - the length of the mask to generate\n hash_class - the digest algorithm to use, default is SHA1\n\n Return value: a pseudo-random mask, as a byte string\n '''\n h_len = hash_class().digest_size\n if mask_len > 0x10000:\n raise ValueError('mask too long')\n T = b''\n for i in range(0, integer_ceil(mask_len, h_len)):\n C = i2osp(i, 4)\n T = T + hash_class(mgf_seed + C).digest()\n return T[:mask_len]\n",
"def os2ip(x):\n '''Converts the byte string x representing an integer reprented using the\n big-endian convient to an integer.\n '''\n h = binascii.hexlify(x)\n return int(h, 16)\n",
"def string_xor(a, b):\n '''Computes the XOR operator between two byte strings. If the strings are\n of different lengths, the result string is as long as the shorter.\n '''\n if sys.version_info[0] < 3:\n return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))\n else:\n return bytes(x ^ y for (x, y) in zip(a, b))\n"
] | import hashlib
from . import primitives
from . import exceptions
from . import mgf
from .defaults import default_crypto_random
def decrypt(private_key, message, label=b'', hash_class=hashlib.sha1,
            mgf=mgf.mgf1):
    '''Decrypt a byte message using a RSA private key and the OAEP wrapping
    algorithm (RSAES-OAEP-DECRYPT from PKCS#1 v2).

    Parameters:
    private_key - an RSA private key
    message - the ciphertext, a byte string of the key's byte size
    label - a label as per the PKCS#1 standard
    hash_class - a Python class for a message digest algorithm respecting
        the hashlib interface
    mgf - a mask generation function

    Return value:
    the decrypted plaintext byte string

    Raises ValueError or exceptions.DecryptionError on malformed input.
    '''
    hash = hash_class()
    h_len = hash.digest_size
    k = private_key.byte_size
    # 1. check length
    if len(message) != k or k < 2 * h_len + 2:
        raise ValueError('decryption error')
    # 2. RSA decryption
    c = primitives.os2ip(message)
    m = private_key.rsadp(c)
    em = primitives.i2osp(m, k)
    # 4. EME-OAEP decoding: EM = 0x00 || maskedSeed || maskedDB
    hash.update(label)
    label_hash = hash.digest()
    y, masked_seed, masked_db = em[0], em[1:h_len+1], em[1+h_len:]
    # em[0] is an int on Python 3 and a length-1 byte string on Python 2
    if y != b'\x00' and y != 0:
        raise ValueError('decryption error')
    seed_mask = mgf(masked_db, h_len)
    seed = primitives.string_xor(masked_seed, seed_mask)
    db_mask = mgf(seed, k - h_len - 1)
    db = primitives.string_xor(masked_db, db_mask)
    # DB = lHash' || PS (zero padding) || 0x01 || M
    label_hash_prime, rest = db[:h_len], db[h_len:]
    i = rest.find(b'\x01')
    if i == -1:
        raise exceptions.DecryptionError
    # BUG FIX: removed a leftover debug print() that leaked the padding
    # bytes of a malformed message to stdout on this failure path.
    if rest[:i].strip(b'\x00') != b'':
        raise exceptions.DecryptionError
    m = rest[i+1:]
    if label_hash_prime != label_hash:
        raise exceptions.DecryptionError
    return m
|
rckclmbr/pyportify | pyportify/pkcs1/rsaes_oaep.py | decrypt | python | def decrypt(private_key, message, label=b'', hash_class=hashlib.sha1,
mgf=mgf.mgf1):
'''Decrypt a byte message using a RSA private key and the OAEP wrapping
algorithm
Parameters:
public_key - an RSA public key
message - a byte string
label - a label a per-se PKCS#1 standard
hash_class - a Python class for a message digest algorithme respecting
the hashlib interface
mgf1 - a mask generation function
Return value:
the string before encryption (decrypted)
'''
hash = hash_class()
h_len = hash.digest_size
k = private_key.byte_size
# 1. check length
if len(message) != k or k < 2 * h_len + 2:
raise ValueError('decryption error')
# 2. RSA decryption
c = primitives.os2ip(message)
m = private_key.rsadp(c)
em = primitives.i2osp(m, k)
# 4. EME-OAEP decoding
hash.update(label)
label_hash = hash.digest()
y, masked_seed, masked_db = em[0], em[1:h_len+1], em[1+h_len:]
if y != b'\x00' and y != 0:
raise ValueError('decryption error')
seed_mask = mgf(masked_db, h_len)
seed = primitives.string_xor(masked_seed, seed_mask)
db_mask = mgf(seed, k - h_len - 1)
db = primitives.string_xor(masked_db, db_mask)
label_hash_prime, rest = db[:h_len], db[h_len:]
i = rest.find(b'\x01')
if i == -1:
raise exceptions.DecryptionError
if rest[:i].strip(b'\x00') != b'':
print(rest[:i].strip(b'\x00'))
raise exceptions.DecryptionError
m = rest[i+1:]
if label_hash_prime != label_hash:
raise exceptions.DecryptionError
return m | Decrypt a byte message using a RSA private key and the OAEP wrapping
algorithm
Parameters:
public_key - an RSA public key
message - a byte string
label - a label a per-se PKCS#1 standard
hash_class - a Python class for a message digest algorithme respecting
the hashlib interface
mgf1 - a mask generation function
Return value:
the string before encryption (decrypted) | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/rsaes_oaep.py#L53-L99 | [
"def i2osp(x, x_len):\n '''Converts the integer x to its big-endian representation of length\n x_len.\n '''\n if x > 256**x_len:\n raise exceptions.IntegerTooLarge\n h = hex(x)[2:]\n if h[-1] == 'L':\n h = h[:-1]\n if len(h) & 1 == 1:\n h = '0%s' % h\n x = binascii.unhexlify(h)\n return b'\\x00' * int(x_len-len(x)) + x\n",
"def os2ip(x):\n '''Converts the byte string x representing an integer reprented using the\n big-endian convient to an integer.\n '''\n h = binascii.hexlify(x)\n return int(h, 16)\n"
] | import hashlib
from . import primitives
from . import exceptions
from . import mgf
from .defaults import default_crypto_random
def encrypt(public_key, message, label=b'', hash_class=hashlib.sha1,
            mgf=mgf.mgf1, seed=None, rnd=default_crypto_random):
    '''Encrypt a byte message using a RSA public key and the OAEP wrapping
    algorithm (RSAES-OAEP-ENCRYPT from PKCS#1 v2).

    Parameters:
    public_key - an RSA public key
    message - a byte string, at most k - 2*hLen - 2 bytes long
    label - a label as per the PKCS#1 standard
    hash_class - a Python class for a message digest algorithm respecting
        the hashlib interface
    mgf - a mask generation function
    seed - a seed to use instead of generating one with *rnd* (useful for
        deterministic tests)
    rnd - a random generator respecting the random-module interface; used
        to draw the seed when *seed* is None

    Return value:
    the ciphertext, a byte string of the same length as the public key

    Raises exceptions.MessageTooLong when the message does not fit.
    '''
    hash = hash_class()
    h_len = hash.digest_size
    k = public_key.byte_size
    max_message_length = k - 2 * h_len - 2
    if len(message) > max_message_length:
        raise exceptions.MessageTooLong
    # EME-OAEP encoding: DB = lHash || PS || 0x01 || M
    hash.update(label)
    label_hash = hash.digest()
    ps = b'\0' * int(max_message_length - len(message))
    db = b''.join((label_hash, ps, b'\x01', message))
    if not seed:
        seed = primitives.i2osp(rnd.getrandbits(h_len*8), h_len)
    # Mask DB with the seed, then mask the seed with the masked DB.
    db_mask = mgf(seed, k - h_len - 1, hash_class=hash_class)
    masked_db = primitives.string_xor(db, db_mask)
    seed_mask = mgf(masked_db, h_len, hash_class=hash_class)
    masked_seed = primitives.string_xor(seed, seed_mask)
    # EM = 0x00 || maskedSeed || maskedDB, then the RSA encryption primitive.
    em = b''.join((b'\x00', masked_seed, masked_db))
    m = primitives.os2ip(em)
    c = public_key.rsaep(m)
    output = primitives.i2osp(c, k)
    return output
|
rckclmbr/pyportify | pyportify/pkcs1/keys.py | generate_key_pair | python | def generate_key_pair(size=512, number=2, rnd=default_crypto_random,
k=DEFAULT_ITERATION, primality_algorithm=None,
strict_size=True, e=0x10001):
'''Generates an RSA key pair.
size:
the bit size of the modulus, default to 512.
number:
the number of primes to use, default to 2.
rnd:
the random number generator to use, default to SystemRandom from the
random library.
k:
the number of iteration to use for the probabilistic primality
tests.
primality_algorithm:
the primality algorithm to use.
strict_size:
whether to use size as a lower bound or a strict goal.
e:
the public key exponent.
Returns the pair (public_key, private_key).
'''
primes = []
lbda = 1
bits = size // number + 1
n = 1
while len(primes) < number:
if number - len(primes) == 1:
bits = size - primitives.integer_bit_size(n) + 1
prime = get_prime(bits, rnd, k, algorithm=primality_algorithm)
if prime in primes:
continue
if e is not None and fractions.gcd(e, lbda) != 1:
continue
if (strict_size and number - len(primes) == 1 and
primitives.integer_bit_size(n*prime) != size):
continue
primes.append(prime)
n *= prime
lbda *= prime - 1
if e is None:
e = 0x10001
while e < lbda:
if fractions.gcd(e, lbda) == 1:
break
e += 2
assert 3 <= e <= n-1
public = RsaPublicKey(n, e)
private = MultiPrimeRsaPrivateKey(primes, e, blind=True, rnd=rnd)
return public, private | Generates an RSA key pair.
size:
the bit size of the modulus, default to 512.
number:
the number of primes to use, default to 2.
rnd:
the random number generator to use, default to SystemRandom from the
random library.
k:
the number of iteration to use for the probabilistic primality
tests.
primality_algorithm:
the primality algorithm to use.
strict_size:
whether to use size as a lower bound or a strict goal.
e:
the public key exponent.
Returns the pair (public_key, private_key). | train | https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/keys.py#L122-L173 | [
"def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION,\n algorithm=None):\n '''Generate a prime number of the giver size using the is_prime() helper\n function.\n\n size - size in bits of the prime, default to 128\n rnd - a random generator to use\n k - the number of iteration to use for the probabilistic primality\n algorithms.\n algorithm - the name of the primality algorithm to use, default is the\n probabilistic Miller-Rabin algorithm.\n\n Return value: a prime number, as a long integer\n '''\n while True:\n n = rnd.getrandbits(size-2)\n n = 2 ** (size-1) + n * 2 + 1\n if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):\n return n\n if algorithm == 'gmpy-miller-rabin':\n return gmpy.next_prime(n)\n",
"def integer_bit_size(n):\n '''Returns the number of bits necessary to store the integer n.'''\n if n == 0:\n return 1\n s = 0\n while n:\n s += 1\n n >>= 1\n return s\n"
] | import fractions
from . import primitives
from . import exceptions
from .defaults import default_crypto_random
from .primes import get_prime, DEFAULT_ITERATION
class RsaPublicKey(object):
    """RSA public key holding the modulus *n* and public exponent *e*."""

    __slots__ = ('n', 'e', 'bit_size', 'byte_size')

    def __init__(self, n, e):
        self.n = n
        self.e = e
        self.bit_size = primitives.integer_bit_size(n)
        self.byte_size = primitives.integer_byte_size(n)

    def __repr__(self):
        return '<RsaPublicKey n: %d e: %d bit_size: %d>' % (
            self.n, self.e, self.bit_size)

    def rsavp1(self, s):
        """RSAVP1 verification primitive: same math as RSAEP."""
        if s < 0 or s > self.n - 1:
            raise exceptions.SignatureRepresentativeOutOfRange
        return self.rsaep(s)

    def rsaep(self, m):
        """RSAEP encryption primitive: m ** e mod n."""
        if m < 0 or m > self.n - 1:
            raise exceptions.MessageRepresentativeOutOfRange
        return pow(m, self.e, self.n)
class RsaPrivateKey(object):
    """Plain (single-exponent) RSA private key: modulus *n*, exponent *d*."""

    __slots__ = ('n', 'd', 'bit_size', 'byte_size')

    def __init__(self, n, d):
        self.n = n
        self.d = d
        self.bit_size = primitives.integer_bit_size(n)
        self.byte_size = primitives.integer_byte_size(n)

    def __repr__(self):
        return '<RsaPrivateKey n: %d d: %d bit_size: %d>' % (
            self.n, self.d, self.bit_size)

    def rsadp(self, c):
        """RSADP decryption primitive: c ** d mod n."""
        if c < 0 or c > self.n - 1:
            raise exceptions.CiphertextRepresentativeOutOfRange
        return pow(c, self.d, self.n)

    def rsasp1(self, m):
        """RSASP1 signature primitive; identical math to RSADP."""
        if m < 0 or m > self.n - 1:
            raise exceptions.MessageRepresentativeOutOfRange
        return self.rsadp(m)
class MultiPrimeRsaPrivateKey(object):
    """Multi-prime RSA private key with CRT decryption and optional blinding.

    The key is defined by its list of primes and the public exponent e;
    the per-prime private exponents and CRT coefficients are precomputed
    in __init__.  When ``blind`` is true a random blinding pair
    (blind, blind_inv) is prepared to mask timing side channels in rsadp.
    """
    __slots__ = ('primes', 'blind', 'blind_inv', 'n', 'e', 'exponents', 'crts',
                 'bit_size', 'byte_size')

    def __init__(self, primes, e, blind=True, rnd=default_crypto_random):
        self.primes = primes
        self.n = primitives.product(*primes)
        self.e = e
        self.bit_size = primitives.integer_bit_size(self.n)
        self.byte_size = primitives.integer_byte_size(self.n)
        # Per-prime private exponent: e**-1 mod (prime - 1), normalized
        # to the positive representative.
        self.exponents = []
        for prime in primes:
            exponent, a, b = primitives.bezout(e, prime-1)
            assert b == 1
            if exponent < 0:
                exponent += prime-1
            self.exponents.append(exponent)
        # CRT coefficients: crts[i] is the inverse of the running product
        # of the previous primes modulo primes[i] (Garner's recombination).
        self.crts = [1]
        R = primes[0]
        for prime in primes[1:]:
            crt, a, b = primitives.bezout(R, prime)
            assert b == 1
            R *= prime
            self.crts.append(crt)
        public = RsaPublicKey(self.n, self.e)
        if blind:
            # Draw blinding factors until one invertible mod n is found;
            # store its encryption (applied to c) and its inverse
            # (applied to the recovered m).
            while True:
                blind_factor = rnd.getrandbits(self.bit_size-1)
                self.blind = public.rsaep(blind_factor)
                u, v, gcd = primitives.bezout(blind_factor, self.n)
                if gcd == 1:
                    self.blind_inv = u if u > 0 else u + self.n
                    assert (blind_factor * self.blind_inv) % self.n == 1
                    break
        else:
            self.blind = None
            self.blind_inv = None

    def __repr__(self):
        return '<RsaPrivateKey n: %d primes: %s bit_size: %d>' % \
            (self.n, self.primes, self.bit_size)

    def rsadp(self, c):
        """RSADP via CRT: decrypt c with one exponentiation per prime,
        then recombine incrementally (Garner's algorithm)."""
        if not (0 <= c <= self.n-1):
            raise exceptions.CiphertextRepresentativeOutOfRange
        R = 1
        m = 0
        if self.blind:
            c = (c * self.blind) % self.n
        contents = zip(self.primes, self.exponents, self.crts)
        for prime, exponent, crt in contents:
            m_i = primitives._pow(c, exponent, prime)
            h = ((m_i - m) * crt) % prime
            m += R * h
            R *= prime
        if self.blind_inv:
            m = (m * self.blind_inv) % self.n
        return m

    def rsasp1(self, m):
        """RSASP1 signature primitive; identical math to RSADP."""
        if not (0 <= m <= self.n-1):
            raise exceptions.MessageRepresentativeOutOfRange
        return self.rsadp(m)
|
atztogo/phono3py | phono3py/phonon3/conductivity_LBTE.py | diagonalize_collision_matrix | python | def diagonalize_collision_matrix(collision_matrices,
i_sigma=None,
i_temp=None,
pinv_solver=0,
log_level=0):
start = time.time()
# Matrix size of collision matrix to be diagonalized.
# The following value is expected:
# ir-colmat: num_ir_grid_points * num_band * 3
# red-colmat: num_mesh_points * num_band
shape = collision_matrices.shape
if len(shape) == 6:
size = shape[2] * shape[3]
assert size == shape[4] * shape[5]
elif len(shape) == 8:
size = np.prod(shape[2:5])
assert size == np.prod(shape[5:8])
elif len(shape) == 2:
size = shape[0]
assert size == shape[1]
solver = _select_solver(pinv_solver)
# [1] dsyev: safer and slower than dsyevd and smallest memory usage
# [2] dsyevd: faster than dsyev and largest memory usage
if solver in [1, 2]:
if log_level:
routine = ['dsyev', 'dsyevd'][solver - 1]
sys.stdout.write("Diagonalizing by lapacke %s... " % routine)
sys.stdout.flush()
import phono3py._phono3py as phono3c
w = np.zeros(size, dtype='double')
if i_sigma is None:
_i_sigma = 0
else:
_i_sigma = i_sigma
if i_temp is None:
_i_temp = 0
else:
_i_temp = i_temp
phono3c.diagonalize_collision_matrix(collision_matrices,
w,
_i_sigma,
_i_temp,
0.0,
(solver + 1) % 2,
0) # only diagonalization
elif solver == 3: # np.linalg.eigh depends on dsyevd.
if log_level:
sys.stdout.write("Diagonalizing by np.linalg.eigh... ")
sys.stdout.flush()
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, col_mat[:] = np.linalg.eigh(col_mat)
elif solver == 4: # fully scipy dsyev
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyev... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyev(col_mat.T, overwrite_a=1)
elif solver == 5: # fully scipy dsyevd
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyevd... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyevd(col_mat.T, overwrite_a=1)
if log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return w | Diagonalize collision matrices.
Note
----
collision_matricies is overwritten by eigenvectors.
Parameters
----------
collision_matricies : ndarray, optional
Collision matrix. This ndarray has to have the following size and
flags.
shapes:
(sigmas, temperatures, prod(mesh), num_band, prod(mesh), num_band)
(sigmas, temperatures, ir_grid_points, num_band, 3,
ir_grid_points, num_band, 3)
(size, size)
dtype='double', order='C'
i_sigma : int, optional
Index of BZ integration methods, tetrahedron method and smearing
method with widths. Default is None.
i_temp : int, optional
Index of temperature. Default is None.
pinv_solver : int, optional
Diagnalization solver choice.
log_level : int, optional
Verbosity level. Smaller is more quiet. Default is 0.
Returns
-------
w : ndarray, optional
Eigenvalues.
shape=(size_of_collision_matrix,), dtype='double' | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/conductivity_LBTE.py#L609-L724 | [
"def _select_solver(pinv_solver):\n try:\n import phono3py._phono3py as phono3c\n default_solver = phono3c.default_colmat_solver()\n except ImportError:\n print(\"Phono3py C-routine is not compiled correctly.\")\n default_solver = 4\n\n solver_numbers = (1, 2, 3, 4, 5, 6)\n\n solver = pinv_solver\n if solver == 0: # default solver\n if default_solver in (4, 5, 6):\n try:\n import scipy.linalg\n except ImportError:\n solver = 1\n else:\n solver = default_solver\n else:\n solver = default_solver\n elif solver not in solver_numbers:\n solver = default_solver\n\n return solver\n"
] | import sys
import time
import numpy as np
from phonopy.phonon.degeneracy import degenerate_sets
from phono3py.phonon3.conductivity import (Conductivity, all_bands_exist,
unit_to_WmK)
from phono3py.phonon3.conductivity import write_pp as _write_pp
from phono3py.phonon3.collision_matrix import CollisionMatrix
from phono3py.phonon3.triplets import get_grid_points_by_rotations
from phono3py.file_IO import (write_kappa_to_hdf5,
write_collision_to_hdf5,
read_collision_from_hdf5,
write_collision_eigenvalues_to_hdf5,
write_unitary_matrix_to_hdf5,
read_pp_from_hdf5)
from phonopy.units import THzToEv, Kb
def get_thermal_conductivity_LBTE(
        interaction,
        symmetry,
        temperatures=np.arange(0, 1001, 10, dtype='double'),
        sigmas=None,
        sigma_cutoff=None,
        is_isotope=False,
        mass_variances=None,
        grid_points=None,
        boundary_mfp=None,  # in micrometre
        solve_collective_phonon=False,
        is_reducible_collision_matrix=False,
        is_kappa_star=True,
        gv_delta_q=1e-4,  # for group velocity
        is_full_pp=False,
        pinv_cutoff=1.0e-8,
        pinv_solver=0,  # default: dsyev in lapacke
        write_collision=False,
        read_collision=False,
        write_kappa=False,
        write_pp=False,
        read_pp=False,
        write_LBTE_solution=False,
        compression=None,
        input_filename=None,
        output_filename=None,
        log_level=0):
    """Compute lattice thermal conductivity by direct solution of the LBTE.

    Drives a Conductivity_LBTE instance: optionally restores collision
    matrices from hdf5 (read_collision), iterates over grid points to
    accumulate/write collisions and ph-ph interaction strengths, and
    finally solves for kappa and writes results when requested.

    Returns the Conductivity_LBTE object, or False when reading
    collisions from file fails.
    """
    if sigmas is None:
        sigmas = []
    if log_level:
        print("-" * 19 + " Lattice thermal conducitivity (LBTE) " + "-" * 19)
        print("Cutoff frequency of pseudo inversion of collision matrix: %s" %
              pinv_cutoff)

    # When collisions are restored from file, temperatures come from the
    # file, so the solver is created without them.
    if read_collision:
        temps = None
    else:
        temps = temperatures

    lbte = Conductivity_LBTE(
        interaction,
        symmetry,
        grid_points=grid_points,
        temperatures=temps,
        sigmas=sigmas,
        sigma_cutoff=sigma_cutoff,
        is_isotope=is_isotope,
        mass_variances=mass_variances,
        boundary_mfp=boundary_mfp,
        solve_collective_phonon=solve_collective_phonon,
        is_reducible_collision_matrix=is_reducible_collision_matrix,
        is_kappa_star=is_kappa_star,
        gv_delta_q=gv_delta_q,
        is_full_pp=is_full_pp,
        read_pp=read_pp,
        pp_filename=input_filename,
        pinv_cutoff=pinv_cutoff,
        pinv_solver=pinv_solver,
        log_level=log_level)

    if read_collision:
        read_from = _set_collision_from_file(
            lbte,
            indices=read_collision,
            is_reducible_collision_matrix=is_reducible_collision_matrix,
            filename=input_filename,
            log_level=log_level)
        if not read_from:
            print("Reading collision failed.")
            return False
        if log_level:
            temperatures = lbte.get_temperatures()
            if len(temperatures) > 5:
                text = (" %.1f " * 5 + "...") % tuple(temperatures[:5])
                text += " %.1f" % temperatures[-1]
            else:
                text = (" %.1f " * len(temperatures)) % tuple(temperatures)
            print("Temperature: " + text)

    # Iterating the solver object visits one grid point per step.
    for i in lbte:
        if write_pp:
            _write_pp(lbte,
                      interaction,
                      i,
                      filename=output_filename,
                      compression=compression)
        if write_collision:
            _write_collision(
                lbte,
                interaction,
                i=i,
                is_reducible_collision_matrix=is_reducible_collision_matrix,
                is_one_gp_colmat=(grid_points is not None),
                filename=output_filename)

        lbte.delete_gp_collision_and_pp()

    # Write full collision matrix
    if write_LBTE_solution:
        if ((read_collision and
             all_bands_exist(interaction) and
             read_from == "grid_points" and
             grid_points is None) or
            (not read_collision)):
            _write_collision(lbte, interaction, filename=output_filename)

    if write_kappa:
        # kappa can only be assembled when the whole BZ and all bands
        # were treated.
        if grid_points is None and all_bands_exist(interaction):
            lbte.set_kappa_at_sigmas()
            _write_kappa(
                lbte,
                interaction.get_primitive().get_volume(),
                is_reducible_collision_matrix=is_reducible_collision_matrix,
                write_LBTE_solution=write_LBTE_solution,
                pinv_solver=pinv_solver,
                compression=compression,
                filename=output_filename,
                log_level=log_level)

    return lbte
def _write_collision(lbte,
                     interaction,
                     i=None,
                     is_reducible_collision_matrix=False,
                     is_one_gp_colmat=False,
                     filename=None):
    """Write collision matrices and gammas to hdf5.

    With ``i`` given, only the data of the i-th grid point is written
    (per band when not all bands were computed); with ``i`` None the full
    per-sigma collision matrices are written.
    """
    grid_points = lbte.get_grid_points()
    temperatures = lbte.get_temperatures()
    sigmas = lbte.get_sigmas()
    sigma_cutoff = lbte.get_sigma_cutoff_width()
    gamma = lbte.get_gamma()
    gamma_isotope = lbte.get_gamma_isotope()
    collision_matrix = lbte.get_collision_matrix()
    mesh = lbte.get_mesh_numbers()

    if i is not None:
        gp = grid_points[i]
        # igp: index into the stored arrays; a single-gp collision matrix
        # stores one entry, a reducible one is addressed by the grid
        # point itself, an irreducible one by the list position.
        if is_one_gp_colmat:
            igp = 0
        else:
            if is_reducible_collision_matrix:
                igp = gp
            else:
                igp = i
        if all_bands_exist(interaction):
            for j, sigma in enumerate(sigmas):
                if gamma_isotope is not None:
                    gamma_isotope_at_sigma = gamma_isotope[j, igp]
                else:
                    gamma_isotope_at_sigma = None
                write_collision_to_hdf5(
                    temperatures,
                    mesh,
                    gamma=gamma[j, :, igp],
                    gamma_isotope=gamma_isotope_at_sigma,
                    collision_matrix=collision_matrix[j, :, igp],
                    grid_point=gp,
                    sigma=sigma,
                    sigma_cutoff=sigma_cutoff,
                    filename=filename)
        else:
            # Band-by-band files when only selected band indices exist.
            for j, sigma in enumerate(sigmas):
                for k, bi in enumerate(interaction.get_band_indices()):
                    if gamma_isotope is not None:
                        gamma_isotope_at_sigma = gamma_isotope[j, igp, k]
                    else:
                        gamma_isotope_at_sigma = None
                    write_collision_to_hdf5(
                        temperatures,
                        mesh,
                        gamma=gamma[j, :, igp, k],
                        gamma_isotope=gamma_isotope_at_sigma,
                        collision_matrix=collision_matrix[j, :, igp, k],
                        grid_point=gp,
                        band_index=bi,
                        sigma=sigma,
                        sigma_cutoff=sigma_cutoff,
                        filename=filename)
    else:
        # Full collision matrix, one file per sigma.
        for j, sigma in enumerate(sigmas):
            if gamma_isotope is not None:
                gamma_isotope_at_sigma = gamma_isotope[j]
            else:
                gamma_isotope_at_sigma = None
            write_collision_to_hdf5(temperatures,
                                    mesh,
                                    gamma=gamma[j],
                                    gamma_isotope=gamma_isotope_at_sigma,
                                    collision_matrix=collision_matrix[j],
                                    sigma=sigma,
                                    sigma_cutoff=sigma_cutoff,
                                    filename=filename)
def _write_kappa(lbte,
                 volume,
                 is_reducible_collision_matrix=False,
                 write_LBTE_solution=False,
                 pinv_solver=None,
                 compression=None,
                 filename=None,
                 log_level=0):
    """Write kappa, per-mode quantities and (optionally) the LBTE solution
    (collision eigenvalues and unitary matrix) to hdf5, one file set per
    sigma.
    """
    temperatures = lbte.get_temperatures()
    sigmas = lbte.get_sigmas()
    sigma_cutoff = lbte.get_sigma_cutoff_width()
    mesh = lbte.get_mesh_numbers()
    weights = lbte.get_grid_weights()
    # NOTE(review): this assignment is redundant -- ``frequencies`` is
    # unconditionally reassigned below; candidate for removal.
    frequencies = lbte.get_frequencies()
    ave_pp = lbte.get_averaged_pp_interaction()
    qpoints = lbte.get_qpoints()
    kappa = lbte.get_kappa()
    kappa_RTA = lbte.get_kappa_RTA()
    gamma = lbte.get_gamma()
    gamma_isotope = lbte.get_gamma_isotope()
    gv = lbte.get_group_velocities()
    f_vector = lbte.get_f_vectors()
    gv_by_gv = lbte.get_gv_by_gv()
    mode_cv = lbte.get_mode_heat_capacities()
    mode_kappa = lbte.get_mode_kappa()
    mode_kappa_RTA = lbte.get_mode_kappa_RTA()
    mfp = lbte.get_mean_free_path()
    coleigs = lbte.get_collision_eigenvalues()
    # After kappa calculation, the variable is overwritten by unitary matrix
    unitary_matrix = lbte.get_collision_matrix()

    if is_reducible_collision_matrix:
        frequencies = lbte.get_frequencies_all()
    else:
        frequencies = lbte.get_frequencies()

    for i, sigma in enumerate(sigmas):
        if gamma_isotope is not None:
            gamma_isotope_at_sigma = gamma_isotope[i]
        else:
            gamma_isotope_at_sigma = None
        write_kappa_to_hdf5(temperatures,
                            mesh,
                            frequency=frequencies,
                            group_velocity=gv,
                            gv_by_gv=gv_by_gv,
                            mean_free_path=mfp[i],
                            heat_capacity=mode_cv,
                            kappa=kappa[i],
                            mode_kappa=mode_kappa[i],
                            kappa_RTA=kappa_RTA[i],
                            mode_kappa_RTA=mode_kappa_RTA[i],
                            f_vector=f_vector,
                            gamma=gamma[i],
                            gamma_isotope=gamma_isotope_at_sigma,
                            averaged_pp_interaction=ave_pp,
                            qpoint=qpoints,
                            weight=weights,
                            sigma=sigma,
                            sigma_cutoff=sigma_cutoff,
                            kappa_unit_conversion=unit_to_WmK / volume,
                            compression=compression,
                            filename=filename,
                            verbose=log_level)

        if coleigs is not None:
            write_collision_eigenvalues_to_hdf5(temperatures,
                                                mesh,
                                                coleigs[i],
                                                sigma=sigma,
                                                sigma_cutoff=sigma_cutoff,
                                                filename=filename,
                                                verbose=log_level)
            if write_LBTE_solution:
                if pinv_solver is not None:
                    solver = _select_solver(pinv_solver)
                    # Only the lapacke/numpy/scipy eigensolvers leave a
                    # unitary matrix behind that is worth storing.
                    if solver in [1, 2, 3, 4, 5]:
                        write_unitary_matrix_to_hdf5(
                            temperatures,
                            mesh,
                            unitary_matrix=unitary_matrix,
                            sigma=sigma,
                            sigma_cutoff=sigma_cutoff,
                            solver=solver,
                            filename=filename,
                            verbose=log_level)
def _set_collision_from_file(lbte,
                             indices='all',
                             is_reducible_collision_matrix=False,
                             filename=None,
                             log_level=0):
    """Restore gamma and collision matrices from hdf5 into ``lbte``.

    For each sigma, first try the single full-matrix file; failing that,
    fall back to per-grid-point files, and finally to per-band files.
    Returns "full_matrix" or "grid_points" indicating which source was
    used, or False when nothing could be read.
    """
    sigmas = lbte.get_sigmas()
    sigma_cutoff = lbte.get_sigma_cutoff_width()
    mesh = lbte.get_mesh_numbers()
    grid_points = lbte.get_grid_points()
    indices = indices

    # With several sigmas the per-sigma arrays are accumulated in lists
    # and stacked at the end.
    if len(sigmas) > 1:
        gamma = []
        collision_matrix = []

    read_from = None

    if log_level:
        print("---------------------- Reading collision data from file "
              "----------------------")
        sys.stdout.flush()

    for j, sigma in enumerate(sigmas):
        collisions = read_collision_from_hdf5(mesh,
                                              indices=indices,
                                              sigma=sigma,
                                              sigma_cutoff=sigma_cutoff,
                                              filename=filename,
                                              verbose=(log_level > 0))
        if log_level:
            sys.stdout.flush()

        if collisions:
            (colmat_at_sigma,
             gamma_at_sigma,
             temperatures) = collisions

            if len(sigmas) == 1:
                collision_matrix = colmat_at_sigma
                gamma = np.zeros((1,) + gamma_at_sigma.shape,
                                 dtype='double', order='C')
                gamma[0] = gamma_at_sigma
            else:
                collision_matrix.append(colmat_at_sigma)
                gamma.append(gamma_at_sigma)
            read_from = "full_matrix"
        else:
            # No full-matrix file: allocate buffers sized from a
            # per-grid-point file (True) or, failing that, a per-band
            # file (False), then fill them grid point by grid point.
            vals = _allocate_collision(True,
                                       mesh,
                                       sigma,
                                       sigma_cutoff,
                                       grid_points,
                                       indices,
                                       is_reducible_collision_matrix,
                                       filename)
            if vals:
                colmat_at_sigma, gamma_at_sigma, temperatures = vals
            else:
                if log_level:
                    print("Collision at grid point %d doesn't exist." %
                          grid_points[0])
                vals = _allocate_collision(False,
                                           mesh,
                                           sigma,
                                           sigma_cutoff,
                                           grid_points,
                                           indices,
                                           is_reducible_collision_matrix,
                                           filename)
                if vals:
                    colmat_at_sigma, gamma_at_sigma, temperatures = vals
                else:
                    if log_level:
                        print("Collision at (grid point %d, band index %d) "
                              "doesn't exist." % (grid_points[0], 1))
                    return False

            for i, gp in enumerate(grid_points):
                if not _collect_collision_gp(colmat_at_sigma,
                                             gamma_at_sigma,
                                             temperatures,
                                             mesh,
                                             sigma,
                                             sigma_cutoff,
                                             i,
                                             gp,
                                             indices,
                                             is_reducible_collision_matrix,
                                             filename,
                                             log_level):
                    num_band = colmat_at_sigma.shape[3]
                    # NOTE(review): this inner ``j`` shadows the sigma
                    # loop variable; harmless here because ``j`` is not
                    # used again after the inner loop, but fragile.
                    for j in range(num_band):
                        if not _collect_collision_band(
                                colmat_at_sigma,
                                gamma_at_sigma,
                                temperatures,
                                mesh,
                                sigma,
                                sigma_cutoff,
                                i,
                                gp,
                                j,
                                indices,
                                is_reducible_collision_matrix,
                                filename,
                                log_level):
                            return False

            if len(sigmas) == 1:
                gamma = gamma_at_sigma
                collision_matrix = colmat_at_sigma
            else:
                gamma.append(gamma_at_sigma[0])
                collision_matrix.append(colmat_at_sigma[0])
            read_from = "grid_points"

    if len(sigmas) > 1:
        temperatures = np.array(temperatures, dtype='double', order='C')
        gamma = np.array(gamma, dtype='double', order='C')
        collision_matrix = np.array(collision_matrix,
                                    dtype='double', order='C')

    lbte.set_gamma(gamma)
    lbte.set_collision_matrix(collision_matrix)
    # lbte.set_temperatures invokes allocation of arrays. So this must
    # be called after setting collision_matrix for saving memory
    # space.
    lbte.set_temperatures(temperatures)

    return read_from
def _allocate_collision(for_gps,
                        mesh,
                        sigma,
                        sigma_cutoff,
                        grid_points,
                        indices,
                        is_reducible_collision_matrix,
                        filename):
    """Allocate zeroed collision/gamma/temperature arrays for one sigma.

    A sample hdf5 file for the first grid point (``for_gps`` True) or the
    first band (``for_gps`` False) is read only to learn the number of
    temperatures and bands; returns (colmat, gamma, temperatures) or
    False when no such file exists.
    """
    num_mesh_points = np.prod(mesh)

    if for_gps:
        collision = read_collision_from_hdf5(mesh,
                                             indices=indices,
                                             grid_point=grid_points[0],
                                             sigma=sigma,
                                             sigma_cutoff=sigma_cutoff,
                                             filename=filename,
                                             verbose=False)
    else:
        collision = read_collision_from_hdf5(mesh,
                                             indices=indices,
                                             grid_point=grid_points[0],
                                             band_index=0,
                                             sigma=sigma,
                                             sigma_cutoff=sigma_cutoff,
                                             filename=filename,
                                             verbose=False)

    if collision is None:
        return False

    num_temp = len(collision[2])  # This is to treat indices="all".
    if is_reducible_collision_matrix:
        # Reducible case: scalar entries over the whole mesh.
        if for_gps:
            num_band = collision[0].shape[4]  # for gps (s,T,b,irgp,b)
        else:
            num_band = collision[0].shape[3]  # for bands (s,T,irgp,b)
        gamma_at_sigma = np.zeros(
            (1, num_temp, num_mesh_points, num_band),
            dtype='double', order='C')
        colmat_at_sigma = np.zeros(
            (1, num_temp,
             num_mesh_points, num_band,
             num_mesh_points, num_band),
            dtype='double', order='C')
    else:
        # Irreducible case: 3x3 Cartesian blocks over the ir grid points.
        if for_gps:
            num_band = collision[0].shape[5]  # for gps (s,T,b0,3,irgp,b,3)
        else:
            num_band = collision[0].shape[4]  # for bands (s,T,3,irgp,b,3)
        gamma_at_sigma = np.zeros(
            (1, num_temp, len(grid_points), num_band),
            dtype='double', order='C')
        colmat_at_sigma = np.zeros(
            (1, num_temp,
             len(grid_points), num_band, 3,
             len(grid_points), num_band, 3),
            dtype='double', order='C')
    temperatures = np.zeros(num_temp, dtype='double', order='C')

    return colmat_at_sigma, gamma_at_sigma, temperatures
def _collect_collision_gp(colmat_at_sigma,
                          gamma_at_sigma,
                          temperatures,
                          mesh,
                          sigma,
                          sigma_cutoff,
                          i,
                          gp,
                          indices,
                          is_reducible_collision_matrix,
                          filename,
                          log_level):
    """Read one per-grid-point collision file into the output arrays.

    ``colmat_at_sigma``, ``gamma_at_sigma`` and ``temperatures`` are
    filled in place.  Returns True on success, False when the hdf5 file
    could not be read.
    """
    collision_gp = read_collision_from_hdf5(
        mesh,
        indices=indices,
        grid_point=gp,
        sigma=sigma,
        sigma_cutoff=sigma_cutoff,
        filename=filename,
        verbose=(log_level > 0))
    if log_level:
        sys.stdout.flush()

    # Falsy (None) signals a missing/unreadable file.
    if not collision_gp:
        return False

    (colmat_at_gp,
     gamma_at_gp,
     temperatures_at_gp) = collision_gp
    if is_reducible_collision_matrix:
        igp = gp
    else:
        igp = i
    gamma_at_sigma[0, :, igp] = gamma_at_gp
    colmat_at_sigma[0, :, igp] = colmat_at_gp[0]
    temperatures[:] = temperatures_at_gp

    return True
def _collect_collision_band(colmat_at_sigma,
                            gamma_at_sigma,
                            temperatures,
                            mesh,
                            sigma,
                            sigma_cutoff,
                            i,
                            gp,
                            j,
                            indices,
                            is_reducible_collision_matrix,
                            filename,
                            log_level):
    """Read one per-band collision file into the output arrays.

    Band-index counterpart of ``_collect_collision_gp``; arrays are
    filled in place at band ``j``.  Returns True on success, False when
    the hdf5 file could not be read.
    """
    collision_band = read_collision_from_hdf5(
        mesh,
        indices=indices,
        grid_point=gp,
        band_index=j,
        sigma=sigma,
        sigma_cutoff=sigma_cutoff,
        filename=filename,
        verbose=(log_level > 0))
    if log_level:
        sys.stdout.flush()

    # Bug fix: the reader signals failure with a falsy value (None), so
    # the previous ``collision_band is False`` test never fired and the
    # tuple-unpacking below crashed on None.  Use the same truthiness
    # check as _collect_collision_gp.
    if not collision_band:
        return False

    (colmat_at_band,
     gamma_at_band,
     temperatures_at_band) = collision_band
    if is_reducible_collision_matrix:
        igp = gp
    else:
        igp = i
    gamma_at_sigma[0, :, igp, j] = gamma_at_band
    colmat_at_sigma[0, :, igp, j] = colmat_at_band[0]
    temperatures[:] = temperatures_at_band

    return True
def _select_solver(pinv_solver):
try:
import phono3py._phono3py as phono3c
default_solver = phono3c.default_colmat_solver()
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
default_solver = 4
solver_numbers = (1, 2, 3, 4, 5, 6)
solver = pinv_solver
if solver == 0: # default solver
if default_solver in (4, 5, 6):
try:
import scipy.linalg
except ImportError:
solver = 1
else:
solver = default_solver
else:
solver = default_solver
elif solver not in solver_numbers:
solver = default_solver
return solver
class Conductivity_LBTE(Conductivity):
    """Lattice thermal conductivity by direct solution of the LBTE.

    The linearized Boltzmann transport equation is solved by building the
    full collision matrix and (pseudo-)inverting it, instead of the RTA.
    """

    def __init__(self,
                 interaction,
                 symmetry,
                 grid_points=None,
                 temperatures=None,
                 sigmas=None,
                 sigma_cutoff=None,
                 is_isotope=False,
                 mass_variances=None,
                 boundary_mfp=None,  # in micrometre
                 solve_collective_phonon=False,
                 is_reducible_collision_matrix=False,
                 is_kappa_star=True,
                 gv_delta_q=None,  # finite difference for group veolocity
                 is_full_pp=False,
                 read_pp=False,
                 pp_filename=None,
                 pinv_cutoff=1.0e-8,
                 pinv_solver=0,
                 log_level=0):
        # All attributes are declared (as None/False) before the base
        # class __init__ runs, since Conductivity.__init__ fills many of
        # them; the remaining LBTE-specific ones are set afterwards.
        self._pp = None
        self._temperatures = None
        self._sigmas = None
        self._sigma_cutoff = None
        self._is_kappa_star = None
        self._gv_delta_q = None
        self._is_full_pp = None
        self._log_level = None
        self._primitive = None
        self._dm = None
        self._frequency_factor_to_THz = None
        self._cutoff_frequency = None
        self._boundary_mfp = None

        self._symmetry = None
        self._point_operations = None
        self._rotations_cartesian = None

        self._grid_points = None
        self._grid_weights = None
        self._grid_address = None
        self._ir_grid_points = None
        self._ir_grid_weights = None

        self._kappa = None
        self._mode_kappa = None
        self._kappa_RTA = None
        self._mode_kappa_RTA = None

        self._read_gamma = False
        self._read_gamma_iso = False

        self._frequencies = None
        self._cv = None
        self._gv = None
        self._f_vectors = None
        self._gv_sum2 = None
        self._mfp = None
        self._gamma = None
        self._gamma_iso = None
        self._averaged_pp_interaction = None

        self._mesh = None
        self._conversion_factor = None

        self._is_isotope = None
        self._isotope = None
        self._mass_variances = None
        self._grid_point_count = None
        self._collision_eigenvalues = None

        Conductivity.__init__(self,
                              interaction,
                              symmetry,
                              grid_points=grid_points,
                              temperatures=temperatures,
                              sigmas=sigmas,
                              sigma_cutoff=sigma_cutoff,
                              is_isotope=is_isotope,
                              mass_variances=mass_variances,
                              boundary_mfp=boundary_mfp,
                              is_kappa_star=is_kappa_star,
                              gv_delta_q=gv_delta_q,
                              is_full_pp=is_full_pp,
                              log_level=log_level)

        self._is_reducible_collision_matrix = is_reducible_collision_matrix
        self._solve_collective_phonon = solve_collective_phonon
        # Without kappa-star symmetry the full (reducible) BZ must be used.
        if not self._is_kappa_star:
            self._is_reducible_collision_matrix = True
        self._collision_matrix = None
        self._read_pp = read_pp
        self._pp_filename = pp_filename
        self._pinv_cutoff = pinv_cutoff
        self._pinv_solver = pinv_solver

        # When no explicit grid points are given, all (irreducible) grid
        # points are computed and the collision matrix is stored for all
        # of them; otherwise only one grid point is stored at a time.
        if grid_points is None:
            self._all_grid_points = True
        else:
            self._all_grid_points = False

        if self._temperatures is not None:
            self._allocate_values()
def set_kappa_at_sigmas(self):
    """Compute thermal conductivity for all sigmas from the collision matrix.

    Requires that every irreducible grid point has been calculated;
    otherwise an error is printed and the process exits.
    """
    # All irreducible grid points must be present to invert the full
    # collision matrix.
    if len(self._grid_points) != len(self._ir_grid_points):
        print("Collision matrix is not well created.")
        # sys is imported at module level (used throughout this file);
        # the previous local `import sys` was redundant.
        sys.exit(1)
    self._set_kappa_at_sigmas()
def set_collision_matrix(self, collision_matrix):
    """Set an externally prepared (e.g. read from file) collision matrix."""
    self._collision_matrix = collision_matrix
def get_f_vectors(self):
    """Return f-vectors, shape=(num_grid_points, num_band, 3)."""
    return self._f_vectors
def get_collision_matrix(self):
    """Return the collision matrix array (None until allocated)."""
    return self._collision_matrix
def get_collision_eigenvalues(self):
    """Return eigenvalues of the diagonalized collision matrix."""
    return self._collision_eigenvalues
def get_mean_free_path(self):
    """Return mode mean free paths (see _set_mean_free_path)."""
    return self._mfp
def get_frequencies_all(self):
    """Return phonon frequencies for all grid points on the mesh."""
    return self._frequencies[:np.prod(self._mesh)]
def get_kappa_RTA(self):
    """Return RTA thermal conductivity, shape=(sigmas, temps, 6)."""
    return self._kappa_RTA
def get_mode_kappa_RTA(self):
    """Return RTA mode-resolved thermal conductivity."""
    return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
    """Free per-grid-point integration weights and ph-ph strengths."""
    self._collision.delete_integration_weights()
    self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
    """Compute collision matrix and harmonic properties at one grid point.

    Called once per grid point by the base-class driver;
    ``self._grid_point_count`` is the index of the current grid point.
    """
    i = self._grid_point_count
    self._show_log_header(i)
    gp = self._grid_points[i]

    if not self._all_grid_points:
        # Only one grid point is stored at a time; clear previous data.
        self._collision_matrix[:] = 0

    if not self._read_gamma:
        self._collision.set_grid_point(gp)

        if self._log_level:
            print("Number of triplets: %d" %
                  len(self._pp.get_triplets_at_q()[0]))

        self._set_collision_matrix_at_sigmas(i)

    # Reducible matrices are indexed by the grid point itself;
    # irreducible ones by the running index.
    if self._is_reducible_collision_matrix:
        igp = gp
    else:
        igp = i
    self._set_harmonic_properties(i, igp)
    if self._isotope is not None:
        gamma_iso = self._get_gamma_isotope_at_sigmas(i)
        band_indices = self._pp.get_band_indices()
        self._gamma_iso[:, igp, :] = gamma_iso[:, band_indices]

    if self._log_level:
        self._show_log(i)
def _allocate_values(self):
    """Allocate all result arrays and the CollisionMatrix instance.

    Array shapes differ between the reducible (full mesh) and the
    irreducible (symmetry-reduced, with extra Cartesian indices) cases.
    """
    num_band0 = len(self._pp.get_band_indices())
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    num_temp = len(self._temperatures)
    num_mesh_points = np.prod(self._mesh)

    if self._is_reducible_collision_matrix:
        num_grid_points = num_mesh_points
    else:
        num_grid_points = len(self._grid_points)

    # With explicit grid points, only one point is stored at a time.
    if self._all_grid_points:
        num_stored_grid_points = num_grid_points
    else:
        num_stored_grid_points = 1

    self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
                           dtype='double', order='C')
    self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
                               dtype='double', order='C')
    self._gv = np.zeros((num_grid_points, num_band0, 3),
                        dtype='double', order='C')
    self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
                               dtype='double', order='C')
    self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
                             dtype='double', order='C')
    self._mfp = np.zeros((len(self._sigmas),
                          num_temp,
                          num_grid_points,
                          num_band0,
                          3), dtype='double', order='C')
    self._cv = np.zeros((num_temp, num_grid_points, num_band0),
                        dtype='double', order='C')
    if self._is_full_pp:
        self._averaged_pp_interaction = np.zeros(
            (num_grid_points, num_band0), dtype='double', order='C')
    if self._gamma is None:
        self._gamma = np.zeros((len(self._sigmas),
                                num_temp,
                                num_grid_points,
                                num_band0), dtype='double', order='C')
    if self._isotope is not None:
        self._gamma_iso = np.zeros((len(self._sigmas),
                                    num_grid_points,
                                    num_band0), dtype='double', order='C')

    if self._is_reducible_collision_matrix:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_mesh_points,
                                     num_band,
                                     6), dtype='double', order='C')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_mesh_points,
                                         num_band,
                                         6), dtype='double', order='C')
        self._collision = CollisionMatrix(
            self._pp,
            is_reducible_collision_matrix=True,
            log_level=self._log_level)
        if self._collision_matrix is None:
            # (sigma, T, gp, band, gp', band')
            self._collision_matrix = np.empty(
                (len(self._sigmas), num_temp,
                 num_stored_grid_points, num_band0,
                 num_mesh_points, num_band),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas), num_temp, num_mesh_points * num_band),
            dtype='double', order='C')
    else:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_grid_points,
                                     num_band0,
                                     6), dtype='double')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_grid_points,
                                         num_band0,
                                         6), dtype='double')
        # Map each irreducible grid point through every point-group
        # rotation; used for k-star weights and kappa symmetrization.
        self._rot_grid_points = np.zeros(
            (len(self._ir_grid_points), len(self._point_operations)),
            dtype='uintp')
        for i, ir_gp in enumerate(self._ir_grid_points):
            self._rot_grid_points[i] = get_grid_points_by_rotations(
                self._grid_address[ir_gp],
                self._point_operations,
                self._mesh)
        self._collision = CollisionMatrix(
            self._pp,
            point_operations=self._point_operations,
            ir_grid_points=self._ir_grid_points,
            rot_grid_points=self._rot_grid_points,
            log_level=self._log_level)
        if self._collision_matrix is None:
            # (sigma, T, gp, band, 3, irgp', band', 3)
            self._collision_matrix = np.empty(
                (len(self._sigmas),
                 num_temp,
                 num_stored_grid_points, num_band0, 3,
                 num_ir_grid_points, num_band, 3),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas),
             num_temp,
             num_ir_grid_points * num_band * 3),
            dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
    """Compute or read the collision matrix at grid point index i.

    Loops over sigmas; the ph-ph interaction strength is either read
    from a pp hdf5 file, reused from the previous sigma, or computed,
    then the collision matrix is run for every temperature.
    """
    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "Calculating collision matrix with "
            if sigma is None:
                text += "tetrahedron method."
            else:
                text += "sigma=%s" % sigma
                if self._sigma_cutoff is None:
                    text += "."
                else:
                    text += "(%4.2f SD)." % self._sigma_cutoff
            print(text)
        self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
        self._collision.set_integration_weights()

        if self._read_pp:
            # NOTE(review): `pp` is only bound in this branch; if the
            # hdf5 read fails it returns None and set_interaction_strength
            # receives None — TODO confirm upstream handling.
            pp, _g_zero = read_pp_from_hdf5(
                self._mesh,
                grid_point=self._grid_points[i],
                sigma=sigma,
                sigma_cutoff=self._sigma_cutoff,
                filename=self._pp_filename,
                verbose=(self._log_level > 0))
            _, g_zero = self._collision.get_integration_weights()
            if self._log_level:
                if len(self._sigmas) > 1:
                    print("Multiple sigmas or mixing smearing and "
                          "tetrahedron method is not supported.")
            if _g_zero is not None and (_g_zero != g_zero).any():
                raise ValueError("Inconsistency found in g_zero.")
            self._collision.set_interaction_strength(pp)
        elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
            # The interaction strength from the first sigma can be reused.
            if self._log_level:
                print("Existing ph-ph interaction is used.")
        else:
            if self._log_level:
                print("Calculating ph-ph interaction...")
            self._collision.run_interaction(is_full_pp=self._is_full_pp)
            if self._is_full_pp and j == 0:
                self._averaged_pp_interaction[i] = (
                    self._pp.get_averaged_interaction())

        for k, t in enumerate(self._temperatures):
            self._collision.set_temperature(t)
            self._collision.run()
            # Storage index: grid point itself (reducible), running
            # index (irreducible), or 0 when only one point is stored.
            if self._all_grid_points:
                if self._is_reducible_collision_matrix:
                    i_data = self._grid_points[i]
                else:
                    i_data = i
            else:
                i_data = 0
            self._gamma[j, k, i_data] = (
                self._collision.get_imag_self_energy())
            self._collision_matrix[j, k, i_data] = (
                self._collision.get_collision_matrix())
def _set_kappa_at_sigmas(self):
    """Symmetrize/weight the collision matrix, then solve for kappa.

    For each sigma and temperature the collision matrix is diagonalized
    and both the LBTE kappa and a collision-matrix-based RTA kappa are
    computed.
    """
    if self._is_reducible_collision_matrix:
        if self._is_kappa_star:
            self._average_collision_matrix_by_degeneracy()
            self._expand_collisions()
        self._combine_reducible_collisions()
        # Full mesh: every grid point has unit weight.
        weights = np.ones(np.prod(self._mesh), dtype='intc')
        self._symmetrize_collision_matrix()
    else:
        self._combine_collisions()
        weights = self._get_weights()
        # Scale rows/columns by sqrt-weights (see _get_weights).
        for i, w_i in enumerate(weights):
            for j, w_j in enumerate(weights):
                self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
        self._average_collision_matrix_by_degeneracy()
        self._symmetrize_collision_matrix()

    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "----------- Thermal conductivity (W/m-k) "
            if sigma:
                text += "for sigma=%s -----------" % sigma
            else:
                text += "with tetrahedron method -----------"
            print(text)
            sys.stdout.flush()
        for k, t in enumerate(self._temperatures):
            if t > 0:
                self._set_kappa_RTA(j, k, weights)
                w = diagonalize_collision_matrix(
                    self._collision_matrix,
                    i_sigma=j, i_temp=k,
                    pinv_solver=self._pinv_solver,
                    log_level=self._log_level)
                self._collision_eigenvalues[j, k] = w
                self._set_kappa(j, k, weights)
                if self._log_level:
                    print(("#%6s " + " %-10s" * 6) %
                          ("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
                    print(("%7.1f " + " %10.3f" * 6) %
                          ((t,) + tuple(self._kappa[j, k])))
                    print((" %6s " + " %10.3f" * 6) %
                          (("(RTA)",) + tuple(self._kappa_RTA[j, k])))
                    print("-" * 76)
                    sys.stdout.flush()
        sys.stdout.flush()

    if self._log_level:
        print('')
def _combine_collisions(self):
    """Add the diagonal (gamma) contribution to the irreducible matrix.

    For every rotation that leaves an irreducible grid point invariant,
    main_diagonal[l] * r is added to the (gp, band) diagonal block.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    for j, k in list(np.ndindex((len(self._sigmas),
                                 len(self._temperatures)))):
        for i, ir_gp in enumerate(self._ir_grid_points):
            for r, r_gp in zip(self._rotations_cartesian,
                               self._rot_grid_points[i]):
                # Only rotations mapping the grid point onto itself.
                if ir_gp != r_gp:
                    continue
                main_diagonal = self._get_main_diagonal(i, j, k)
                for l in range(num_band):
                    self._collision_matrix[
                        j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
    """Add the diagonal (gamma) contribution to the reducible matrix."""
    n_band = self._primitive.get_number_of_atoms() * 3
    n_gp = np.prod(self._mesh)
    for i_sigma in range(len(self._sigmas)):
        for i_temp in range(len(self._temperatures)):
            for gp in range(n_gp):
                diag = self._get_main_diagonal(gp, i_sigma, i_temp)
                for band in range(n_band):
                    self._collision_matrix[
                        i_sigma, i_temp, gp, band, gp, band] += diag[band]
def _expand_collisions(self):
    """Expand irreducible-grid-point data to the full mesh by symmetry.

    The collision matrix is expanded by the C routine when available,
    otherwise by an equivalent (slow) Python fallback; gamma, group
    velocities and heat capacities are then distributed over each
    k-star, dividing by the multiplicity of the invariant rotations.
    """
    start = time.time()
    if self._log_level:
        sys.stdout.write("- Expanding properties to all grid points ")
        sys.stdout.flush()
    num_mesh_points = np.prod(self._mesh)
    num_rot = len(self._point_operations)
    rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype='uintp')
    for i in range(num_mesh_points):
        rot_grid_points[:, i] = get_grid_points_by_rotations(
            self._grid_address[i],
            self._point_operations,
            self._mesh)

    try:
        import phono3py._phono3py as phono3c
        phono3c.expand_collision_matrix(self._collision_matrix,
                                        self._ir_grid_points,
                                        rot_grid_points)
    except ImportError:
        print("Phono3py C-routine is not compiled correctly.")
        # Pure-Python fallback of expand_collision_matrix.
        for i, ir_gp in enumerate(self._ir_grid_points):
            multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
            colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
            colmat_irgp /= multi
            self._collision_matrix[:, :, ir_gp, :, :, :] = 0
            for j, r in enumerate(self._rotations_cartesian):
                gp_r = rot_grid_points[j, ir_gp]
                for k in range(num_mesh_points):
                    gp_c = rot_grid_points[j, k]
                    self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
                        colmat_irgp[:, :, :, k, :])

    for i, ir_gp in enumerate(self._ir_grid_points):
        gv_irgp = self._gv[ir_gp].copy()
        self._gv[ir_gp] = 0
        cv_irgp = self._cv[:, ir_gp, :].copy()
        self._cv[:, ir_gp, :] = 0
        gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
        self._gamma[:, :, ir_gp, :] = 0
        multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
        if self._gamma_iso is not None:
            gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
            self._gamma_iso[:, ir_gp, :] = 0
        for j, r in enumerate(self._rotations_cartesian):
            gp_r = rot_grid_points[j, ir_gp]
            self._gamma[:, :, gp_r, :] += gamma_irgp / multi
            if self._gamma_iso is not None:
                self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
            # Group velocity is a vector: rotate as well as distribute.
            self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
            self._cv[:, gp_r, :] += cv_irgp / multi

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_weights(self):
    """Return weights used for collision matrix and |X> and |f>.

    self._rot_grid_points : ndarray
        shape=(ir_grid_points, point_operations), dtype='uintp'
    Each row holds the arms of one k-star with duplicates; its length is
    the order of the crystallographic point group |g|, and the number of
    unique entries is the number of arms g_k.

    Returns
    -------
    weights : list
        sqrt(g_k / |g|) for each irreducible grid point.
    """
    order = float(self._rot_grid_points.shape[1])
    return [np.sqrt(len(np.unique(arms)) / order)
            for arms in self._rot_grid_points]
def _symmetrize_collision_matrix(self):
    """Symmetrize the collision matrix: M <- (M + M^T) / 2.

    Uses the compiled C routine when available; otherwise a numpy
    fallback operates in place on 2D reshaped views.
    """
    start = time.time()
    try:
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(built-in) ")
            sys.stdout.flush()
        phono3c.symmetrize_collision_matrix(self._collision_matrix)
    except ImportError:
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(numpy) ")
            sys.stdout.flush()
        if self._is_reducible_collision_matrix:
            size = np.prod(self._collision_matrix.shape[2:4])
        else:
            size = np.prod(self._collision_matrix.shape[2:5])
        for i in range(self._collision_matrix.shape[0]):
            for j in range(self._collision_matrix.shape[1]):
                # reshape of the C-contiguous slice returns a view, so
                # the update is in place.
                col_mat = self._collision_matrix[i, j].reshape(size, size)
                # NOTE(review): in-place `a += a.T` relies on numpy's
                # overlap detection (numpy >= 1.13) — confirm minimum
                # numpy requirement.
                col_mat += col_mat.T
                col_mat /= 2
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _average_collision_matrix_by_degeneracy(self):
    """Average matrix elements over degenerate phonon bands.

    Done twice: first over the row (bra) band index, then over the
    column (ket) band index.  Reducible matrices are indexed by grid
    point, irreducible ones by the running irreducible index.
    """
    start = time.time()

    # Average matrix elements belonging to degenerate bands
    if self._log_level:
        sys.stdout.write("- Averaging collision matrix elements "
                         "by phonon degeneracy ")
        sys.stdout.flush()

    col_mat = self._collision_matrix
    # Row (bra) side.
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)

            if self._is_reducible_collision_matrix:
                sum_col = (col_mat[:, :, gp, bi_set, :, :].sum(axis=2) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, gp, j, :, :] = sum_col
            else:
                sum_col = (
                    col_mat[:, :, i, bi_set, :, :, :, :].sum(axis=2) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, i, j, :, :, :, :] = sum_col

    # Column (ket) side.
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)
            if self._is_reducible_collision_matrix:
                sum_col = (col_mat[:, :, :, :, gp, bi_set].sum(axis=4) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, gp, j] = sum_col
            else:
                sum_col = (
                    col_mat[:, :, :, :, :, i, bi_set, :].sum(axis=5) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, :, i, j, :] = sum_col

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_X(self, i_temp, weights, gv):
    """Return the |X> vectors (weighted, thermally scaled group velocities).

    Each (grid point, band) row of ``gv`` is multiplied by its weight
    and by f * THzToEv / (4 Kb t^2 sinh(f THzToEv / 2 Kb t)); bands at
    or below the cutoff frequency get zero.  Returns an (N, 3) array;
    all-zero at non-positive temperature.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    X = gv.copy()
    if self._is_reducible_collision_matrix:
        num_mesh_points = np.prod(self._mesh)
        freqs = self._frequencies[:num_mesh_points]
    else:
        freqs = self._frequencies[self._ir_grid_points]

    t = self._temperatures[i_temp]
    # Bug fix: return early at t <= 0.  The original computed the sinh
    # factors first, dividing by zero (numpy warnings / NaNs) before
    # discarding the result; the returned value (all zeros) is unchanged.
    if t <= 0:
        return np.zeros_like(X.reshape(-1, 3))

    sinh = np.where(freqs > self._cutoff_frequency,
                    np.sinh(freqs * THzToEv / (2 * Kb * t)),
                    -1.0)
    # Marker value -1 (below cutoff) maps to 0 here.
    inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
    freqs_sinh = freqs * THzToEv * inv_sinh / (4 * Kb * t ** 2)

    for i, f in enumerate(freqs_sinh):
        X[i] *= weights[i]
        for j in range(num_band):
            X[i, j] *= f[j]

    return X.reshape(-1, 3)
def _get_Y(self, i_sigma, i_temp, weights, X):
    """Return |Y> = Omega^-1 |X> via the pseudo-inverted collision matrix.

    Solvers 0-5 build the pseudo-inverse from the stored eigensolution
    with np.dot; solver 6 uses the built-in C routine that overwrites
    the collision matrix in place.  Also stores the f-vectors.
    """
    solver = _select_solver(self._pinv_solver)
    num_band = self._primitive.get_number_of_atoms() * 3

    if self._is_reducible_collision_matrix:
        num_grid_points = np.prod(self._mesh)
        size = num_grid_points * num_band
    else:
        num_grid_points = len(self._ir_grid_points)
        size = num_grid_points * num_band * 3
    v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
    # Transpose eigvecs because colmat was solved by column major order
    if solver in [1, 2, 4, 5]:
        v = v.T

    start = time.time()

    if solver in [0, 1, 2, 3, 4, 5]:
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(np.dot) " % self._pinv_cutoff)
            sys.stdout.flush()
        e = self._get_eigvals_pinv(i_sigma, i_temp)
        if self._is_reducible_collision_matrix:
            X1 = np.dot(v.T, X)
            for i in range(3):
                X1[:, i] *= e
            Y = np.dot(v, X1)
        else:
            Y = np.dot(v, e * np.dot(v.T, X.ravel())).reshape(-1, 3)
    else:  # solver=6 This is slower as far as tested.
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(built-in) " % self._pinv_cutoff)
            sys.stdout.flush()
        w = self._collision_eigenvalues[i_sigma, i_temp]
        phono3c.pinv_from_eigensolution(self._collision_matrix,
                                        w,
                                        i_sigma,
                                        i_temp,
                                        self._pinv_cutoff,
                                        0)
        if self._is_reducible_collision_matrix:
            Y = np.dot(v, X)
        else:
            Y = np.dot(v, X.ravel()).reshape(-1, 3)

    self._set_f_vectors(Y, num_grid_points, weights)

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()

    return Y
def _set_f_vectors(self, Y, num_grid_points, weights):
    """Store f-vectors computed from |Y>, undoing per-point weights."""
    # Collision matrix is half of that defined in Chaput's paper.
    # Therefore Y is divided by 2.
    num_band = self._primitive.get_number_of_atoms() * 3
    self._f_vectors[:] = ((Y / 2).reshape(num_grid_points, num_band * 3).T
                          / weights).T.reshape(self._f_vectors.shape)
def _get_eigvals_pinv(self, i_sigma, i_temp):
    """Return eigenvalue reciprocals with the pseudo-inverse cutoff.

    Eigenvalues whose magnitude does not exceed self._pinv_cutoff are
    treated as null-space and mapped to zero.
    """
    eigvals = self._collision_eigenvalues[i_sigma, i_temp]
    cutoff = self._pinv_cutoff
    return np.array([1 / v if abs(v) > cutoff else 0 for v in eigvals],
                    dtype='double')
def _get_I(self, a, b, size, plus_transpose=True):
    """Return I matrix in Chaput's PRL paper.

    None is returned if I is zero matrix.
    """
    # Accumulate sum over rotations of the outer product of rows a, b:
    # pair_sum[i, j] = sum_r r[a, i] * r[b, j].
    pair_sum = np.zeros((3, 3), dtype='double', order='C')
    for rot in self._rotations_cartesian:
        pair_sum += np.outer(rot[a], rot[b])
    if plus_transpose:
        pair_sum += pair_sum.T

    # Return None not to consume computer time on diagonalization.
    if (np.abs(pair_sum) < 1e-10).all():
        return None

    # Same as np.kron(np.eye(size), pair_sum), but filled explicitly so
    # the result is C-contiguous with dtype 'double'.
    I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
    for blk in range(size):
        I_mat[3 * blk:3 * (blk + 1), 3 * blk:3 * (blk + 1)] = pair_sum

    return I_mat
def _set_kappa(self, i_sigma, i_temp, weights):
    """Compute LBTE kappa and mode kappa at one (sigma, temperature)."""
    N = self._num_sampling_grid_points
    if self._is_reducible_collision_matrix:
        X = self._get_X(i_temp, weights, self._gv)
        num_mesh_points = np.prod(self._mesh)
        Y = self._get_Y(i_sigma, i_temp, weights, X)
        self._set_mean_free_path(i_sigma, i_temp, weights, Y)
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        # Normalize the rotation sum introduced by _set_mode_kappa.
        self._mode_kappa[i_sigma, i_temp] /= len(self._rotations_cartesian)
        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
    else:
        if self._solve_collective_phonon:
            self._set_mode_kappa_Chaput(i_sigma, i_temp, weights)
        else:
            X = self._get_X(i_temp, weights, self._gv)
            num_ir_grid_points = len(self._ir_grid_points)
            Y = self._get_Y(i_sigma, i_temp, weights, X)
            self._set_mean_free_path(i_sigma, i_temp, weights, Y)
            self._set_mode_kappa(self._mode_kappa,
                                 X,
                                 Y,
                                 num_ir_grid_points,
                                 self._rotations_cartesian,
                                 i_sigma,
                                 i_temp)
            # self._set_mode_kappa_from_mfp(weights,
            #                               num_ir_grid_points,
            #                               self._rotations_cartesian,
            #                               i_sigma,
            #                               i_temp)

        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
def _set_kappa_RTA(self, i_sigma, i_temp, weights):
    """Compute RTA kappa at one (sigma, temperature) for comparison.

    The lifetime is taken from the diagonal part of the collision
    matrix (reducible case) or from the main diagonal via
    _get_main_diagonal (irreducible case).
    """
    N = self._num_sampling_grid_points
    num_band = self._primitive.get_number_of_atoms() * 3
    X = self._get_X(i_temp, weights, self._gv)
    Y = np.zeros_like(X)

    if self._is_reducible_collision_matrix:
        # This RTA is not equivalent to conductivity_RTA.
        # The lifetime is defined from the diagonal part of
        # collision matrix.
        num_mesh_points = np.prod(self._mesh)
        size = num_mesh_points * num_band
        v_diag = np.diagonal(
            self._collision_matrix[i_sigma, i_temp].reshape(size, size))

        for gp in range(num_mesh_points):
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = gp * num_band + j
                    Y[i_mode, :] = X[i_mode, :] / v_diag[i_mode]
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        g = len(self._rotations_cartesian)
        self._mode_kappa_RTA[i_sigma, i_temp] /= g
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
    else:
        # This RTA is supposed to be the same as conductivity_RTA.
        num_ir_grid_points = len(self._ir_grid_points)
        for i, gp in enumerate(self._ir_grid_points):
            g = self._get_main_diagonal(i, i_sigma, i_temp)
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = i * num_band + j
                    # Arm numpy to raise on the division below so an
                    # unphysical zero/invalid diagonal is reported.
                    old_settings = np.seterr(all='raise')
                    try:
                        Y[i_mode, :] = X[i_mode, :] / g[j]
                    # Bug fix: the bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit and any programming
                    # error; only the armed floating-point error is an
                    # expected condition here.
                    except FloatingPointError:
                        print("=" * 26 + " Warning " + "=" * 26)
                        print(" Unexpected physical condition of ph-ph "
                              "interaction calculation was found.")
                        print(" g[j]=%f at gp=%d, band=%d, freq=%f" %
                              (g[j], gp, j + 1, f))
                        print("=" * 61)
                    finally:
                        # Restore numpy error state even if an
                        # unexpected exception propagates.
                        np.seterr(**old_settings)

        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_ir_grid_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
def _set_mode_kappa(self,
                    mode_kappa,
                    X,
                    Y,
                    num_grid_points,
                    rotations_cartesian,
                    i_sigma,
                    i_temp):
    """Fill mode_kappa[i_sigma, i_temp] from |X> and |Y>.

    When rotations_cartesian is given, each mode tensor is summed over
    the rotations (caller divides by the group order afterwards where
    needed); otherwise the plain outer product is used.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    for i, (v_gp, f_gp) in enumerate(zip(X.reshape(num_grid_points,
                                                   num_band, 3),
                                         Y.reshape(num_grid_points,
                                                   num_band, 3))):
        for j, (v, f) in enumerate(zip(v_gp, f_gp)):
            # Do not consider three lowest modes at Gamma-point
            # It is assumed that there are no imaginary modes.
            # NOTE(review): _grid_address is indexed by the running
            # index i; in the irreducible case this matches the grid
            # point only if ir_grid_points[i] == i for Gamma — confirm.
            if (self._grid_address[i] == 0).all() and j < 3:
                continue

            if rotations_cartesian is None:
                sum_k = np.outer(v, f)
            else:
                sum_k = np.zeros((3, 3), dtype='double')
                for r in rotations_cartesian:
                    sum_k += np.outer(np.dot(r, v), np.dot(r, f))
            # Equivalent to I(a,b) + I(b,a) (see comment below).
            sum_k = sum_k + sum_k.T
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]

    t = self._temperatures[i_temp]
    # Collision matrix is half of that defined in Chaput's paper.
    # Therefore here 2 is not necessary multiplied.
    # sum_k = sum_k + sum_k.T is equivalent to I(a,b) + I(b,a).
    mode_kappa[i_sigma, i_temp] *= self._conversion_factor * Kb * t ** 2
def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights):
    """Calculate mode kappa by the way in Laurent Chaput's PRL paper.

    This gives the different result from _set_mode_kappa and requires
    more memory space.
    """
    X = self._get_X(i_temp, weights, self._gv).ravel()
    num_ir_grid_points = len(self._ir_grid_points)
    num_band = self._primitive.get_number_of_atoms() * 3
    size = num_ir_grid_points * num_band * 3
    v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
    solver = _select_solver(self._pinv_solver)
    # Eigenvectors from column-major solvers need transposing.
    if solver in [1, 2, 4, 5]:
        v = v.T
    e = self._get_eigvals_pinv(i_sigma, i_temp)
    t = self._temperatures[i_temp]

    # Pseudo-inverse: Omega^-1 = V diag(e) V^T.
    omega_inv = np.empty(v.shape, dtype='double', order='C')
    np.dot(v, (e * v).T, out=omega_inv)
    Y = np.dot(omega_inv, X)
    self._set_f_vectors(Y, num_ir_grid_points, weights)
    elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
    for i, vxf in enumerate(elems):
        mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
        self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
        if mat is not None:
            np.dot(mat, omega_inv, out=mat)
            # vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
            # vals = vals.reshape(num_ir_grid_points, num_band)
            # self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
            w = diagonalize_collision_matrix(mat,
                                             pinv_solver=self._pinv_solver,
                                             log_level=self._log_level)
            if solver in [1, 2, 4, 5]:
                mat = mat.T
            # Spectral decomposition of kappa over eigenmodes of I*Omega^-1.
            spectra = np.dot(mat.T, X) ** 2 * w
            for s, eigvec in zip(spectra, mat.T):
                vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
                vals = vals.reshape(num_ir_grid_points, num_band)
                self._mode_kappa[i_sigma, i_temp, :, :, i] += vals

    factor = self._conversion_factor * Kb * t ** 2
    self._mode_kappa[i_sigma, i_temp] *= factor
def _set_mode_kappa_from_mfp(self,
                             weights,
                             num_grid_points,
                             rotations_cartesian,
                             i_sigma,
                             i_temp):
    """Alternative mode-kappa from mean free paths (currently unused).

    NOTE(review): the final line scales the whole _mode_kappa array (all
    sigmas/temperatures), not just [i_sigma, i_temp] — verify before
    re-enabling the commented-out caller in _set_kappa.
    """
    for i, (v_gp, mfp_gp, cv_gp) in enumerate(
            zip(self._gv, self._mfp[i_sigma, i_temp], self._cv[i_temp])):
        for j, (v, mfp, cv) in enumerate(zip(v_gp, mfp_gp, cv_gp)):
            sum_k = np.zeros((3, 3), dtype='double')
            for r in rotations_cartesian:
                sum_k += np.outer(np.dot(r, v), np.dot(r, mfp))
            sum_k = (sum_k + sum_k.T) / 2 * cv * weights[i] ** 2 * 2 * np.pi
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                self._mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
    self._mode_kappa *= - self._conversion_factor
def _set_mean_free_path(self, i_sigma, i_temp, weights, Y):
    """Store per-mode mean free paths derived from the f-vectors."""
    t = self._temperatures[i_temp]
    # shape = (num_grid_points, num_band, 3),
    for i, f_gp in enumerate(self._f_vectors):
        for j, f in enumerate(f_gp):
            cv = self._cv[i_temp, i, j]
            # Skip modes with (numerically) vanishing heat capacity.
            if cv < 1e-10:
                continue
            self._mfp[i_sigma, i_temp, i, j] = (
                - 2 * t * np.sqrt(Kb / cv) * f / (2 * np.pi))
def _show_log(self, i):
    """Print frequencies, group velocities (and optionally averaged
    ph-ph interaction Pqj) for the grid point at running index i."""
    gp = self._grid_points[i]
    frequencies = self._frequencies[gp]
    # Reducible data is stored per grid point, irreducible per index.
    if self._is_reducible_collision_matrix:
        gv = self._gv[gp]
    else:
        gv = self._gv[i]
    if self._is_full_pp:
        ave_pp = self._averaged_pp_interaction[i]
        text = "Frequency group velocity (x, y, z) |gv| Pqj"
    else:
        text = "Frequency group velocity (x, y, z) |gv|"
    # Simplified from `if ... is None: pass / else:`.
    if self._gv_delta_q is not None:
        text += " (dq=%3.1e)" % self._gv_delta_q
    print(text)
    if self._is_full_pp:
        for f, v, pp in zip(frequencies, gv, ave_pp):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f %11.3e" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v), pp))
    else:
        for f, v in zip(frequencies, gv):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v)))

    sys.stdout.flush()
def _py_symmetrize_collision_matrix(self):
    """Pure-Python symmetrization of the irreducible collision matrix."""
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    shape = (num_ir_grid_points, num_band, 3,
             num_ir_grid_points, num_band, 3)
    # np.ndindex iterates in the same row-major order as the original
    # six nested loops.
    for i, j, k, l, m, n in np.ndindex(shape):
        self._py_set_symmetrized_element(i, j, k, l, m, n)
def _py_set_symmetrized_element(self, i, j, k, l, m, n):
    """Replace element (ijk, lmn) and its transpose partner by their mean."""
    upper = self._collision_matrix[:, :, i, j, k, l, m, n]
    lower = self._collision_matrix[:, :, l, m, n, i, j, k]
    mean = (upper + lower) / 2
    self._collision_matrix[:, :, i, j, k, l, m, n] = mean
    self._collision_matrix[:, :, l, m, n, i, j, k] = mean
def _py_symmetrize_collision_matrix_no_kappa_stars(self):
    """Pure-Python symmetrization of the reducible collision matrix."""
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    shape = (num_ir_grid_points, num_band, num_ir_grid_points, num_band)
    # Same row-major traversal as the original four nested loops.
    for i, j, k, l in np.ndindex(shape):
        self._py_set_symmetrized_element_no_kappa_stars(i, j, k, l)
def _py_set_symmetrized_element_no_kappa_stars(self, i, j, k, l):
    """Replace element (ij, kl) and its transpose partner by their mean."""
    upper = self._collision_matrix[:, :, i, j, k, l]
    lower = self._collision_matrix[:, :, k, l, i, j]
    mean = (upper + lower) / 2
    self._collision_matrix[:, :, i, j, k, l] = mean
    self._collision_matrix[:, :, k, l, i, j] = mean
|
atztogo/phono3py | phono3py/phonon3/conductivity_LBTE.py | Conductivity_LBTE._get_weights | python | def _get_weights(self):
weights = []
n = float(self._rot_grid_points.shape[1])
for r_gps in self._rot_grid_points:
weights.append(np.sqrt(len(np.unique(r_gps)) / n))
return weights | Returns weights used for collision matrix and |X> and |f>
self._rot_grid_points : ndarray
shape=(ir_grid_points, point_operations), dtype='uintp'
r_gps : grid points of arms of k-star with duplicates
len(r_gps) == order of crystallographic point group
len(unique(r_gps)) == number of arms of the k-star
Returns
-------
weights : list
sqrt(g_k)/|g|, where g is the crystallographic point group and
g_k is the number of arms of k-star. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/conductivity_LBTE.py#L1211-L1232 | null | class Conductivity_LBTE(Conductivity):
def __init__(self,
interaction,
symmetry,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=None, # finite difference for group veolocity
is_full_pp=False,
read_pp=False,
pp_filename=None,
pinv_cutoff=1.0e-8,
pinv_solver=0,
log_level=0):
self._pp = None
self._temperatures = None
self._sigmas = None
self._sigma_cutoff = None
self._is_kappa_star = None
self._gv_delta_q = None
self._is_full_pp = None
self._log_level = None
self._primitive = None
self._dm = None
self._frequency_factor_to_THz = None
self._cutoff_frequency = None
self._boundary_mfp = None
self._symmetry = None
self._point_operations = None
self._rotations_cartesian = None
self._grid_points = None
self._grid_weights = None
self._grid_address = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._kappa = None
self._mode_kappa = None
self._kappa_RTA = None
self._mode_kappa_RTA = None
self._read_gamma = False
self._read_gamma_iso = False
self._frequencies = None
self._cv = None
self._gv = None
self._f_vectors = None
self._gv_sum2 = None
self._mfp = None
self._gamma = None
self._gamma_iso = None
self._averaged_pp_interaction = None
self._mesh = None
self._conversion_factor = None
self._is_isotope = None
self._isotope = None
self._mass_variances = None
self._grid_point_count = None
self._collision_eigenvalues = None
Conductivity.__init__(self,
interaction,
symmetry,
grid_points=grid_points,
temperatures=temperatures,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
log_level=log_level)
self._is_reducible_collision_matrix = is_reducible_collision_matrix
self._solve_collective_phonon = solve_collective_phonon
if not self._is_kappa_star:
self._is_reducible_collision_matrix = True
self._collision_matrix = None
self._read_pp = read_pp
self._pp_filename = pp_filename
self._pinv_cutoff = pinv_cutoff
self._pinv_solver = pinv_solver
if grid_points is None:
self._all_grid_points = True
else:
self._all_grid_points = False
if self._temperatures is not None:
self._allocate_values()
def set_kappa_at_sigmas(self):
    """Solve the linearized BTE and set thermal conductivity for all sigmas.

    The direct solution requires that the collision matrix was built over
    the full set of irreducible grid points; if the selected grid points
    do not cover them, abort with a diagnostic rather than produce wrong
    numbers.
    """
    # Fix: the original re-imported ``sys`` locally before sys.exit();
    # the module already imports sys at top level (used for sys.stdout
    # elsewhere), so the local import was redundant.
    if len(self._grid_points) != len(self._ir_grid_points):
        print("Collision matrix is not well created.")
        sys.exit(1)
    self._set_kappa_at_sigmas()
def set_collision_matrix(self, collision_matrix):
    """Inject an externally prepared collision matrix (e.g. read from file)."""
    self._collision_matrix = collision_matrix

def get_f_vectors(self):
    """Return the |f> vectors, shape (num_grid_points, num_band, 3)."""
    return self._f_vectors

def get_collision_matrix(self):
    """Return the collision matrix (layout depends on reducible/ir mode)."""
    return self._collision_matrix

def get_collision_eigenvalues(self):
    """Return eigenvalues of the symmetrized collision matrix."""
    return self._collision_eigenvalues

def get_mean_free_path(self):
    """Return mode mean free paths, shape (sigmas, temps, grids, bands, 3)."""
    return self._mfp

def get_frequencies_all(self):
    """Return phonon frequencies over the full (reducible) mesh."""
    num_mesh_points = np.prod(self._mesh)
    return self._frequencies[:num_mesh_points]

def get_kappa_RTA(self):
    """Return RTA thermal conductivity, shape (sigmas, temps, 6)."""
    return self._kappa_RTA

def get_mode_kappa_RTA(self):
    """Return per-mode RTA thermal conductivity."""
    return self._mode_kappa_RTA

def delete_gp_collision_and_pp(self):
    """Release per-grid-point integration weights and ph-ph strengths."""
    self._collision.delete_integration_weights()
    self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
i = self._grid_point_count
self._show_log_header(i)
gp = self._grid_points[i]
if not self._all_grid_points:
self._collision_matrix[:] = 0
if not self._read_gamma:
self._collision.set_grid_point(gp)
if self._log_level:
print("Number of triplets: %d" %
len(self._pp.get_triplets_at_q()[0]))
self._set_collision_matrix_at_sigmas(i)
if self._is_reducible_collision_matrix:
igp = gp
else:
igp = i
self._set_harmonic_properties(i, igp)
if self._isotope is not None:
gamma_iso = self._get_gamma_isotope_at_sigmas(i)
band_indices = self._pp.get_band_indices()
self._gamma_iso[:, igp, :] = gamma_iso[:, band_indices]
if self._log_level:
self._show_log(i)
def _allocate_values(self):
num_band0 = len(self._pp.get_band_indices())
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
num_temp = len(self._temperatures)
num_mesh_points = np.prod(self._mesh)
if self._is_reducible_collision_matrix:
num_grid_points = num_mesh_points
else:
num_grid_points = len(self._grid_points)
if self._all_grid_points:
num_stored_grid_points = num_grid_points
else:
num_stored_grid_points = 1
self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._gv = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
dtype='double', order='C')
self._mfp = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
3), dtype='double', order='C')
self._cv = np.zeros((num_temp, num_grid_points, num_band0),
dtype='double', order='C')
if self._is_full_pp:
self._averaged_pp_interaction = np.zeros(
(num_grid_points, num_band0), dtype='double', order='C')
if self._gamma is None:
self._gamma = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0), dtype='double', order='C')
if self._isotope is not None:
self._gamma_iso = np.zeros((len(self._sigmas),
num_grid_points,
num_band0), dtype='double', order='C')
if self._is_reducible_collision_matrix:
self._mode_kappa = np.zeros((len(self._sigmas),
num_temp,
num_mesh_points,
num_band,
6), dtype='double', order='C')
self._mode_kappa_RTA = np.zeros((len(self._sigmas),
num_temp,
num_mesh_points,
num_band,
6), dtype='double', order='C')
self._collision = CollisionMatrix(
self._pp,
is_reducible_collision_matrix=True,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas), num_temp,
num_stored_grid_points, num_band0,
num_mesh_points, num_band),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas), num_temp, num_mesh_points * num_band),
dtype='double', order='C')
else:
self._mode_kappa = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
self._mode_kappa_RTA = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
self._rot_grid_points = np.zeros(
(len(self._ir_grid_points), len(self._point_operations)),
dtype='uintp')
for i, ir_gp in enumerate(self._ir_grid_points):
self._rot_grid_points[i] = get_grid_points_by_rotations(
self._grid_address[ir_gp],
self._point_operations,
self._mesh)
self._collision = CollisionMatrix(
self._pp,
point_operations=self._point_operations,
ir_grid_points=self._ir_grid_points,
rot_grid_points=self._rot_grid_points,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas),
num_temp,
num_stored_grid_points, num_band0, 3,
num_ir_grid_points, num_band, 3),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas),
num_temp,
num_ir_grid_points * num_band * 3),
dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating collision matrix with "
if sigma is None:
text += "tetrahedron method."
else:
text += "sigma=%s" % sigma
if self._sigma_cutoff is None:
text += "."
else:
text += "(%4.2f SD)." % self._sigma_cutoff
print(text)
self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
self._collision.set_integration_weights()
if self._read_pp:
pp, _g_zero = read_pp_from_hdf5(
self._mesh,
grid_point=self._grid_points[i],
sigma=sigma,
sigma_cutoff=self._sigma_cutoff,
filename=self._pp_filename,
verbose=(self._log_level > 0))
_, g_zero = self._collision.get_integration_weights()
if self._log_level:
if len(self._sigmas) > 1:
print("Multiple sigmas or mixing smearing and "
"tetrahedron method is not supported.")
if _g_zero is not None and (_g_zero != g_zero).any():
raise ValueError("Inconsistency found in g_zero.")
self._collision.set_interaction_strength(pp)
elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
if self._log_level:
print("Existing ph-ph interaction is used.")
else:
if self._log_level:
print("Calculating ph-ph interaction...")
self._collision.run_interaction(is_full_pp=self._is_full_pp)
if self._is_full_pp and j == 0:
self._averaged_pp_interaction[i] = (
self._pp.get_averaged_interaction())
for k, t in enumerate(self._temperatures):
self._collision.set_temperature(t)
self._collision.run()
if self._all_grid_points:
if self._is_reducible_collision_matrix:
i_data = self._grid_points[i]
else:
i_data = i
else:
i_data = 0
self._gamma[j, k, i_data] = (
self._collision.get_imag_self_energy())
self._collision_matrix[j, k, i_data] = (
self._collision.get_collision_matrix())
def _set_kappa_at_sigmas(self):
if self._is_reducible_collision_matrix:
if self._is_kappa_star:
self._average_collision_matrix_by_degeneracy()
self._expand_collisions()
self._combine_reducible_collisions()
weights = np.ones(np.prod(self._mesh), dtype='intc')
self._symmetrize_collision_matrix()
else:
self._combine_collisions()
weights = self._get_weights()
for i, w_i in enumerate(weights):
for j, w_j in enumerate(weights):
self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
self._average_collision_matrix_by_degeneracy()
self._symmetrize_collision_matrix()
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "----------- Thermal conductivity (W/m-k) "
if sigma:
text += "for sigma=%s -----------" % sigma
else:
text += "with tetrahedron method -----------"
print(text)
sys.stdout.flush()
for k, t in enumerate(self._temperatures):
if t > 0:
self._set_kappa_RTA(j, k, weights)
w = diagonalize_collision_matrix(
self._collision_matrix,
i_sigma=j, i_temp=k,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
self._collision_eigenvalues[j, k] = w
self._set_kappa(j, k, weights)
if self._log_level:
print(("#%6s " + " %-10s" * 6) %
("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
print(("%7.1f " + " %10.3f" * 6) %
((t,) + tuple(self._kappa[j, k])))
print((" %6s " + " %10.3f" * 6) %
(("(RTA)",) + tuple(self._kappa_RTA[j, k])))
print("-" * 76)
sys.stdout.flush()
sys.stdout.flush()
if self._log_level:
print('')
def _combine_collisions(self):
num_band = self._primitive.get_number_of_atoms() * 3
for j, k in list(np.ndindex((len(self._sigmas),
len(self._temperatures)))):
for i, ir_gp in enumerate(self._ir_grid_points):
for r, r_gp in zip(self._rotations_cartesian,
self._rot_grid_points[i]):
if ir_gp != r_gp:
continue
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_mesh_points = np.prod(self._mesh)
for j, k in list(
np.ndindex((len(self._sigmas), len(self._temperatures)))):
for i in range(num_mesh_points):
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, i, l] += main_diagonal[l]
def _expand_collisions(self):
start = time.time()
if self._log_level:
sys.stdout.write("- Expanding properties to all grid points ")
sys.stdout.flush()
num_mesh_points = np.prod(self._mesh)
num_rot = len(self._point_operations)
rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype='uintp')
for i in range(num_mesh_points):
rot_grid_points[:, i] = get_grid_points_by_rotations(
self._grid_address[i],
self._point_operations,
self._mesh)
try:
import phono3py._phono3py as phono3c
phono3c.expand_collision_matrix(self._collision_matrix,
self._ir_grid_points,
rot_grid_points)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for i, ir_gp in enumerate(self._ir_grid_points):
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
colmat_irgp /= multi
self._collision_matrix[:, :, ir_gp, :, :, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
for k in range(num_mesh_points):
gp_c = rot_grid_points[j, k]
self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
colmat_irgp[:, :, :, k, :])
for i, ir_gp in enumerate(self._ir_grid_points):
gv_irgp = self._gv[ir_gp].copy()
self._gv[ir_gp] = 0
cv_irgp = self._cv[:, ir_gp, :].copy()
self._cv[:, ir_gp, :] = 0
gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
self._gamma[:, :, ir_gp, :] = 0
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
if self._gamma_iso is not None:
gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
self._gamma_iso[:, ir_gp, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
self._gamma[:, :, gp_r, :] += gamma_irgp / multi
if self._gamma_iso is not None:
self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
self._cv[:, gp_r, :] += cv_irgp / multi
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _symmetrize_collision_matrix(self):
start = time.time()
try:
import phono3py._phono3py as phono3c
if self._log_level:
sys.stdout.write("- Making collision matrix symmetric "
"(built-in) ")
sys.stdout.flush()
phono3c.symmetrize_collision_matrix(self._collision_matrix)
except ImportError:
if self._log_level:
sys.stdout.write("- Making collision matrix symmetric "
"(numpy) ")
sys.stdout.flush()
if self._is_reducible_collision_matrix:
size = np.prod(self._collision_matrix.shape[2:4])
else:
size = np.prod(self._collision_matrix.shape[2:5])
for i in range(self._collision_matrix.shape[0]):
for j in range(self._collision_matrix.shape[1]):
col_mat = self._collision_matrix[i, j].reshape(size, size)
col_mat += col_mat.T
col_mat /= 2
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _average_collision_matrix_by_degeneracy(self):
start = time.time()
# Average matrix elements belonging to degenerate bands
if self._log_level:
sys.stdout.write("- Averaging collision matrix elements "
"by phonon degeneracy ")
sys.stdout.flush()
col_mat = self._collision_matrix
for i, gp in enumerate(self._ir_grid_points):
freqs = self._frequencies[gp]
deg_sets = degenerate_sets(freqs)
for dset in deg_sets:
bi_set = []
for j in range(len(freqs)):
if j in dset:
bi_set.append(j)
if self._is_reducible_collision_matrix:
sum_col = (col_mat[:, :, gp, bi_set, :, :].sum(axis=2) /
len(bi_set))
for j in bi_set:
col_mat[:, :, gp, j, :, :] = sum_col
else:
sum_col = (
col_mat[:, :, i, bi_set, :, :, :, :].sum(axis=2) /
len(bi_set))
for j in bi_set:
col_mat[:, :, i, j, :, :, :, :] = sum_col
for i, gp in enumerate(self._ir_grid_points):
freqs = self._frequencies[gp]
deg_sets = degenerate_sets(freqs)
for dset in deg_sets:
bi_set = []
for j in range(len(freqs)):
if j in dset:
bi_set.append(j)
if self._is_reducible_collision_matrix:
sum_col = (col_mat[:, :, :, :, gp, bi_set].sum(axis=4) /
len(bi_set))
for j in bi_set:
col_mat[:, :, :, :, gp, j] = sum_col
else:
sum_col = (
col_mat[:, :, :, :, :, i, bi_set, :].sum(axis=5) /
len(bi_set))
for j in bi_set:
col_mat[:, :, :, :, :, i, j, :] = sum_col
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _get_X(self, i_temp, weights, gv):
num_band = self._primitive.get_number_of_atoms() * 3
X = gv.copy()
if self._is_reducible_collision_matrix:
num_mesh_points = np.prod(self._mesh)
freqs = self._frequencies[:num_mesh_points]
else:
freqs = self._frequencies[self._ir_grid_points]
t = self._temperatures[i_temp]
sinh = np.where(freqs > self._cutoff_frequency,
np.sinh(freqs * THzToEv / (2 * Kb * t)),
-1.0)
inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
freqs_sinh = freqs * THzToEv * inv_sinh / (4 * Kb * t ** 2)
for i, f in enumerate(freqs_sinh):
X[i] *= weights[i]
for j in range(num_band):
X[i, j] *= f[j]
if t > 0:
return X.reshape(-1, 3)
else:
return np.zeros_like(X.reshape(-1, 3))
def _get_Y(self, i_sigma, i_temp, weights, X):
solver = _select_solver(self._pinv_solver)
num_band = self._primitive.get_number_of_atoms() * 3
if self._is_reducible_collision_matrix:
num_grid_points = np.prod(self._mesh)
size = num_grid_points * num_band
else:
num_grid_points = len(self._ir_grid_points)
size = num_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
# Transpose eigvecs because colmat was solved by column major order
if solver in [1, 2, 4, 5]:
v = v.T
start = time.time()
if solver in [0, 1, 2, 3, 4, 5]:
if self._log_level:
sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
"(np.dot) " % self._pinv_cutoff)
sys.stdout.flush()
e = self._get_eigvals_pinv(i_sigma, i_temp)
if self._is_reducible_collision_matrix:
X1 = np.dot(v.T, X)
for i in range(3):
X1[:, i] *= e
Y = np.dot(v, X1)
else:
Y = np.dot(v, e * np.dot(v.T, X.ravel())).reshape(-1, 3)
else: # solver=6 This is slower as far as tested.
import phono3py._phono3py as phono3c
if self._log_level:
sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
"(built-in) " % self._pinv_cutoff)
sys.stdout.flush()
w = self._collision_eigenvalues[i_sigma, i_temp]
phono3c.pinv_from_eigensolution(self._collision_matrix,
w,
i_sigma,
i_temp,
self._pinv_cutoff,
0)
if self._is_reducible_collision_matrix:
Y = np.dot(v, X)
else:
Y = np.dot(v, X.ravel()).reshape(-1, 3)
self._set_f_vectors(Y, num_grid_points, weights)
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return Y
def _set_f_vectors(self, Y, num_grid_points, weights):
# Collision matrix is half of that defined in Chaput's paper.
# Therefore Y is divided by 2.
num_band = self._primitive.get_number_of_atoms() * 3
self._f_vectors[:] = ((Y / 2).reshape(num_grid_points, num_band * 3).T
/ weights).T.reshape(self._f_vectors.shape)
def _get_eigvals_pinv(self, i_sigma, i_temp):
w = self._collision_eigenvalues[i_sigma, i_temp]
e = np.zeros_like(w)
for l, val in enumerate(w):
if abs(val) > self._pinv_cutoff:
e[l] = 1 / val
return e
def _get_I(self, a, b, size, plus_transpose=True):
"""Return I matrix in Chaput's PRL paper.
None is returned if I is zero matrix.
"""
r_sum = np.zeros((3, 3), dtype='double', order='C')
for r in self._rotations_cartesian:
for i in range(3):
for j in range(3):
r_sum[i, j] += r[a, i] * r[b, j]
if plus_transpose:
r_sum += r_sum.T
# Return None not to consume computer for diagonalization
if (np.abs(r_sum) < 1e-10).all():
return None
# Same as np.kron(np.eye(size), r_sum), but writen as below
# to be sure the values in memory C-congiguous with 'double'.
I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
for i in range(size):
I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum
return I_mat
def _set_kappa(self, i_sigma, i_temp, weights):
N = self._num_sampling_grid_points
if self._is_reducible_collision_matrix:
X = self._get_X(i_temp, weights, self._gv)
num_mesh_points = np.prod(self._mesh)
Y = self._get_Y(i_sigma, i_temp, weights, X)
self._set_mean_free_path(i_sigma, i_temp, weights, Y)
# Putting self._rotations_cartesian is to symmetrize kappa.
# None can be put instead for watching pure information.
self._set_mode_kappa(self._mode_kappa,
X,
Y,
num_mesh_points,
self._rotations_cartesian,
i_sigma,
i_temp)
self._mode_kappa[i_sigma, i_temp] /= len(self._rotations_cartesian)
self._kappa[i_sigma, i_temp] = (
self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
else:
if self._solve_collective_phonon:
self._set_mode_kappa_Chaput(i_sigma, i_temp, weights)
else:
X = self._get_X(i_temp, weights, self._gv)
num_ir_grid_points = len(self._ir_grid_points)
Y = self._get_Y(i_sigma, i_temp, weights, X)
self._set_mean_free_path(i_sigma, i_temp, weights, Y)
self._set_mode_kappa(self._mode_kappa,
X,
Y,
num_ir_grid_points,
self._rotations_cartesian,
i_sigma,
i_temp)
# self._set_mode_kappa_from_mfp(weights,
# num_ir_grid_points,
# self._rotations_cartesian,
# i_sigma,
# i_temp)
self._kappa[i_sigma, i_temp] = (
self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
def _set_kappa_RTA(self, i_sigma, i_temp, weights):
    """Set RTA thermal conductivity at one (sigma, temperature) pair.

    In the reducible-collision-matrix mode the "lifetime" comes from the
    diagonal of the collision matrix (not equivalent to
    conductivity_RTA); in the irreducible mode it is supposed to match
    conductivity_RTA.
    """
    N = self._num_sampling_grid_points
    num_band = self._primitive.get_number_of_atoms() * 3
    X = self._get_X(i_temp, weights, self._gv)
    Y = np.zeros_like(X)
    if self._is_reducible_collision_matrix:
        # This RTA is not equivalent to conductivity_RTA.
        # The lifetime is defined from the diagonal part of
        # collision matrix.
        num_mesh_points = np.prod(self._mesh)
        size = num_mesh_points * num_band
        v_diag = np.diagonal(
            self._collision_matrix[i_sigma, i_temp].reshape(size, size))
        for gp in range(num_mesh_points):
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = gp * num_band + j
                    Y[i_mode, :] = X[i_mode, :] / v_diag[i_mode]
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        g = len(self._rotations_cartesian)
        self._mode_kappa_RTA[i_sigma, i_temp] /= g
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
    else:
        # This RTA is supposed to be the same as conductivity_RTA.
        num_ir_grid_points = len(self._ir_grid_points)
        # Fix: install the raise-on-FP-error regime once around the loop
        # and restore it in ``finally`` (the original re-installed it per
        # mode and used a bare ``except:`` that swallowed every exception
        # type, not only numpy's FloatingPointError raised by
        # np.seterr(all='raise')).
        old_settings = np.seterr(all='raise')
        try:
            for i, gp in enumerate(self._ir_grid_points):
                g = self._get_main_diagonal(i, i_sigma, i_temp)
                frequencies = self._frequencies[gp]
                for j, f in enumerate(frequencies):
                    if f > self._cutoff_frequency:
                        i_mode = i * num_band + j
                        try:
                            Y[i_mode, :] = X[i_mode, :] / g[j]
                        except FloatingPointError:
                            # Zero (or invalid) linewidth: warn and keep
                            # the zero-initialized Y row.
                            print("=" * 26 + " Warning " + "=" * 26)
                            print(" Unexpected physical condition of ph-ph "
                                  "interaction calculation was found.")
                            print(" g[j]=%f at gp=%d, band=%d, freq=%f" %
                                  (g[j], gp, j + 1, f))
                            print("=" * 61)
        finally:
            np.seterr(**old_settings)
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_ir_grid_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
def _set_mode_kappa(self,
mode_kappa,
X,
Y,
num_grid_points,
rotations_cartesian,
i_sigma,
i_temp):
num_band = self._primitive.get_number_of_atoms() * 3
for i, (v_gp, f_gp) in enumerate(zip(X.reshape(num_grid_points,
num_band, 3),
Y.reshape(num_grid_points,
num_band, 3))):
for j, (v, f) in enumerate(zip(v_gp, f_gp)):
# Do not consider three lowest modes at Gamma-point
# It is assumed that there are no imaginary modes.
if (self._grid_address[i] == 0).all() and j < 3:
continue
if rotations_cartesian is None:
sum_k = np.outer(v, f)
else:
sum_k = np.zeros((3, 3), dtype='double')
for r in rotations_cartesian:
sum_k += np.outer(np.dot(r, v), np.dot(r, f))
sum_k = sum_k + sum_k.T
for k, vxf in enumerate(
((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
t = self._temperatures[i_temp]
# Collision matrix is half of that defined in Chaput's paper.
# Therefore here 2 is not necessary multiplied.
# sum_k = sum_k + sum_k.T is equivalent to I(a,b) + I(b,a).
mode_kappa[i_sigma, i_temp] *= self._conversion_factor * Kb * t ** 2
def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights):
"""Calculate mode kappa by the way in Laurent Chaput's PRL paper.
This gives the different result from _set_mode_kappa and requires more
memory space.
"""
X = self._get_X(i_temp, weights, self._gv).ravel()
num_ir_grid_points = len(self._ir_grid_points)
num_band = self._primitive.get_number_of_atoms() * 3
size = num_ir_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
solver = _select_solver(self._pinv_solver)
if solver in [1, 2, 4, 5]:
v = v.T
e = self._get_eigvals_pinv(i_sigma, i_temp)
t = self._temperatures[i_temp]
omega_inv = np.empty(v.shape, dtype='double', order='C')
np.dot(v, (e * v).T, out=omega_inv)
Y = np.dot(omega_inv, X)
self._set_f_vectors(Y, num_ir_grid_points, weights)
elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
for i, vxf in enumerate(elems):
mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
if mat is not None:
np.dot(mat, omega_inv, out=mat)
# vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
# vals = vals.reshape(num_ir_grid_points, num_band)
# self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
w = diagonalize_collision_matrix(mat,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
if solver in [1, 2, 4, 5]:
mat = mat.T
spectra = np.dot(mat.T, X) ** 2 * w
for s, eigvec in zip(spectra, mat.T):
vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
vals = vals.reshape(num_ir_grid_points, num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] += vals
factor = self._conversion_factor * Kb * t ** 2
self._mode_kappa[i_sigma, i_temp] *= factor
def _set_mode_kappa_from_mfp(self,
weights,
num_grid_points,
rotations_cartesian,
i_sigma,
i_temp):
for i, (v_gp, mfp_gp, cv_gp) in enumerate(
zip(self._gv, self._mfp[i_sigma, i_temp], self._cv[i_temp])):
for j, (v, mfp, cv) in enumerate(zip(v_gp, mfp_gp, cv_gp)):
sum_k = np.zeros((3, 3), dtype='double')
for r in rotations_cartesian:
sum_k += np.outer(np.dot(r, v), np.dot(r, mfp))
sum_k = (sum_k + sum_k.T) / 2 * cv * weights[i] ** 2 * 2 * np.pi
for k, vxf in enumerate(
((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
self._mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
self._mode_kappa *= - self._conversion_factor
def _set_mean_free_path(self, i_sigma, i_temp, weights, Y):
    """Fill self._mfp from the |f> vectors at one (sigma, T) pair.

    Modes whose heat capacity is numerically zero (< 1e-10) are skipped
    and keep their zero-initialized mean free path.
    """
    temp = self._temperatures[i_temp]
    # self._f_vectors has shape (num_grid_points, num_band, 3).
    for gp_idx, f_gp in enumerate(self._f_vectors):
        for band_idx, f_vec in enumerate(f_gp):
            cv = self._cv[i_temp, gp_idx, band_idx]
            if cv < 1e-10:
                continue
            mfp_vec = - 2 * temp * np.sqrt(Kb / cv) * f_vec / (2 * np.pi)
            self._mfp[i_sigma, i_temp, gp_idx, band_idx] = mfp_vec
def _show_log(self, i):
gp = self._grid_points[i]
frequencies = self._frequencies[gp]
if self._is_reducible_collision_matrix:
gv = self._gv[gp]
else:
gv = self._gv[i]
if self._is_full_pp:
ave_pp = self._averaged_pp_interaction[i]
text = "Frequency group velocity (x, y, z) |gv| Pqj"
else:
text = "Frequency group velocity (x, y, z) |gv|"
if self._gv_delta_q is None:
pass
else:
text += " (dq=%3.1e)" % self._gv_delta_q
print(text)
if self._is_full_pp:
for f, v, pp in zip(frequencies, gv, ave_pp):
print("%8.3f (%8.3f %8.3f %8.3f) %8.3f %11.3e" %
(f, v[0], v[1], v[2], np.linalg.norm(v), pp))
else:
for f, v in zip(frequencies, gv):
print("%8.3f (%8.3f %8.3f %8.3f) %8.3f" %
(f, v[0], v[1], v[2], np.linalg.norm(v)))
sys.stdout.flush()
def _py_symmetrize_collision_matrix(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(3):
for l in range(num_ir_grid_points):
for m in range(num_band):
for n in range(3):
self._py_set_symmetrized_element(
i, j, k, l, m, n)
def _py_set_symmetrized_element(self, i, j, k, l, m, n):
sym_val = (self._collision_matrix[:, :, i, j, k, l, m, n] +
self._collision_matrix[:, :, l, m, n, i, j, k]) / 2
self._collision_matrix[:, :, i, j, k, l, m, n] = sym_val
self._collision_matrix[:, :, l, m, n, i, j, k] = sym_val
def _py_symmetrize_collision_matrix_no_kappa_stars(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(num_ir_grid_points):
for l in range(num_band):
self._py_set_symmetrized_element_no_kappa_stars(
i, j, k, l)
def _py_set_symmetrized_element_no_kappa_stars(self, i, j, k, l):
sym_val = (self._collision_matrix[:, :, i, j, k, l] +
self._collision_matrix[:, :, k, l, i, j]) / 2
self._collision_matrix[:, :, i, j, k, l] = sym_val
self._collision_matrix[:, :, k, l, i, j] = sym_val
|
atztogo/phono3py | phono3py/phonon3/conductivity_LBTE.py | Conductivity_LBTE._get_I | python | def _get_I(self, a, b, size, plus_transpose=True):
r_sum = np.zeros((3, 3), dtype='double', order='C')
for r in self._rotations_cartesian:
for i in range(3):
for j in range(3):
r_sum[i, j] += r[a, i] * r[b, j]
if plus_transpose:
r_sum += r_sum.T
# Return None not to consume computer for diagonalization
if (np.abs(r_sum) < 1e-10).all():
return None
# Same as np.kron(np.eye(size), r_sum), but writen as below
# to be sure the values in memory C-congiguous with 'double'.
I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
for i in range(size):
I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum
return I_mat | Return I matrix in Chaput's PRL paper.
None is returned if I is zero matrix. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/conductivity_LBTE.py#L1418-L1442 | null | class Conductivity_LBTE(Conductivity):
def __init__(self,
interaction,
symmetry,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=None, # finite difference for group veolocity
is_full_pp=False,
read_pp=False,
pp_filename=None,
pinv_cutoff=1.0e-8,
pinv_solver=0,
log_level=0):
self._pp = None
self._temperatures = None
self._sigmas = None
self._sigma_cutoff = None
self._is_kappa_star = None
self._gv_delta_q = None
self._is_full_pp = None
self._log_level = None
self._primitive = None
self._dm = None
self._frequency_factor_to_THz = None
self._cutoff_frequency = None
self._boundary_mfp = None
self._symmetry = None
self._point_operations = None
self._rotations_cartesian = None
self._grid_points = None
self._grid_weights = None
self._grid_address = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._kappa = None
self._mode_kappa = None
self._kappa_RTA = None
self._mode_kappa_RTA = None
self._read_gamma = False
self._read_gamma_iso = False
self._frequencies = None
self._cv = None
self._gv = None
self._f_vectors = None
self._gv_sum2 = None
self._mfp = None
self._gamma = None
self._gamma_iso = None
self._averaged_pp_interaction = None
self._mesh = None
self._conversion_factor = None
self._is_isotope = None
self._isotope = None
self._mass_variances = None
self._grid_point_count = None
self._collision_eigenvalues = None
Conductivity.__init__(self,
interaction,
symmetry,
grid_points=grid_points,
temperatures=temperatures,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
log_level=log_level)
self._is_reducible_collision_matrix = is_reducible_collision_matrix
self._solve_collective_phonon = solve_collective_phonon
if not self._is_kappa_star:
self._is_reducible_collision_matrix = True
self._collision_matrix = None
self._read_pp = read_pp
self._pp_filename = pp_filename
self._pinv_cutoff = pinv_cutoff
self._pinv_solver = pinv_solver
if grid_points is None:
self._all_grid_points = True
else:
self._all_grid_points = False
if self._temperatures is not None:
self._allocate_values()
def set_kappa_at_sigmas(self):
if len(self._grid_points) != len(self._ir_grid_points):
print("Collision matrix is not well created.")
import sys
sys.exit(1)
else:
self._set_kappa_at_sigmas()
def set_collision_matrix(self, collision_matrix):
self._collision_matrix = collision_matrix
def get_f_vectors(self):
return self._f_vectors
def get_collision_matrix(self):
return self._collision_matrix
def get_collision_eigenvalues(self):
return self._collision_eigenvalues
def get_mean_free_path(self):
return self._mfp
def get_frequencies_all(self):
return self._frequencies[:np.prod(self._mesh)]
def get_kappa_RTA(self):
return self._kappa_RTA
def get_mode_kappa_RTA(self):
return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
self._collision.delete_integration_weights()
self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
    """Calculate collision-matrix elements for the current grid point.

    Driven by the iteration in the Conductivity base class;
    ``self._grid_point_count`` points at the grid point being processed.
    """
    i = self._grid_point_count
    self._show_log_header(i)
    gp = self._grid_points[i]

    if not self._all_grid_points:
        # Only a single grid-point slot is stored; clear it before
        # accumulating elements for this grid point.
        self._collision_matrix[:] = 0

    if not self._read_gamma:
        self._collision.set_grid_point(gp)

        if self._log_level:
            print("Number of triplets: %d" %
                  len(self._pp.get_triplets_at_q()[0]))

        self._set_collision_matrix_at_sigmas(i)

    # Reducible storage is indexed by the grid-point id itself;
    # irreducible storage by position in the requested list.
    if self._is_reducible_collision_matrix:
        igp = gp
    else:
        igp = i
    self._set_harmonic_properties(i, igp)
    if self._isotope is not None:
        gamma_iso = self._get_gamma_isotope_at_sigmas(i)
        band_indices = self._pp.get_band_indices()
        self._gamma_iso[:, igp, :] = gamma_iso[:, band_indices]

    if self._log_level:
        self._show_log(i)
def _allocate_values(self):
    """Allocate result and work arrays.

    Two storage layouts exist:
    - reducible: per-point axes span the full mesh; the collision
      matrix has shape (sigma, T, gp, band0, gp, band).
    - irreducible (default): per-point axes span the requested grid
      points; the collision matrix carries Cartesian indices,
      (sigma, T, ir_gp, band0, 3, ir_gp, band, 3).
    """
    num_band0 = len(self._pp.get_band_indices())
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    num_temp = len(self._temperatures)
    num_mesh_points = np.prod(self._mesh)

    if self._is_reducible_collision_matrix:
        num_grid_points = num_mesh_points
    else:
        num_grid_points = len(self._grid_points)

    # With a subset of grid points only one collision-matrix slot is
    # kept and is overwritten per grid point (see _run_at_grid_point).
    if self._all_grid_points:
        num_stored_grid_points = num_grid_points
    else:
        num_stored_grid_points = 1

    self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
                           dtype='double', order='C')
    self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
                               dtype='double', order='C')
    self._gv = np.zeros((num_grid_points, num_band0, 3),
                        dtype='double', order='C')
    self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
                               dtype='double', order='C')
    self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
                             dtype='double', order='C')
    self._mfp = np.zeros((len(self._sigmas),
                          num_temp,
                          num_grid_points,
                          num_band0,
                          3), dtype='double', order='C')
    self._cv = np.zeros((num_temp, num_grid_points, num_band0),
                        dtype='double', order='C')
    if self._is_full_pp:
        self._averaged_pp_interaction = np.zeros(
            (num_grid_points, num_band0), dtype='double', order='C')
    if self._gamma is None:
        self._gamma = np.zeros((len(self._sigmas),
                                num_temp,
                                num_grid_points,
                                num_band0), dtype='double', order='C')
    if self._isotope is not None:
        self._gamma_iso = np.zeros((len(self._sigmas),
                                    num_grid_points,
                                    num_band0), dtype='double', order='C')

    if self._is_reducible_collision_matrix:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_mesh_points,
                                     num_band,
                                     6), dtype='double', order='C')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_mesh_points,
                                         num_band,
                                         6), dtype='double', order='C')
        self._collision = CollisionMatrix(
            self._pp,
            is_reducible_collision_matrix=True,
            log_level=self._log_level)
        if self._collision_matrix is None:
            self._collision_matrix = np.empty(
                (len(self._sigmas), num_temp,
                 num_stored_grid_points, num_band0,
                 num_mesh_points, num_band),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas), num_temp, num_mesh_points * num_band),
            dtype='double', order='C')
    else:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_grid_points,
                                     num_band0,
                                     6), dtype='double')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_grid_points,
                                         num_band0,
                                         6), dtype='double')
        # Orbit of each ir grid point under the point-group operations.
        self._rot_grid_points = np.zeros(
            (len(self._ir_grid_points), len(self._point_operations)),
            dtype='uintp')
        for i, ir_gp in enumerate(self._ir_grid_points):
            self._rot_grid_points[i] = get_grid_points_by_rotations(
                self._grid_address[ir_gp],
                self._point_operations,
                self._mesh)
        self._collision = CollisionMatrix(
            self._pp,
            point_operations=self._point_operations,
            ir_grid_points=self._ir_grid_points,
            rot_grid_points=self._rot_grid_points,
            log_level=self._log_level)
        if self._collision_matrix is None:
            self._collision_matrix = np.empty(
                (len(self._sigmas),
                 num_temp,
                 num_stored_grid_points, num_band0, 3,
                 num_ir_grid_points, num_band, 3),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas),
             num_temp,
             num_ir_grid_points * num_band * 3),
            dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
    """Fill gamma and collision-matrix elements for grid point ``i``.

    The ph-ph interaction strength is either read from hdf5
    (``self._read_pp``), reused from the previous sigma, or computed.
    """
    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "Calculating collision matrix with "
            if sigma is None:
                text += "tetrahedron method."
            else:
                text += "sigma=%s" % sigma
                if self._sigma_cutoff is None:
                    text += "."
                else:
                    text += "(%4.2f SD)." % self._sigma_cutoff
            print(text)

        self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
        self._collision.set_integration_weights()
        if self._read_pp:
            pp, _g_zero = read_pp_from_hdf5(
                self._mesh,
                grid_point=self._grid_points[i],
                sigma=sigma,
                sigma_cutoff=self._sigma_cutoff,
                filename=self._pp_filename,
                verbose=(self._log_level > 0))
            _, g_zero = self._collision.get_integration_weights()
            if self._log_level:
                if len(self._sigmas) > 1:
                    print("Multiple sigmas or mixing smearing and "
                          "tetrahedron method is not supported.")
            # Stored and freshly computed integration-weight masks must
            # agree, otherwise the read pp data do not match this run.
            if _g_zero is not None and (_g_zero != g_zero).any():
                raise ValueError("Inconsistency found in g_zero.")
            self._collision.set_interaction_strength(pp)
        elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
            # The interaction strength computed for the first sigma can
            # be reused for the remaining ones.
            if self._log_level:
                print("Existing ph-ph interaction is used.")
        else:
            if self._log_level:
                print("Calculating ph-ph interaction...")
            self._collision.run_interaction(is_full_pp=self._is_full_pp)
        if self._is_full_pp and j == 0:
            self._averaged_pp_interaction[i] = (
                self._pp.get_averaged_interaction())

        for k, t in enumerate(self._temperatures):
            self._collision.set_temperature(t)
            self._collision.run()
            if self._all_grid_points:
                if self._is_reducible_collision_matrix:
                    i_data = self._grid_points[i]
                else:
                    i_data = i
            else:
                i_data = 0
            self._gamma[j, k, i_data] = (
                self._collision.get_imag_self_energy())
            self._collision_matrix[j, k, i_data] = (
                self._collision.get_collision_matrix())
def _set_kappa_at_sigmas(self):
    """Weight/symmetrize the collision matrix, solve it, set kappa.

    For each sigma and temperature, both the direct-solution kappa and
    an RTA-like reference value are computed and logged.
    """
    if self._is_reducible_collision_matrix:
        if self._is_kappa_star:
            self._average_collision_matrix_by_degeneracy()
            self._expand_collisions()
        self._combine_reducible_collisions()
        weights = np.ones(np.prod(self._mesh), dtype='intc')
        self._symmetrize_collision_matrix()
    else:
        self._combine_collisions()
        weights = self._get_weights()
        # Scale rows and columns by sqrt(g_k/|g|) of both grid points.
        for i, w_i in enumerate(weights):
            for j, w_j in enumerate(weights):
                self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
        self._average_collision_matrix_by_degeneracy()
        self._symmetrize_collision_matrix()

    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "----------- Thermal conductivity (W/m-k) "
            if sigma:
                text += "for sigma=%s -----------" % sigma
            else:
                text += "with tetrahedron method -----------"
            print(text)
            sys.stdout.flush()
        for k, t in enumerate(self._temperatures):
            if t > 0:
                self._set_kappa_RTA(j, k, weights)
                w = diagonalize_collision_matrix(
                    self._collision_matrix,
                    i_sigma=j, i_temp=k,
                    pinv_solver=self._pinv_solver,
                    log_level=self._log_level)
                self._collision_eigenvalues[j, k] = w
                self._set_kappa(j, k, weights)
                if self._log_level:
                    print(("#%6s " + " %-10s" * 6) %
                          ("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
                    print(("%7.1f " + " %10.3f" * 6) %
                          ((t,) + tuple(self._kappa[j, k])))
                    print((" %6s " + " %10.3f" * 6) %
                          (("(RTA)",) + tuple(self._kappa_RTA[j, k])))
                    print("-" * 76)
                    sys.stdout.flush()
        sys.stdout.flush()

    if self._log_level:
        print('')
def _combine_collisions(self):
    """Add the diagonal (imag self-energy) part onto the symmetrized
    collision matrix, only at grid points fixed by the rotation."""
    num_band = self._primitive.get_number_of_atoms() * 3
    for j, k in list(np.ndindex((len(self._sigmas),
                                 len(self._temperatures)))):
        for i, ir_gp in enumerate(self._ir_grid_points):
            for r, r_gp in zip(self._rotations_cartesian,
                               self._rot_grid_points[i]):
                # Only rotations that map the ir grid point onto itself
                # contribute to its diagonal block.
                if ir_gp != r_gp:
                    continue

                main_diagonal = self._get_main_diagonal(i, j, k)
                for l in range(num_band):
                    self._collision_matrix[
                        j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_mesh_points = np.prod(self._mesh)
for j, k in list(
np.ndindex((len(self._sigmas), len(self._temperatures)))):
for i in range(num_mesh_points):
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, i, l] += main_diagonal[l]
def _expand_collisions(self):
    """Expand ir-grid-point data (collision matrix, gv, cv, gamma)
    to all grid points of the mesh using the point-group rotations."""
    start = time.time()
    if self._log_level:
        sys.stdout.write("- Expanding properties to all grid points ")
        sys.stdout.flush()
    num_mesh_points = np.prod(self._mesh)
    num_rot = len(self._point_operations)
    rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype='uintp')
    for i in range(num_mesh_points):
        rot_grid_points[:, i] = get_grid_points_by_rotations(
            self._grid_address[i],
            self._point_operations,
            self._mesh)

    try:
        import phono3py._phono3py as phono3c
        phono3c.expand_collision_matrix(self._collision_matrix,
                                        self._ir_grid_points,
                                        rot_grid_points)
    except ImportError:
        # Pure-python fallback of the C routine above.
        print("Phono3py C-routine is not compiled correctly.")
        for i, ir_gp in enumerate(self._ir_grid_points):
            # multi: how many rotations fix this ir grid point;
            # dividing avoids double counting on expansion.
            multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
            colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
            colmat_irgp /= multi
            self._collision_matrix[:, :, ir_gp, :, :, :] = 0
            for j, r in enumerate(self._rotations_cartesian):
                gp_r = rot_grid_points[j, ir_gp]
                for k in range(num_mesh_points):
                    gp_c = rot_grid_points[j, k]
                    self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
                        colmat_irgp[:, :, :, k, :])

    # Expand per-mode properties (gv rotated by r; cv/gamma copied).
    for i, ir_gp in enumerate(self._ir_grid_points):
        gv_irgp = self._gv[ir_gp].copy()
        self._gv[ir_gp] = 0
        cv_irgp = self._cv[:, ir_gp, :].copy()
        self._cv[:, ir_gp, :] = 0
        gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
        self._gamma[:, :, ir_gp, :] = 0
        multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
        if self._gamma_iso is not None:
            gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
            self._gamma_iso[:, ir_gp, :] = 0
        for j, r in enumerate(self._rotations_cartesian):
            gp_r = rot_grid_points[j, ir_gp]
            self._gamma[:, :, gp_r, :] += gamma_irgp / multi
            if self._gamma_iso is not None:
                self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
            self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
            self._cv[:, gp_r, :] += cv_irgp / multi

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_weights(self):
"""Returns weights used for collision matrix and |X> and |f>
self._rot_grid_points : ndarray
shape=(ir_grid_points, point_operations), dtype='uintp'
r_gps : grid points of arms of k-star with duplicates
len(r_gps) == order of crystallographic point group
len(unique(r_gps)) == number of arms of the k-star
Returns
-------
weights : list
sqrt(g_k)/|g|, where g is the crystallographic point group and
g_k is the number of arms of k-star.
"""
weights = []
n = float(self._rot_grid_points.shape[1])
for r_gps in self._rot_grid_points:
weights.append(np.sqrt(len(np.unique(r_gps)) / n))
return weights
def _symmetrize_collision_matrix(self):
    """Symmetrize the collision matrix in place: A <- (A + A^T) / 2.

    Uses the C routine when available; otherwise a numpy fallback that
    works on a (size, size) view for each (sigma, T) pair.
    """
    start = time.time()
    try:
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(built-in) ")
            sys.stdout.flush()
        phono3c.symmetrize_collision_matrix(self._collision_matrix)
    except ImportError:
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(numpy) ")
            sys.stdout.flush()

        # Matrix dimension: (gp, band) or (gp, band, 3) flattened.
        if self._is_reducible_collision_matrix:
            size = np.prod(self._collision_matrix.shape[2:4])
        else:
            size = np.prod(self._collision_matrix.shape[2:5])

        for i in range(self._collision_matrix.shape[0]):
            for j in range(self._collision_matrix.shape[1]):
                # reshape gives a view, so += / /= modify in place.
                col_mat = self._collision_matrix[i, j].reshape(size, size)
                col_mat += col_mat.T
                col_mat /= 2

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _average_collision_matrix_by_degeneracy(self):
    """Average matrix elements belonging to degenerate phonon bands.

    Done in two passes: first over the row band index, then over the
    column band index, for each ir grid point.
    """
    start = time.time()

    if self._log_level:
        sys.stdout.write("- Averaging collision matrix elements "
                         "by phonon degeneracy ")
        sys.stdout.flush()

    col_mat = self._collision_matrix
    # Pass 1: average over degenerate row bands.
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)

            if self._is_reducible_collision_matrix:
                sum_col = (col_mat[:, :, gp, bi_set, :, :].sum(axis=2) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, gp, j, :, :] = sum_col
            else:
                sum_col = (
                    col_mat[:, :, i, bi_set, :, :, :, :].sum(axis=2) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, i, j, :, :, :, :] = sum_col

    # Pass 2: average over degenerate column bands.
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)
            if self._is_reducible_collision_matrix:
                sum_col = (col_mat[:, :, :, :, gp, bi_set].sum(axis=4) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, gp, j] = sum_col
            else:
                sum_col = (
                    col_mat[:, :, :, :, :, i, bi_set, :].sum(axis=5) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, :, i, j, :] = sum_col

    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_X(self, i_temp, weights, gv):
    """Return the driving vector X, flattened to shape (modes, 3).

    Built from weighted group velocities scaled by the per-mode
    factor  omega/(4 kB T^2) / sinh(omega/(2 kB T)); frequencies
    below the cutoff get a zero factor.  Returns zeros for T <= 0.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    X = gv.copy()
    if self._is_reducible_collision_matrix:
        num_mesh_points = np.prod(self._mesh)
        freqs = self._frequencies[:num_mesh_points]
    else:
        freqs = self._frequencies[self._ir_grid_points]

    t = self._temperatures[i_temp]
    # Frequencies at/below cutoff are marked with sinh = -1 so the
    # inverse below becomes 0 for those modes.
    sinh = np.where(freqs > self._cutoff_frequency,
                    np.sinh(freqs * THzToEv / (2 * Kb * t)),
                    -1.0)
    inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
    freqs_sinh = freqs * THzToEv * inv_sinh / (4 * Kb * t ** 2)

    for i, f in enumerate(freqs_sinh):
        X[i] *= weights[i]
        for j in range(num_band):
            X[i, j] *= f[j]

    if t > 0:
        return X.reshape(-1, 3)
    else:
        return np.zeros_like(X.reshape(-1, 3))
def _get_Y(self, i_sigma, i_temp, weights, X):
    """Return Y = Omega^+ X (pseudo-inverse applied to the driving
    vector), shape (modes, 3), and store the f-vectors."""
    solver = _select_solver(self._pinv_solver)
    num_band = self._primitive.get_number_of_atoms() * 3

    if self._is_reducible_collision_matrix:
        num_grid_points = np.prod(self._mesh)
        size = num_grid_points * num_band
    else:
        num_grid_points = len(self._ir_grid_points)
        size = num_grid_points * num_band * 3
    v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
    # Transpose eigvecs because colmat was solved by column major order
    if solver in [1, 2, 4, 5]:
        v = v.T

    start = time.time()

    if solver in [0, 1, 2, 3, 4, 5]:
        # Pseudo-inverse from the stored eigensolution:
        # Y = V diag(1/w) V^T X, with small |w| cut off.
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(np.dot) " % self._pinv_cutoff)
            sys.stdout.flush()
        e = self._get_eigvals_pinv(i_sigma, i_temp)
        if self._is_reducible_collision_matrix:
            X1 = np.dot(v.T, X)
            for i in range(3):
                X1[:, i] *= e
            Y = np.dot(v, X1)
        else:
            Y = np.dot(v, e * np.dot(v.T, X.ravel())).reshape(-1, 3)
    else:  # solver=6 This is slower as far as tested.
        # C routine overwrites the collision matrix with its own
        # pseudo-inverse; afterwards a plain matrix product suffices.
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(built-in) " % self._pinv_cutoff)
            sys.stdout.flush()

        w = self._collision_eigenvalues[i_sigma, i_temp]
        phono3c.pinv_from_eigensolution(self._collision_matrix,
                                        w,
                                        i_sigma,
                                        i_temp,
                                        self._pinv_cutoff,
                                        0)
        if self._is_reducible_collision_matrix:
            Y = np.dot(v, X)
        else:
            Y = np.dot(v, X.ravel()).reshape(-1, 3)

    self._set_f_vectors(Y, num_grid_points, weights)
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()

    return Y
def _set_f_vectors(self, Y, num_grid_points, weights):
# Collision matrix is half of that defined in Chaput's paper.
# Therefore Y is divided by 2.
num_band = self._primitive.get_number_of_atoms() * 3
self._f_vectors[:] = ((Y / 2).reshape(num_grid_points, num_band * 3).T
/ weights).T.reshape(self._f_vectors.shape)
def _get_eigvals_pinv(self, i_sigma, i_temp):
w = self._collision_eigenvalues[i_sigma, i_temp]
e = np.zeros_like(w)
for l, val in enumerate(w):
if abs(val) > self._pinv_cutoff:
e[l] = 1 / val
return e
def _set_kappa(self, i_sigma, i_temp, weights):
    """Compute kappa and mode kappa from the solved collision matrix
    for one (sigma, temperature) pair."""
    N = self._num_sampling_grid_points
    if self._is_reducible_collision_matrix:
        X = self._get_X(i_temp, weights, self._gv)
        num_mesh_points = np.prod(self._mesh)
        Y = self._get_Y(i_sigma, i_temp, weights, X)
        self._set_mean_free_path(i_sigma, i_temp, weights, Y)
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        # Full-mesh sum already counts every arm once, so normalize by
        # the number of rotations used for symmetrization.
        self._mode_kappa[i_sigma, i_temp] /= len(self._rotations_cartesian)
        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
    else:
        if self._solve_collective_phonon:
            self._set_mode_kappa_Chaput(i_sigma, i_temp, weights)
        else:
            X = self._get_X(i_temp, weights, self._gv)
            num_ir_grid_points = len(self._ir_grid_points)
            Y = self._get_Y(i_sigma, i_temp, weights, X)
            self._set_mean_free_path(i_sigma, i_temp, weights, Y)
            self._set_mode_kappa(self._mode_kappa,
                                 X,
                                 Y,
                                 num_ir_grid_points,
                                 self._rotations_cartesian,
                                 i_sigma,
                                 i_temp)
            # self._set_mode_kappa_from_mfp(weights,
            #                               num_ir_grid_points,
            #                               self._rotations_cartesian,
            #                               i_sigma,
            #                               i_temp)

        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
def _set_kappa_RTA(self, i_sigma, i_temp, weights):
    """Compute an RTA-like reference kappa from the matrix diagonal.

    Fix vs. original: the bare ``except:`` around the division guarded
    by ``np.seterr(all='raise')`` caught *everything* (including
    KeyboardInterrupt) and did not guarantee restoration of the numpy
    error state on unexpected exceptions.  Only the numeric failures
    (FloatingPointError raised by numpy under seterr, or
    ZeroDivisionError) are caught now, and the error state is restored
    in ``finally``.
    """
    N = self._num_sampling_grid_points
    num_band = self._primitive.get_number_of_atoms() * 3
    X = self._get_X(i_temp, weights, self._gv)
    Y = np.zeros_like(X)

    if self._is_reducible_collision_matrix:
        # This RTA is not equivalent to conductivity_RTA.
        # The lifetime is defined from the diagonal part of
        # the collision matrix.
        num_mesh_points = np.prod(self._mesh)
        size = num_mesh_points * num_band
        v_diag = np.diagonal(
            self._collision_matrix[i_sigma, i_temp].reshape(size, size))

        for gp in range(num_mesh_points):
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = gp * num_band + j
                    Y[i_mode, :] = X[i_mode, :] / v_diag[i_mode]
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        g = len(self._rotations_cartesian)
        self._mode_kappa_RTA[i_sigma, i_temp] /= g
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
    else:
        # This RTA is supposed to be the same as conductivity_RTA.
        num_ir_grid_points = len(self._ir_grid_points)
        size = num_ir_grid_points * num_band * 3
        for i, gp in enumerate(self._ir_grid_points):
            g = self._get_main_diagonal(i, i_sigma, i_temp)
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = i * num_band + j
                    # Promote numpy warnings to exceptions so that a
                    # vanishing linewidth is reported, not silently
                    # propagated as inf/nan.
                    old_settings = np.seterr(all='raise')
                    try:
                        Y[i_mode, :] = X[i_mode, :] / g[j]
                    except (ZeroDivisionError, FloatingPointError):
                        print("=" * 26 + " Warning " + "=" * 26)
                        print(" Unexpected physical condition of ph-ph "
                              "interaction calculation was found.")
                        print(" g[j]=%f at gp=%d, band=%d, freq=%f" %
                              (g[j], gp, j + 1, f))
                        print("=" * 61)
                    finally:
                        np.seterr(**old_settings)

        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_ir_grid_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
def _set_mode_kappa(self,
                    mode_kappa,
                    X,
                    Y,
                    num_grid_points,
                    rotations_cartesian,
                    i_sigma,
                    i_temp):
    """Accumulate the symmetrized outer products v (x) f into
    ``mode_kappa`` and apply the unit-conversion factor."""
    num_band = self._primitive.get_number_of_atoms() * 3
    for i, (v_gp, f_gp) in enumerate(zip(X.reshape(num_grid_points,
                                                   num_band, 3),
                                         Y.reshape(num_grid_points,
                                                   num_band, 3))):

        for j, (v, f) in enumerate(zip(v_gp, f_gp)):
            # Do not consider three lowest modes at Gamma-point
            # It is assumed that there are no imaginary modes.
            if (self._grid_address[i] == 0).all() and j < 3:
                continue

            if rotations_cartesian is None:
                sum_k = np.outer(v, f)
            else:
                sum_k = np.zeros((3, 3), dtype='double')
                for r in rotations_cartesian:
                    sum_k += np.outer(np.dot(r, v), np.dot(r, f))
            sum_k = sum_k + sum_k.T
            # Voigt-like ordering: xx, yy, zz, yz, xz, xy.
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]

    t = self._temperatures[i_temp]
    # Collision matrix is half of that defined in Chaput's paper.
    # Therefore here 2 is not necessary multiplied.
    # sum_k = sum_k + sum_k.T is equivalent to I(a,b) + I(b,a).
    mode_kappa[i_sigma, i_temp] *= self._conversion_factor * Kb * t ** 2
def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights):
    """Calculate mode kappa by the way in Laurent Chaput's PRL paper.

    This gives the different result from _set_mode_kappa and requires
    more memory space.
    """
    X = self._get_X(i_temp, weights, self._gv).ravel()
    num_ir_grid_points = len(self._ir_grid_points)
    num_band = self._primitive.get_number_of_atoms() * 3
    size = num_ir_grid_points * num_band * 3
    v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
    solver = _select_solver(self._pinv_solver)
    # Eigenvectors from these solvers come back in column-major order.
    if solver in [1, 2, 4, 5]:
        v = v.T
    e = self._get_eigvals_pinv(i_sigma, i_temp)
    t = self._temperatures[i_temp]

    # omega_inv = V diag(1/w) V^T  (pseudo-inverse of the matrix).
    omega_inv = np.empty(v.shape, dtype='double', order='C')
    np.dot(v, (e * v).T, out=omega_inv)
    Y = np.dot(omega_inv, X)
    self._set_f_vectors(Y, num_ir_grid_points, weights)
    # Voigt-like ordering: xx, yy, zz, yz, xz, xy.
    elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
    for i, vxf in enumerate(elems):
        mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
        self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
        if mat is not None:
            np.dot(mat, omega_inv, out=mat)
            # vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
            # vals = vals.reshape(num_ir_grid_points, num_band)
            # self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
            w = diagonalize_collision_matrix(mat,
                                             pinv_solver=self._pinv_solver,
                                             log_level=self._log_level)
            if solver in [1, 2, 4, 5]:
                mat = mat.T
            # Spectral decomposition of the mode contribution.
            spectra = np.dot(mat.T, X) ** 2 * w
            for s, eigvec in zip(spectra, mat.T):
                vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
                vals = vals.reshape(num_ir_grid_points, num_band)
                self._mode_kappa[i_sigma, i_temp, :, :, i] += vals

    factor = self._conversion_factor * Kb * t ** 2
    self._mode_kappa[i_sigma, i_temp] *= factor
def _set_mode_kappa_from_mfp(self,
                             weights,
                             num_grid_points,
                             rotations_cartesian,
                             i_sigma,
                             i_temp):
    """Compute mode kappa from mean free paths.

    Currently unused: the only call site (in _set_kappa) is commented
    out.
    """
    for i, (v_gp, mfp_gp, cv_gp) in enumerate(
            zip(self._gv, self._mfp[i_sigma, i_temp], self._cv[i_temp])):
        for j, (v, mfp, cv) in enumerate(zip(v_gp, mfp_gp, cv_gp)):
            sum_k = np.zeros((3, 3), dtype='double')
            for r in rotations_cartesian:
                sum_k += np.outer(np.dot(r, v), np.dot(r, mfp))
            sum_k = (sum_k + sum_k.T) / 2 * cv * weights[i] ** 2 * 2 * np.pi
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                self._mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
    # NOTE(review): this scales the WHOLE _mode_kappa array (all sigmas
    # and temperatures) by -conversion_factor on every call, so repeated
    # calls would compound the factor -- confirm before re-enabling.
    self._mode_kappa *= - self._conversion_factor
def _set_mean_free_path(self, i_sigma, i_temp, weights, Y):
    """Set mean free paths from the stored f-vectors.

    Modes with (near-)zero heat capacity are skipped and left at zero.
    """
    t = self._temperatures[i_temp]
    # shape = (num_grid_points, num_band, 3),
    for i, f_gp in enumerate(self._f_vectors):
        for j, f in enumerate(f_gp):
            cv = self._cv[i_temp, i, j]
            if cv < 1e-10:
                continue
            self._mfp[i_sigma, i_temp, i, j] = (
                - 2 * t * np.sqrt(Kb / cv) * f / (2 * np.pi))
def _show_log(self, i):
    """Print frequencies, group velocities, |gv| (and averaged ph-ph
    interaction when full-pp is on) for grid point ``i``."""
    gp = self._grid_points[i]
    frequencies = self._frequencies[gp]
    # Group velocity storage is indexed differently in the two layouts.
    if self._is_reducible_collision_matrix:
        gv = self._gv[gp]
    else:
        gv = self._gv[i]
    if self._is_full_pp:
        ave_pp = self._averaged_pp_interaction[i]
        text = "Frequency group velocity (x, y, z) |gv| Pqj"
    else:
        text = "Frequency group velocity (x, y, z) |gv|"
    if self._gv_delta_q is None:
        pass
    else:
        text += " (dq=%3.1e)" % self._gv_delta_q
    print(text)
    if self._is_full_pp:
        for f, v, pp in zip(frequencies, gv, ave_pp):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f %11.3e" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v), pp))
    else:
        for f, v in zip(frequencies, gv):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v)))

    sys.stdout.flush()
def _py_symmetrize_collision_matrix(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(3):
for l in range(num_ir_grid_points):
for m in range(num_band):
for n in range(3):
self._py_set_symmetrized_element(
i, j, k, l, m, n)
def _py_set_symmetrized_element(self, i, j, k, l, m, n):
sym_val = (self._collision_matrix[:, :, i, j, k, l, m, n] +
self._collision_matrix[:, :, l, m, n, i, j, k]) / 2
self._collision_matrix[:, :, i, j, k, l, m, n] = sym_val
self._collision_matrix[:, :, l, m, n, i, j, k] = sym_val
def _py_symmetrize_collision_matrix_no_kappa_stars(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(num_ir_grid_points):
for l in range(num_band):
self._py_set_symmetrized_element_no_kappa_stars(
i, j, k, l)
def _py_set_symmetrized_element_no_kappa_stars(self, i, j, k, l):
sym_val = (self._collision_matrix[:, :, i, j, k, l] +
self._collision_matrix[:, :, k, l, i, j]) / 2
self._collision_matrix[:, :, i, j, k, l] = sym_val
self._collision_matrix[:, :, k, l, i, j] = sym_val
|
atztogo/phono3py | phono3py/phonon3/conductivity_LBTE.py | Conductivity_LBTE._set_mode_kappa_Chaput | python | def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights):
X = self._get_X(i_temp, weights, self._gv).ravel()
num_ir_grid_points = len(self._ir_grid_points)
num_band = self._primitive.get_number_of_atoms() * 3
size = num_ir_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
solver = _select_solver(self._pinv_solver)
if solver in [1, 2, 4, 5]:
v = v.T
e = self._get_eigvals_pinv(i_sigma, i_temp)
t = self._temperatures[i_temp]
omega_inv = np.empty(v.shape, dtype='double', order='C')
np.dot(v, (e * v).T, out=omega_inv)
Y = np.dot(omega_inv, X)
self._set_f_vectors(Y, num_ir_grid_points, weights)
elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
for i, vxf in enumerate(elems):
mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
if mat is not None:
np.dot(mat, omega_inv, out=mat)
# vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
# vals = vals.reshape(num_ir_grid_points, num_band)
# self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
w = diagonalize_collision_matrix(mat,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
if solver in [1, 2, 4, 5]:
mat = mat.T
spectra = np.dot(mat.T, X) ** 2 * w
for s, eigvec in zip(spectra, mat.T):
vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
vals = vals.reshape(num_ir_grid_points, num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] += vals
factor = self._conversion_factor * Kb * t ** 2
self._mode_kappa[i_sigma, i_temp] *= factor | Calculate mode kappa by the way in Laurent Chaput's PRL paper.
This gives the different result from _set_mode_kappa and requires more
memory space. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/conductivity_LBTE.py#L1591-L1635 | null | class Conductivity_LBTE(Conductivity):
def __init__(self,
             interaction,
             symmetry,
             grid_points=None,
             temperatures=None,
             sigmas=None,
             sigma_cutoff=None,
             is_isotope=False,
             mass_variances=None,
             boundary_mfp=None,  # in micrometre
             solve_collective_phonon=False,
             is_reducible_collision_matrix=False,
             is_kappa_star=True,
             gv_delta_q=None,  # finite difference for group velocity
             is_full_pp=False,
             read_pp=False,
             pp_filename=None,
             pinv_cutoff=1.0e-8,
             pinv_solver=0,
             log_level=0):
    """Lattice thermal conductivity by direct solution of the LBTE.

    Most parameters are forwarded to ``Conductivity.__init__``; the
    additional ones control the direct solution:
        solve_collective_phonon: use the spectral (Chaput) scheme of
            _set_mode_kappa_Chaput instead of _set_mode_kappa.
        is_reducible_collision_matrix: store the collision matrix over
            the full mesh instead of the irreducible grid points.
        read_pp / pp_filename: read the ph-ph interaction strength
            from hdf5 instead of computing it.
        pinv_cutoff / pinv_solver: control of the pseudo-inversion.
    """
    # All attributes are pre-declared as None; real values are set by
    # the base-class constructor and by _allocate_values().
    self._pp = None
    self._temperatures = None
    self._sigmas = None
    self._sigma_cutoff = None
    self._is_kappa_star = None
    self._gv_delta_q = None
    self._is_full_pp = None
    self._log_level = None
    self._primitive = None
    self._dm = None
    self._frequency_factor_to_THz = None
    self._cutoff_frequency = None
    self._boundary_mfp = None

    self._symmetry = None
    self._point_operations = None
    self._rotations_cartesian = None

    self._grid_points = None
    self._grid_weights = None
    self._grid_address = None
    self._ir_grid_points = None
    self._ir_grid_weights = None

    self._kappa = None
    self._mode_kappa = None
    self._kappa_RTA = None
    self._mode_kappa_RTA = None

    self._read_gamma = False
    self._read_gamma_iso = False

    self._frequencies = None
    self._cv = None
    self._gv = None
    self._f_vectors = None
    self._gv_sum2 = None
    self._mfp = None
    self._gamma = None
    self._gamma_iso = None
    self._averaged_pp_interaction = None

    self._mesh = None
    self._conversion_factor = None

    self._is_isotope = None
    self._isotope = None
    self._mass_variances = None
    self._grid_point_count = None

    self._collision_eigenvalues = None

    Conductivity.__init__(self,
                          interaction,
                          symmetry,
                          grid_points=grid_points,
                          temperatures=temperatures,
                          sigmas=sigmas,
                          sigma_cutoff=sigma_cutoff,
                          is_isotope=is_isotope,
                          mass_variances=mass_variances,
                          boundary_mfp=boundary_mfp,
                          is_kappa_star=is_kappa_star,
                          gv_delta_q=gv_delta_q,
                          is_full_pp=is_full_pp,
                          log_level=log_level)

    self._is_reducible_collision_matrix = is_reducible_collision_matrix
    self._solve_collective_phonon = solve_collective_phonon
    if not self._is_kappa_star:
        # Without kappa-star symmetrization the full-mesh (reducible)
        # matrix layout is required.
        self._is_reducible_collision_matrix = True
    self._collision_matrix = None
    self._read_pp = read_pp
    self._pp_filename = pp_filename
    self._pinv_cutoff = pinv_cutoff
    self._pinv_solver = pinv_solver

    if grid_points is None:
        self._all_grid_points = True
    else:
        self._all_grid_points = False

    if self._temperatures is not None:
        self._allocate_values()
def set_kappa_at_sigmas(self):
    """Solve the collision matrix and set thermal conductivity.

    The direct solution needs the collision matrix over all
    irreducible grid points, so this aborts when only a subset of
    grid points was calculated.
    """
    if len(self._grid_points) != len(self._ir_grid_points):
        print("Collision matrix is not well created.")
        # sys is imported at module level (used throughout this file);
        # the previous redundant local "import sys" was removed.
        sys.exit(1)
    else:
        self._set_kappa_at_sigmas()
def set_collision_matrix(self, collision_matrix):
    """Set an externally prepared collision matrix (e.g. read from file)."""
    self._collision_matrix = collision_matrix
def get_f_vectors(self):
    """Return the f-vectors obtained from the direct solution."""
    return self._f_vectors
def get_collision_matrix(self):
    """Return the collision matrix array."""
    return self._collision_matrix
def get_collision_eigenvalues(self):
    """Return eigenvalues of the symmetrized collision matrix."""
    return self._collision_eigenvalues
def get_mean_free_path(self):
    """Return the mean-free-path array."""
    return self._mfp
def get_frequencies_all(self):
    """Return phonon frequencies on all grid points of the mesh."""
    num_mesh_points = np.prod(self._mesh)
    return self._frequencies[:num_mesh_points]
def get_kappa_RTA(self):
    """Return the RTA-like reference thermal conductivity."""
    return self._kappa_RTA
def get_mode_kappa_RTA(self):
    """Return the mode-resolved RTA-like thermal conductivity."""
    return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
    """Release per-grid-point work arrays (integration weights and
    ph-ph interaction strength) to reduce memory use."""
    self._collision.delete_integration_weights()
    self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
    """Calculate collision-matrix elements for the current grid point.

    Driven by the iteration in the Conductivity base class;
    ``self._grid_point_count`` points at the grid point being processed.
    """
    i = self._grid_point_count
    self._show_log_header(i)
    gp = self._grid_points[i]

    if not self._all_grid_points:
        # Only a single grid-point slot is stored; clear it before
        # accumulating elements for this grid point.
        self._collision_matrix[:] = 0

    if not self._read_gamma:
        self._collision.set_grid_point(gp)

        if self._log_level:
            print("Number of triplets: %d" %
                  len(self._pp.get_triplets_at_q()[0]))

        self._set_collision_matrix_at_sigmas(i)

    # Reducible storage is indexed by the grid-point id itself;
    # irreducible storage by position in the requested list.
    if self._is_reducible_collision_matrix:
        igp = gp
    else:
        igp = i
    self._set_harmonic_properties(i, igp)
    if self._isotope is not None:
        gamma_iso = self._get_gamma_isotope_at_sigmas(i)
        band_indices = self._pp.get_band_indices()
        self._gamma_iso[:, igp, :] = gamma_iso[:, band_indices]

    if self._log_level:
        self._show_log(i)
def _allocate_values(self):
    """Allocate result and work arrays.

    Two storage layouts exist:
    - reducible: per-point axes span the full mesh; the collision
      matrix has shape (sigma, T, gp, band0, gp, band).
    - irreducible (default): per-point axes span the requested grid
      points; the collision matrix carries Cartesian indices,
      (sigma, T, ir_gp, band0, 3, ir_gp, band, 3).
    """
    num_band0 = len(self._pp.get_band_indices())
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    num_temp = len(self._temperatures)
    num_mesh_points = np.prod(self._mesh)

    if self._is_reducible_collision_matrix:
        num_grid_points = num_mesh_points
    else:
        num_grid_points = len(self._grid_points)

    # With a subset of grid points only one collision-matrix slot is
    # kept and is overwritten per grid point (see _run_at_grid_point).
    if self._all_grid_points:
        num_stored_grid_points = num_grid_points
    else:
        num_stored_grid_points = 1

    self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
                           dtype='double', order='C')
    self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
                               dtype='double', order='C')
    self._gv = np.zeros((num_grid_points, num_band0, 3),
                        dtype='double', order='C')
    self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
                               dtype='double', order='C')
    self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
                             dtype='double', order='C')
    self._mfp = np.zeros((len(self._sigmas),
                          num_temp,
                          num_grid_points,
                          num_band0,
                          3), dtype='double', order='C')
    self._cv = np.zeros((num_temp, num_grid_points, num_band0),
                        dtype='double', order='C')
    if self._is_full_pp:
        self._averaged_pp_interaction = np.zeros(
            (num_grid_points, num_band0), dtype='double', order='C')
    if self._gamma is None:
        self._gamma = np.zeros((len(self._sigmas),
                                num_temp,
                                num_grid_points,
                                num_band0), dtype='double', order='C')
    if self._isotope is not None:
        self._gamma_iso = np.zeros((len(self._sigmas),
                                    num_grid_points,
                                    num_band0), dtype='double', order='C')

    if self._is_reducible_collision_matrix:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_mesh_points,
                                     num_band,
                                     6), dtype='double', order='C')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_mesh_points,
                                         num_band,
                                         6), dtype='double', order='C')
        self._collision = CollisionMatrix(
            self._pp,
            is_reducible_collision_matrix=True,
            log_level=self._log_level)
        if self._collision_matrix is None:
            self._collision_matrix = np.empty(
                (len(self._sigmas), num_temp,
                 num_stored_grid_points, num_band0,
                 num_mesh_points, num_band),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas), num_temp, num_mesh_points * num_band),
            dtype='double', order='C')
    else:
        self._mode_kappa = np.zeros((len(self._sigmas),
                                     num_temp,
                                     num_grid_points,
                                     num_band0,
                                     6), dtype='double')
        self._mode_kappa_RTA = np.zeros((len(self._sigmas),
                                         num_temp,
                                         num_grid_points,
                                         num_band0,
                                         6), dtype='double')
        # Orbit of each ir grid point under the point-group operations.
        self._rot_grid_points = np.zeros(
            (len(self._ir_grid_points), len(self._point_operations)),
            dtype='uintp')
        for i, ir_gp in enumerate(self._ir_grid_points):
            self._rot_grid_points[i] = get_grid_points_by_rotations(
                self._grid_address[ir_gp],
                self._point_operations,
                self._mesh)
        self._collision = CollisionMatrix(
            self._pp,
            point_operations=self._point_operations,
            ir_grid_points=self._ir_grid_points,
            rot_grid_points=self._rot_grid_points,
            log_level=self._log_level)
        if self._collision_matrix is None:
            self._collision_matrix = np.empty(
                (len(self._sigmas),
                 num_temp,
                 num_stored_grid_points, num_band0, 3,
                 num_ir_grid_points, num_band, 3),
                dtype='double', order='C')
            self._collision_matrix[:] = 0
        self._collision_eigenvalues = np.zeros(
            (len(self._sigmas),
             num_temp,
             num_ir_grid_points * num_band * 3),
            dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
    """Compute collision-matrix elements for grid point index ``i``.

    For every smearing width in ``self._sigmas`` and every temperature
    in ``self._temperatures`` the ph-ph interaction is (re)used or
    recalculated, then gamma (imaginary self-energy) and the collision
    matrix block are stored into ``self._gamma`` and
    ``self._collision_matrix``.
    """
    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "Calculating collision matrix with "
            if sigma is None:
                text += "tetrahedron method."
            else:
                text += "sigma=%s" % sigma
                if self._sigma_cutoff is None:
                    text += "."
                else:
                    text += "(%4.2f SD)." % self._sigma_cutoff
            print(text)
        self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
        self._collision.set_integration_weights()
        if self._read_pp:
            # Interaction strengths were precomputed and stored; read
            # them back and sanity-check the zero-weight mask.
            pp, _g_zero = read_pp_from_hdf5(
                self._mesh,
                grid_point=self._grid_points[i],
                sigma=sigma,
                sigma_cutoff=self._sigma_cutoff,
                filename=self._pp_filename,
                verbose=(self._log_level > 0))
            _, g_zero = self._collision.get_integration_weights()
            if self._log_level:
                if len(self._sigmas) > 1:
                    print("Multiple sigmas or mixing smearing and "
                          "tetrahedron method is not supported.")
            if _g_zero is not None and (_g_zero != g_zero).any():
                raise ValueError("Inconsistency found in g_zero.")
            self._collision.set_interaction_strength(pp)
        elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
            # Interaction does not depend on sigma in these cases, so
            # reuse what was computed for the first sigma.
            if self._log_level:
                print("Existing ph-ph interaction is used.")
        else:
            if self._log_level:
                print("Calculating ph-ph interaction...")
            self._collision.run_interaction(is_full_pp=self._is_full_pp)
        if self._is_full_pp and j == 0:
            self._averaged_pp_interaction[i] = (
                self._pp.get_averaged_interaction())
        for k, t in enumerate(self._temperatures):
            self._collision.set_temperature(t)
            self._collision.run()
            # i_data selects the storage slot: a real grid-point slot
            # when all grid points are kept, otherwise slot 0.
            if self._all_grid_points:
                if self._is_reducible_collision_matrix:
                    i_data = self._grid_points[i]
                else:
                    i_data = i
            else:
                i_data = 0
            self._gamma[j, k, i_data] = (
                self._collision.get_imag_self_energy())
            self._collision_matrix[j, k, i_data] = (
                self._collision.get_collision_matrix())
def _set_kappa_at_sigmas(self):
    """Solve the direct-solution LBTE and set kappa per (sigma, T).

    The collision matrix is first completed (diagonal part added,
    symmetry weights applied, degeneracy-averaged, symmetrized), then
    diagonalized for each sigma and temperature and converted to mode
    and total thermal conductivity.
    """
    if self._is_reducible_collision_matrix:
        if self._is_kappa_star:
            self._average_collision_matrix_by_degeneracy()
            self._expand_collisions()
        self._combine_reducible_collisions()
        # Full-BZ matrix: every grid point has unit weight.
        weights = np.ones(np.prod(self._mesh), dtype='intc')
        self._symmetrize_collision_matrix()
    else:
        self._combine_collisions()
        weights = self._get_weights()
        # Scale rows and columns by the sqrt(g_k)/|g| star weights.
        for i, w_i in enumerate(weights):
            for j, w_j in enumerate(weights):
                self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
        self._average_collision_matrix_by_degeneracy()
        self._symmetrize_collision_matrix()
    for j, sigma in enumerate(self._sigmas):
        if self._log_level:
            text = "----------- Thermal conductivity (W/m-k) "
            if sigma:
                text += "for sigma=%s -----------" % sigma
            else:
                text += "with tetrahedron method -----------"
            print(text)
            sys.stdout.flush()
        for k, t in enumerate(self._temperatures):
            if t > 0:
                # RTA-like reference first (uses the matrix diagonal),
                # then the full diagonalization-based solution.
                self._set_kappa_RTA(j, k, weights)
                w = diagonalize_collision_matrix(
                    self._collision_matrix,
                    i_sigma=j, i_temp=k,
                    pinv_solver=self._pinv_solver,
                    log_level=self._log_level)
                self._collision_eigenvalues[j, k] = w
                self._set_kappa(j, k, weights)
                if self._log_level:
                    print(("#%6s " + " %-10s" * 6) %
                          ("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
                    print(("%7.1f " + " %10.3f" * 6) %
                          ((t,) + tuple(self._kappa[j, k])))
                    print((" %6s " + " %10.3f" * 6) %
                          (("(RTA)",) + tuple(self._kappa_RTA[j, k])))
                    print("-" * 76)
                    sys.stdout.flush()
        sys.stdout.flush()
    if self._log_level:
        print('')
def _combine_collisions(self):
    """Add the diagonal (imag-self-energy) part into the collision matrix.

    Only rotations mapping an irreducible grid point onto itself
    contribute; the per-band main diagonal is added weighted by the
    Cartesian rotation matrix ``r``.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    for j, k in list(np.ndindex((len(self._sigmas),
                                 len(self._temperatures)))):
        for i, ir_gp in enumerate(self._ir_grid_points):
            for r, r_gp in zip(self._rotations_cartesian,
                               self._rot_grid_points[i]):
                # Skip rotations that move this grid point elsewhere.
                if ir_gp != r_gp:
                    continue
                main_diagonal = self._get_main_diagonal(i, j, k)
                for l in range(num_band):
                    self._collision_matrix[
                        j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
    """Add the diagonal (imag-self-energy) part for the full-BZ matrix."""
    num_band = self._primitive.get_number_of_atoms() * 3
    num_mesh_points = np.prod(self._mesh)
    for j, k in list(
            np.ndindex((len(self._sigmas), len(self._temperatures)))):
        for i in range(num_mesh_points):
            main_diagonal = self._get_main_diagonal(i, j, k)
            for l in range(num_band):
                # Reducible matrix has no Cartesian-direction axes.
                self._collision_matrix[
                    j, k, i, l, i, l] += main_diagonal[l]
def _expand_collisions(self):
    """Expand irreducible-grid-point data onto all BZ grid points.

    The collision matrix, gamma, isotope gamma, group velocities and
    heat capacities stored at irreducible points are distributed to
    their symmetry images (divided by the multiplicity of each point).
    """
    start = time.time()
    if self._log_level:
        sys.stdout.write("- Expanding properties to all grid points ")
        sys.stdout.flush()
    num_mesh_points = np.prod(self._mesh)
    num_rot = len(self._point_operations)
    # rot_grid_points[r, gp] = image of grid point gp under rotation r.
    rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype='uintp')
    for i in range(num_mesh_points):
        rot_grid_points[:, i] = get_grid_points_by_rotations(
            self._grid_address[i],
            self._point_operations,
            self._mesh)
    try:
        import phono3py._phono3py as phono3c
        phono3c.expand_collision_matrix(self._collision_matrix,
                                        self._ir_grid_points,
                                        rot_grid_points)
    except ImportError:
        # Pure-python fallback of expand_collision_matrix.
        print("Phono3py C-routine is not compiled correctly.")
        for i, ir_gp in enumerate(self._ir_grid_points):
            # multi = number of rotations fixing this grid point.
            multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
            colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
            colmat_irgp /= multi
            self._collision_matrix[:, :, ir_gp, :, :, :] = 0
            for j, r in enumerate(self._rotations_cartesian):
                gp_r = rot_grid_points[j, ir_gp]
                for k in range(num_mesh_points):
                    gp_c = rot_grid_points[j, k]
                    self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
                        colmat_irgp[:, :, :, k, :])
    # Expand the per-mode quantities (always in python).
    for i, ir_gp in enumerate(self._ir_grid_points):
        gv_irgp = self._gv[ir_gp].copy()
        self._gv[ir_gp] = 0
        cv_irgp = self._cv[:, ir_gp, :].copy()
        self._cv[:, ir_gp, :] = 0
        gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
        self._gamma[:, :, ir_gp, :] = 0
        multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
        if self._gamma_iso is not None:
            gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
            self._gamma_iso[:, ir_gp, :] = 0
        for j, r in enumerate(self._rotations_cartesian):
            gp_r = rot_grid_points[j, ir_gp]
            # Scalars are copied as-is; group velocity is rotated.
            self._gamma[:, :, gp_r, :] += gamma_irgp / multi
            if self._gamma_iso is not None:
                self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
            self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
            self._cv[:, gp_r, :] += cv_irgp / multi
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_weights(self):
    """Return weights used for the collision matrix and |X> and |f>.

    self._rot_grid_points : ndarray
        shape=(ir_grid_points, point_operations), dtype='uintp'
        Each row lists the arms of the k-star (with duplicates); its
        length is the point-group order |g| and the number of distinct
        entries is the star multiplicity g_k.

    Returns
    -------
    weights : list
        sqrt(g_k)/|g| for each irreducible grid point.
    """
    order = float(self._rot_grid_points.shape[1])
    return [np.sqrt(len(np.unique(arms)) / order)
            for arms in self._rot_grid_points]
def _symmetrize_collision_matrix(self):
    """Symmetrize the collision matrix: A <- (A + A^T) / 2 per (sigma, T)."""
    start = time.time()
    try:
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(built-in) ")
            sys.stdout.flush()
        phono3c.symmetrize_collision_matrix(self._collision_matrix)
    except ImportError:
        # Numpy fallback when the C extension is not available.
        if self._log_level:
            sys.stdout.write("- Making collision matrix symmetric "
                             "(numpy) ")
            sys.stdout.flush()
        # Matrix size: (gp, band) for reducible, (gp, band, 3) otherwise.
        if self._is_reducible_collision_matrix:
            size = np.prod(self._collision_matrix.shape[2:4])
        else:
            size = np.prod(self._collision_matrix.shape[2:5])
        for i in range(self._collision_matrix.shape[0]):
            for j in range(self._collision_matrix.shape[1]):
                col_mat = self._collision_matrix[i, j].reshape(size, size)
                # NOTE(review): in-place add with the aliased transpose
                # view relies on NumPy's ufunc overlap handling
                # (NumPy >= 1.13) -- verify minimum NumPy requirement.
                col_mat += col_mat.T
                col_mat /= 2
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _average_collision_matrix_by_degeneracy(self):
    """Average collision-matrix elements over degenerate phonon bands.

    Two passes: the first averages over the row (bra) band index, the
    second over the column (ket) band index, so degenerate bands end
    up with identical rows and columns.
    """
    start = time.time()
    # Average matrix elements belonging to degenerate bands
    if self._log_level:
        sys.stdout.write("- Averaging collision matrix elements "
                         "by phonon degeneracy ")
        sys.stdout.flush()
    col_mat = self._collision_matrix
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)
            if self._is_reducible_collision_matrix:
                # Reducible matrix is indexed by the grid point itself.
                sum_col = (col_mat[:, :, gp, bi_set, :, :].sum(axis=2) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, gp, j, :, :] = sum_col
            else:
                # Irreducible matrix is indexed by the ir-point index i.
                sum_col = (
                    col_mat[:, :, i, bi_set, :, :, :, :].sum(axis=2) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, i, j, :, :, :, :] = sum_col
    for i, gp in enumerate(self._ir_grid_points):
        freqs = self._frequencies[gp]
        deg_sets = degenerate_sets(freqs)
        for dset in deg_sets:
            bi_set = []
            for j in range(len(freqs)):
                if j in dset:
                    bi_set.append(j)
            if self._is_reducible_collision_matrix:
                sum_col = (col_mat[:, :, :, :, gp, bi_set].sum(axis=4) /
                           len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, gp, j] = sum_col
            else:
                sum_col = (
                    col_mat[:, :, :, :, :, i, bi_set, :].sum(axis=5) /
                    len(bi_set))
                for j in bi_set:
                    col_mat[:, :, :, :, :, i, j, :] = sum_col
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
def _get_X(self, i_temp, weights, gv):
    """Return |X> vectors: weighted group velocities times the mode
    thermal factor f*hbar*omega/(4*Kb*T^2*sinh(hbar*omega/2KbT)).

    Shape of the returned array is (num_modes, 3).  At T <= 0 a zero
    array of the same shape is returned.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    X = gv.copy()
    if self._is_reducible_collision_matrix:
        num_mesh_points = np.prod(self._mesh)
        freqs = self._frequencies[:num_mesh_points]
    else:
        freqs = self._frequencies[self._ir_grid_points]
    t = self._temperatures[i_temp]
    # Modes below the cutoff frequency get sinh=-1 so that inv_sinh=0
    # and they drop out of |X>.
    sinh = np.where(freqs > self._cutoff_frequency,
                    np.sinh(freqs * THzToEv / (2 * Kb * t)),
                    -1.0)
    inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
    freqs_sinh = freqs * THzToEv * inv_sinh / (4 * Kb * t ** 2)
    for i, f in enumerate(freqs_sinh):
        X[i] *= weights[i]
        for j in range(num_band):
            X[i, j] *= f[j]
    if t > 0:
        return X.reshape(-1, 3)
    else:
        return np.zeros_like(X.reshape(-1, 3))
def _get_Y(self, i_sigma, i_temp, weights, X):
    """Return |Y> = pinv(collision matrix) |X> and set the f-vectors.

    The pseudo-inverse is built from the eigensolution obtained in
    diagonalize_collision_matrix; eigenvalues below the pinv cutoff
    are dropped (see _get_eigvals_pinv).
    """
    solver = _select_solver(self._pinv_solver)
    num_band = self._primitive.get_number_of_atoms() * 3
    if self._is_reducible_collision_matrix:
        num_grid_points = np.prod(self._mesh)
        size = num_grid_points * num_band
    else:
        num_grid_points = len(self._ir_grid_points)
        size = num_grid_points * num_band * 3
    # v holds the eigenvectors written back into the collision matrix.
    v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
    # Transpose eigvecs because colmat was solved by column major order
    if solver in [1, 2, 4, 5]:
        v = v.T
    start = time.time()
    if solver in [0, 1, 2, 3, 4, 5]:
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(np.dot) " % self._pinv_cutoff)
            sys.stdout.flush()
        e = self._get_eigvals_pinv(i_sigma, i_temp)
        if self._is_reducible_collision_matrix:
            # Y = V diag(1/w) V^T X, done column-wise over x, y, z.
            X1 = np.dot(v.T, X)
            for i in range(3):
                X1[:, i] *= e
            Y = np.dot(v, X1)
        else:
            Y = np.dot(v, e * np.dot(v.T, X.ravel())).reshape(-1, 3)
    else:  # solver=6 This is slower as far as tested.
        import phono3py._phono3py as phono3c
        if self._log_level:
            sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
                             "(built-in) " % self._pinv_cutoff)
            sys.stdout.flush()
        w = self._collision_eigenvalues[i_sigma, i_temp]
        # Overwrites the collision matrix with its pseudo-inverse.
        phono3c.pinv_from_eigensolution(self._collision_matrix,
                                        w,
                                        i_sigma,
                                        i_temp,
                                        self._pinv_cutoff,
                                        0)
        if self._is_reducible_collision_matrix:
            Y = np.dot(v, X)
        else:
            Y = np.dot(v, X.ravel()).reshape(-1, 3)
    self._set_f_vectors(Y, num_grid_points, weights)
    if self._log_level:
        print("[%.3fs]" % (time.time() - start))
        sys.stdout.flush()
    return Y
def _set_f_vectors(self, Y, num_grid_points, weights):
    """Store f-vectors computed from |Y>, undoing the star weights."""
    # Collision matrix is half of that defined in Chaput's paper.
    # Therefore Y is divided by 2.
    num_band = self._primitive.get_number_of_atoms() * 3
    # Divide each grid point's block by its weight, then reshape back.
    self._f_vectors[:] = ((Y / 2).reshape(num_grid_points, num_band * 3).T
                          / weights).T.reshape(self._f_vectors.shape)
def _get_eigvals_pinv(self, i_sigma, i_temp):
    """Return pseudo-inverted eigenvalues at (i_sigma, i_temp).

    Eigenvalues whose magnitude does not exceed ``self._pinv_cutoff``
    are mapped to zero; the others are inverted.
    """
    eigvals = self._collision_eigenvalues[i_sigma, i_temp]
    inv = np.zeros_like(eigvals)
    keep = np.abs(eigvals) > self._pinv_cutoff
    inv[keep] = 1.0 / eigvals[keep]
    return inv
def _get_I(self, a, b, size, plus_transpose=True):
    """Return I matrix in Chaput's PRL paper.

    None is returned if I is zero matrix.
    """
    # Sum of outer products of the a-th and b-th rows of each rotation:
    # r_sum[i, j] = sum_r r[a, i] * r[b, j].
    r_sum = np.zeros((3, 3), dtype='double', order='C')
    for rot in self._rotations_cartesian:
        r_sum += np.outer(rot[a], rot[b])
    if plus_transpose:
        r_sum += r_sum.T
    # Return None not to consume computer for diagonalization
    if (np.abs(r_sum) < 1e-10).all():
        return None
    # Same as np.kron(np.eye(size), r_sum), but written explicitly to
    # guarantee C-contiguous 'double' memory.
    I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
    for idx in range(size):
        sl = slice(idx * 3, (idx + 1) * 3)
        I_mat[sl, sl] = r_sum
    return I_mat
def _set_kappa(self, i_sigma, i_temp, weights):
    """Compute mode and total kappa from the direct LBTE solution."""
    N = self._num_sampling_grid_points
    if self._is_reducible_collision_matrix:
        X = self._get_X(i_temp, weights, self._gv)
        num_mesh_points = np.prod(self._mesh)
        Y = self._get_Y(i_sigma, i_temp, weights, X)
        self._set_mean_free_path(i_sigma, i_temp, weights, Y)
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        # Rotation sum over the full BZ counts each mode |g| times.
        self._mode_kappa[i_sigma, i_temp] /= len(self._rotations_cartesian)
        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
    else:
        if self._solve_collective_phonon:
            self._set_mode_kappa_Chaput(i_sigma, i_temp, weights)
        else:
            X = self._get_X(i_temp, weights, self._gv)
            num_ir_grid_points = len(self._ir_grid_points)
            Y = self._get_Y(i_sigma, i_temp, weights, X)
            self._set_mean_free_path(i_sigma, i_temp, weights, Y)
            self._set_mode_kappa(self._mode_kappa,
                                 X,
                                 Y,
                                 num_ir_grid_points,
                                 self._rotations_cartesian,
                                 i_sigma,
                                 i_temp)
            # self._set_mode_kappa_from_mfp(weights,
            #                               num_ir_grid_points,
            #                               self._rotations_cartesian,
            #                               i_sigma,
            #                               i_temp)
        self._kappa[i_sigma, i_temp] = (
            self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
def _set_kappa_RTA(self, i_sigma, i_temp, weights):
    """Compute an RTA-like thermal conductivity for comparison.

    The single-mode lifetime is taken from the diagonal part of the
    collision matrix, so |Y> is obtained by element-wise division
    instead of the pseudo-inversion used in ``_set_kappa``.  Results
    are stored into ``self._mode_kappa_RTA`` and ``self._kappa_RTA``
    at (i_sigma, i_temp).
    """
    N = self._num_sampling_grid_points
    num_band = self._primitive.get_number_of_atoms() * 3
    X = self._get_X(i_temp, weights, self._gv)
    Y = np.zeros_like(X)
    if self._is_reducible_collision_matrix:
        # This RTA is not equivalent to conductivity_RTA.
        # The lifetime is defined from the diagonal part of
        # collision matrix.
        num_mesh_points = np.prod(self._mesh)
        size = num_mesh_points * num_band
        v_diag = np.diagonal(
            self._collision_matrix[i_sigma, i_temp].reshape(size, size))
        for gp in range(num_mesh_points):
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = gp * num_band + j
                    Y[i_mode, :] = X[i_mode, :] / v_diag[i_mode]
        # Putting self._rotations_cartesian is to symmetrize kappa.
        # None can be put instead for watching pure information.
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_mesh_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        g = len(self._rotations_cartesian)
        self._mode_kappa_RTA[i_sigma, i_temp] /= g
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
    else:
        # This RTA is supposed to be the same as conductivity_RTA.
        num_ir_grid_points = len(self._ir_grid_points)
        for i, gp in enumerate(self._ir_grid_points):
            g = self._get_main_diagonal(i, i_sigma, i_temp)
            frequencies = self._frequencies[gp]
            for j, f in enumerate(frequencies):
                if f > self._cutoff_frequency:
                    i_mode = i * num_band + j
                    # Trap invalid division (g[j] <= 0 etc.) explicitly;
                    # the affected |Y> row is left at zero.  Bug fix:
                    # the previous bare ``except:`` swallowed every
                    # exception (incl. KeyboardInterrupt), and the
                    # np.seterr state was not restored on failure.
                    old_settings = np.seterr(all='raise')
                    try:
                        Y[i_mode, :] = X[i_mode, :] / g[j]
                    except FloatingPointError:
                        print("=" * 26 + " Warning " + "=" * 26)
                        print(" Unexpected physical condition of ph-ph "
                              "interaction calculation was found.")
                        print(" g[j]=%f at gp=%d, band=%d, freq=%f" %
                              (g[j], gp, j + 1, f))
                        print("=" * 61)
                    finally:
                        np.seterr(**old_settings)
        self._set_mode_kappa(self._mode_kappa_RTA,
                             X,
                             Y,
                             num_ir_grid_points,
                             self._rotations_cartesian,
                             i_sigma,
                             i_temp)
        self._kappa_RTA[i_sigma, i_temp] = (
            self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
            N)
def _set_mode_kappa(self,
                    mode_kappa,
                    X,
                    Y,
                    num_grid_points,
                    rotations_cartesian,
                    i_sigma,
                    i_temp):
    """Fill ``mode_kappa[i_sigma, i_temp]`` from |X> and |Y>.

    Per mode, kappa ~ sum over rotations of outer(R v, R f), then
    symmetrized via sum_k + sum_k.T, packed into 6 Voigt components
    (xx, yy, zz, yz, xz, xy) and converted to W/m-K.
    """
    num_band = self._primitive.get_number_of_atoms() * 3
    for i, (v_gp, f_gp) in enumerate(zip(X.reshape(num_grid_points,
                                                   num_band, 3),
                                         Y.reshape(num_grid_points,
                                                   num_band, 3))):
        for j, (v, f) in enumerate(zip(v_gp, f_gp)):
            # Do not consider three lowest modes at Gamma-point
            # It is assumed that there are no imaginary modes.
            # NOTE(review): ``self._grid_address[i]`` uses the loop
            # counter; on the irreducible path this matches the grid
            # address only at Gamma (index 0 in both) -- verify.
            if (self._grid_address[i] == 0).all() and j < 3:
                continue
            if rotations_cartesian is None:
                sum_k = np.outer(v, f)
            else:
                sum_k = np.zeros((3, 3), dtype='double')
                for r in rotations_cartesian:
                    sum_k += np.outer(np.dot(r, v), np.dot(r, f))
            sum_k = sum_k + sum_k.T
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
    t = self._temperatures[i_temp]
    # Collision matrix is half of that defined in Chaput's paper.
    # Therefore here 2 is not necessary multiplied.
    # sum_k = sum_k + sum_k.T is equivalent to I(a,b) + I(b,a).
    mode_kappa[i_sigma, i_temp] *= self._conversion_factor * Kb * t ** 2
def _set_mode_kappa_from_mfp(self,
                             weights,
                             num_grid_points,
                             rotations_cartesian,
                             i_sigma,
                             i_temp):
    """Alternative mode-kappa from mean free paths (kept for reference;
    see the commented-out call site in _set_kappa)."""
    for i, (v_gp, mfp_gp, cv_gp) in enumerate(
            zip(self._gv, self._mfp[i_sigma, i_temp], self._cv[i_temp])):
        for j, (v, mfp, cv) in enumerate(zip(v_gp, mfp_gp, cv_gp)):
            # Rotation-symmetrized outer product of velocity and MFP.
            sum_k = np.zeros((3, 3), dtype='double')
            for r in rotations_cartesian:
                sum_k += np.outer(np.dot(r, v), np.dot(r, mfp))
            sum_k = (sum_k + sum_k.T) / 2 * cv * weights[i] ** 2 * 2 * np.pi
            for k, vxf in enumerate(
                    ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
                self._mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
    self._mode_kappa *= - self._conversion_factor
def _set_mean_free_path(self, i_sigma, i_temp, weights, Y):
    """Convert f-vectors into mean free paths stored in self._mfp.

    NOTE(review): ``weights`` and ``Y`` are unused here; the MFP is
    derived from ``self._f_vectors`` set in _set_f_vectors.
    """
    t = self._temperatures[i_temp]
    # shape = (num_grid_points, num_band, 3),
    for i, f_gp in enumerate(self._f_vectors):
        for j, f in enumerate(f_gp):
            cv = self._cv[i_temp, i, j]
            # Skip modes with (near-)zero heat capacity: avoids 1/0.
            if cv < 1e-10:
                continue
            self._mfp[i_sigma, i_temp, i, j] = (
                - 2 * t * np.sqrt(Kb / cv) * f / (2 * np.pi))
def _show_log(self, i):
    """Print per-band frequency, group velocity, |gv| and (when the
    full ph-ph interaction is computed) the averaged strength Pqj for
    the i-th calculated grid point."""
    gp = self._grid_points[i]
    frequencies = self._frequencies[gp]
    # gv is indexed by grid point for the reducible (full-BZ) case,
    # by the calculation index otherwise.
    if self._is_reducible_collision_matrix:
        gv = self._gv[gp]
    else:
        gv = self._gv[i]
    if self._is_full_pp:
        ave_pp = self._averaged_pp_interaction[i]
        text = "Frequency group velocity (x, y, z) |gv| Pqj"
    else:
        text = "Frequency group velocity (x, y, z) |gv|"
    # Idiom fix: was "if ... is None: pass / else: ..." (dead branch).
    if self._gv_delta_q is not None:
        text += " (dq=%3.1e)" % self._gv_delta_q
    print(text)
    if self._is_full_pp:
        for f, v, pp in zip(frequencies, gv, ave_pp):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f %11.3e" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v), pp))
    else:
        for f, v in zip(frequencies, gv):
            print("%8.3f (%8.3f %8.3f %8.3f) %8.3f" %
                  (f, v[0], v[1], v[2], np.linalg.norm(v)))
    sys.stdout.flush()
def _py_symmetrize_collision_matrix(self):
    """Pure-python symmetrization over all (gp, band, xyz) index pairs."""
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    shape = (num_ir_grid_points, num_band, 3,
             num_ir_grid_points, num_band, 3)
    # np.ndindex iterates in the same row-major order as the original
    # six nested loops.
    for i, j, k, l, m, n in np.ndindex(shape):
        self._py_set_symmetrized_element(i, j, k, l, m, n)
def _py_set_symmetrized_element(self, i, j, k, l, m, n):
    """Replace one element pair by its symmetric average over all
    (sigma, temperature) slices."""
    cm = self._collision_matrix
    mean = (cm[:, :, i, j, k, l, m, n] + cm[:, :, l, m, n, i, j, k]) * 0.5
    cm[:, :, i, j, k, l, m, n] = mean
    cm[:, :, l, m, n, i, j, k] = mean
def _py_symmetrize_collision_matrix_no_kappa_stars(self):
    """Pure-python symmetrization for the matrix without Cartesian axes."""
    num_band = self._primitive.get_number_of_atoms() * 3
    num_ir_grid_points = len(self._ir_grid_points)
    shape = (num_ir_grid_points, num_band, num_ir_grid_points, num_band)
    # Same row-major visiting order as the original four nested loops.
    for i, j, k, l in np.ndindex(shape):
        self._py_set_symmetrized_element_no_kappa_stars(i, j, k, l)
def _py_set_symmetrized_element_no_kappa_stars(self, i, j, k, l):
    """Replace one element pair by its symmetric average over all
    (sigma, temperature) slices (reducible-matrix layout)."""
    cm = self._collision_matrix
    mean = (cm[:, :, i, j, k, l] + cm[:, :, k, l, i, j]) * 0.5
    cm[:, :, i, j, k, l] = mean
    cm[:, :, k, l, i, j] = mean
|
atztogo/phono3py | phono3py/phonon3/displacement_fc3.py | get_third_order_displacements | python | def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False):
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds | Create displacement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]} | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/displacement_fc3.py#L52-L148 | [
"def get_reduced_site_symmetry(site_sym, direction, symprec=1e-5):\n reduced_site_sym = []\n for rot in site_sym:\n if (abs(direction - np.dot(direction, rot.T)) < symprec).all():\n reduced_site_sym.append(rot)\n return np.array(reduced_site_sym, dtype='intc')\n",
"def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):\n \"\"\"Find least orbits for a centering atom\"\"\"\n orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)\n mapping = np.arange(cell.get_number_of_atoms())\n\n for i, orb in enumerate(orbits):\n for num in np.unique(orb):\n if mapping[num] > mapping[i]:\n mapping[num] = mapping[i]\n\n return np.unique(mapping)\n",
"def get_next_displacements(atom1,\n atom2,\n reduced_site_sym,\n lattice,\n positions,\n symprec,\n is_diagonal):\n # Bond symmetry between first and second atoms.\n reduced_bond_sym = get_bond_symmetry(\n reduced_site_sym,\n lattice,\n positions,\n atom1,\n atom2,\n symprec)\n\n # Since displacement of first atom breaks translation\n # symmetry, the crystal symmetry is reduced to point\n # symmetry and it is equivalent to the site symmetry\n # on the first atom. Therefore site symmetry on the\n # second atom with the displacement is equivalent to\n # this bond symmetry.\n if is_diagonal:\n disps_second = get_displacement(reduced_bond_sym)\n else:\n disps_second = get_displacement(reduced_bond_sym, directions_axis)\n dds_atom2 = {'number': atom2, 'directions': []}\n for disp2 in disps_second:\n dds_atom2['directions'].append(disp2)\n if is_minus_displacement(disp2, reduced_bond_sym):\n dds_atom2['directions'].append(-disp2)\n\n return dds_atom2\n",
"def get_equivalent_smallest_vectors(atom_number_supercell,\n atom_number_primitive,\n supercell,\n symprec):\n s_pos = supercell.get_scaled_positions()\n svecs, multi = get_smallest_vectors(supercell.get_cell(),\n [s_pos[atom_number_supercell]],\n [s_pos[atom_number_primitive]],\n symprec=symprec)\n return svecs[0, 0]\n"
] | import numpy as np
from phonopy.harmonic.displacement import (get_least_displacements,
directions_axis, get_displacement,
is_minus_displacement)
from phonopy.structure.cells import get_smallest_vectors
def direction_to_displacement(dataset,
                              distance,
                              supercell,
                              cutoff_distance=None):
    """Convert a direction dataset into a Cartesian displacement dataset.

    Each reduced-coordinate direction is turned into a Cartesian vector
    of length ``distance``.  When ``cutoff_distance`` is given, each
    second-atom entry additionally carries an ``included`` flag telling
    whether the pair distance is below the cutoff.
    """
    lattice = supercell.get_cell().T

    def _to_cartesian(direction):
        # Reduced direction -> Cartesian vector scaled to `distance`.
        vec = np.dot(direction, lattice.T)
        return vec * (distance / np.linalg.norm(vec))

    new_dataset = {'natom': supercell.get_number_of_atoms()}
    if cutoff_distance is not None:
        new_dataset['cutoff_distance'] = cutoff_distance
    first_list = []
    for first in dataset:
        second_list = []
        for second in first['second_atoms']:
            pair_distance = second['distance']
            included = (cutoff_distance is None or
                        pair_distance < cutoff_distance)
            for direction2 in second['directions']:
                entry = {'number': second['number'],
                         'direction': direction2,
                         'displacement': _to_cartesian(direction2),
                         'pair_distance': pair_distance}
                if cutoff_distance is not None:
                    entry['included'] = included
                second_list.append(entry)
        first_list.append({'number': first['number'],
                           'direction': first['direction'],
                           'displacement': _to_cartesian(first['direction']),
                           'second_atoms': second_list})
    new_dataset['first_atoms'] = first_list
    return new_dataset
def get_next_displacements(atom1,
                           atom2,
                           reduced_site_sym,
                           lattice,
                           positions,
                           symprec,
                           is_diagonal):
    """Collect displacement directions for the second atom (Atom 2).

    Returns a dict {'number': atom2, 'directions': [...]}, appending
    the minus direction whenever it is required by the bond symmetry.
    """
    # Site symmetry reduced to operations keeping the atom1-atom2 bond.
    bond_sym = get_bond_symmetry(reduced_site_sym,
                                 lattice,
                                 positions,
                                 atom1,
                                 atom2,
                                 symprec)
    # With atom1 displaced, translational symmetry is broken and the
    # symmetry remaining at atom2 is exactly this bond symmetry.
    if is_diagonal:
        candidates = get_displacement(bond_sym)
    else:
        candidates = get_displacement(bond_sym, directions_axis)
    directions = []
    for d in candidates:
        directions.append(d)
        if is_minus_displacement(d, bond_sym):
            directions.append(-d)
    return {'number': atom2, 'directions': directions}
def get_reduced_site_symmetry(site_sym, direction, symprec=1e-5):
    """Return the site-symmetry operations that leave ``direction``
    invariant (within ``symprec``), as an 'intc' ndarray."""
    kept = [rot for rot in site_sym
            if (np.abs(direction - np.dot(direction, rot.T)) < symprec).all()]
    return np.array(kept, dtype='intc')
def get_bond_symmetry(site_symmetry,
                      lattice,
                      positions,
                      atom_center,
                      atom_disp,
                      symprec=1e-5):
    """
    Bond symmetry is the symmetry operations that keep the symmetry
    of the cell containing two fixed atoms.
    """
    center = positions[atom_center]
    target = positions[atom_disp]
    bond_sym = []
    for rot in site_symmetry:
        rotated = np.dot(target - center, rot.T) + center
        # Compare in reduced coordinates modulo lattice translations,
        # then measure the residual in Cartesian distance.
        residual = target - rotated
        residual -= np.rint(residual)
        if np.linalg.norm(np.dot(lattice, residual)) < symprec:
            bond_sym.append(rot)
    return np.array(bond_sym)
def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
    """Find least orbits for a centering atom"""
    orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
    mapping = np.arange(cell.get_number_of_atoms())
    # Propagate the smallest representative index through each orbit.
    # mapping[i] cannot change within its own iteration (the i == i
    # comparison never triggers an assignment), so caching it is safe.
    for i, orbit in enumerate(orbits):
        smallest = mapping[i]
        for member in np.unique(orbit):
            if mapping[member] > smallest:
                mapping[member] = smallest
    return np.unique(mapping)
def _get_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
    """Map every atom to the atom it reaches under each site-symmetry
    operation about ``positions[atom_index]``.

    Returns
    -------
    ndarray, shape=(num_atoms, num_site_sym)
        orbits[a, s] is the atom index that atom ``a`` maps onto under
        site-symmetry operation ``s``.

    Raises
    ------
    ValueError
        If some rotated position has no matching atom ("Site symmetry
        is broken.").
    """
    lattice = cell.get_cell().T
    positions = cell.get_scaled_positions()
    center = positions[atom_index]
    # orbits[num_atoms, num_site_sym]
    orbits = []
    for pos in positions:
        mapping = []
        for rot in site_symmetry:
            rot_pos = np.dot(pos - center, rot.T) + center
            # Bug fix: the inner loop previously rebound ``pos``
            # (``for i, pos in enumerate(positions)``), so subsequent
            # rotations were applied to the last matched position
            # instead of the original atom position.
            for i, cand in enumerate(positions):
                diff = cand - rot_pos
                diff -= np.rint(diff)
                dist = np.linalg.norm(np.dot(lattice, diff))
                if dist < symprec:
                    mapping.append(i)
                    break
        if len(mapping) < len(site_symmetry):
            print("Site symmetry is broken.")
            raise ValueError
        else:
            orbits.append(mapping)
    return np.array(orbits)
def get_equivalent_smallest_vectors(atom_number_supercell,
                                    atom_number_primitive,
                                    supercell,
                                    symprec):
    """Return the shortest vectors (in reduced coordinates) connecting
    the two given atoms, equivalent under lattice translations."""
    scaled = supercell.get_scaled_positions()
    svecs, _ = get_smallest_vectors(supercell.get_cell(),
                                    [scaled[atom_number_supercell]],
                                    [scaled[atom_number_primitive]],
                                    symprec=symprec)
    return svecs[0, 0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.