repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
spesmilo/electrum | electrum/interface.py | 2 | 50001 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import ssl
import sys
import traceback
import asyncio
import socket
from typing import Tuple, Union, List, TYPE_CHECKING, Optional, Set, NamedTuple, Any, Sequence, Dict
from collections import defaultdict
from ipaddress import IPv4Network, IPv6Network, ip_address, IPv6Address, IPv4Address
import itertools
import logging
import hashlib
import functools
import aiorpcx
from aiorpcx import RPCSession, Notification, NetAddress, NewlineFramer
from aiorpcx.curio import timeout_after, TaskTimeout
from aiorpcx.jsonrpc import JSONRPC, CodeMessageError
from aiorpcx.rawsocket import RSClient
import certifi
from .util import (ignore_exceptions, log_exceptions, bfh, MySocksProxy,
is_integer, is_non_negative_integer, is_hash256_str, is_hex_str,
is_int_or_float, is_non_negative_int_or_float, OldTaskGroup)
from . import util
from . import x509
from . import pem
from . import version
from . import blockchain
from .blockchain import Blockchain, HEADER_SIZE
from . import bitcoin
from . import constants
from .i18n import _
from .logging import Logger
from .transaction import Transaction
if TYPE_CHECKING:
from .network import Network
from .simple_config import SimpleConfig
# Path to the CA bundle shipped with certifi; used to validate CA-signed server certs.
ca_path = certifi.where()

# Bucket name under which all Tor onion servers are grouped together
# (see Interface.bucket_based_on_ipaddress).
BUCKET_NAME_OF_ONION_SERVERS = 'onion'

# Default cap on the size of a single incoming JSON-RPC message
# (overridable via the 'network_max_incoming_msg_size' config key).
MAX_INCOMING_MSG_SIZE = 1_000_000  # in bytes

# 't' = plaintext TCP, 's' = SSL/TLS
_KNOWN_NETWORK_PROTOCOLS = {'t', 's'}
PREFERRED_NETWORK_PROTOCOL = 's'
assert PREFERRED_NETWORK_PROTOCOL in _KNOWN_NETWORK_PROTOCOLS
class NetworkTimeout:
    """Timeout profiles for network requests.

    ``Generic`` is the default profile; ``Urgent`` uses shorter timeouts for
    requests that should fail fast (e.g. while a lock is held).
    """
    # seconds
    class Generic:
        NORMAL = 30
        RELAXED = 45
        MOST_RELAXED = 600

    class Urgent(Generic):
        NORMAL = 10
        RELAXED = 20
        MOST_RELAXED = 60
def assert_non_negative_integer(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is a non-negative integer."""
    if is_non_negative_integer(val):
        return
    raise RequestCorrupted(f'{val!r} should be a non-negative integer')
def assert_integer(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is an integer."""
    if is_integer(val):
        return
    raise RequestCorrupted(f'{val!r} should be an integer')
def assert_int_or_float(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is an int or a float."""
    if is_int_or_float(val):
        return
    raise RequestCorrupted(f'{val!r} should be int or float')
def assert_non_negative_int_or_float(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is a non-negative int or float."""
    if is_non_negative_int_or_float(val):
        return
    raise RequestCorrupted(f'{val!r} should be a non-negative int or float')
def assert_hash256_str(val: Any) -> None:
    """Raise RequestCorrupted unless *val* looks like a hex-encoded hash256 string."""
    if is_hash256_str(val):
        return
    raise RequestCorrupted(f'{val!r} should be a hash256 str')
def assert_hex_str(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is a hex string."""
    if is_hex_str(val):
        return
    raise RequestCorrupted(f'{val!r} should be a hex str')
def assert_dict_contains_field(d: Any, *, field_name: str) -> Any:
    """Check that *d* is a dict containing *field_name*, and return that field's value.

    Raises RequestCorrupted when *d* is not a dict or the field is absent.
    """
    if not isinstance(d, dict):
        raise RequestCorrupted(f'{d!r} should be a dict')
    if field_name in d:
        return d[field_name]
    raise RequestCorrupted(f'required field {field_name!r} missing from dict')
def assert_list_or_tuple(val: Any) -> None:
    """Raise RequestCorrupted unless *val* is a list or a tuple."""
    if isinstance(val, (list, tuple)):
        return
    raise RequestCorrupted(f'{val!r} should be a list or tuple')
class NotificationSession(RPCSession):
    """JSON-RPC session that additionally handles server-pushed notifications.

    Subscriptions are keyed by (method, params). Incoming notifications are
    fanned out to every queue registered for that key, and the latest result
    per key is cached so that repeat subscribers can be served from the cache
    without another network round-trip.
    """

    def __init__(self, *args, interface: 'Interface', **kwargs):
        super(NotificationSession, self).__init__(*args, **kwargs)
        self.subscriptions = defaultdict(list)  # subscription key -> list of asyncio.Queue
        self.cache = {}  # subscription key -> latest result received for that key
        self.default_timeout = NetworkTimeout.Generic.NORMAL
        self._msg_counter = itertools.count(start=1)  # ids used only for log correlation
        self.interface = interface
        self.cost_hard_limit = 0  # disable aiorpcx resource limits

    async def handle_request(self, request):
        """Handle an incoming message from the server.

        Only Notification messages for existing subscriptions are expected;
        anything else closes the session.
        """
        self.maybe_log(f"--> {request}")
        try:
            if isinstance(request, Notification):
                # by convention, the last positional arg is the result; the rest are params
                params, result = request.args[:-1], request.args[-1]
                key = self.get_hashable_key_for_rpc_call(request.method, params)
                if key in self.subscriptions:
                    self.cache[key] = result
                    for queue in self.subscriptions[key]:
                        await queue.put(request.args)
                else:
                    raise Exception(f'unexpected notification')
            else:
                raise Exception(f'unexpected request. not a notification')
        except Exception as e:
            self.interface.logger.info(f"error handling request {request}. exc: {repr(e)}")
            await self.close()

    async def send_request(self, *args, timeout=None, **kwargs):
        """Send a request to the server, with logging and timeout normalization.

        Raises RequestTimedOut instead of TaskTimeout/asyncio.TimeoutError so
        callers see a GracefulDisconnect subclass.
        """
        # note: semaphores/timeouts/backpressure etc are handled by
        # aiorpcx. the timeout arg here in most cases should not be set
        msg_id = next(self._msg_counter)
        self.maybe_log(f"<-- {args} {kwargs} (id: {msg_id})")
        try:
            # note: RPCSession.send_request raises TaskTimeout in case of a timeout.
            # TaskTimeout is a subclass of CancelledError, which is *suppressed* in TaskGroups
            response = await asyncio.wait_for(
                super().send_request(*args, **kwargs),
                timeout)
        except (TaskTimeout, asyncio.TimeoutError) as e:
            raise RequestTimedOut(f'request timed out: {args} (id: {msg_id})') from e
        except CodeMessageError as e:
            self.maybe_log(f"--> {repr(e)} (id: {msg_id})")
            raise
        else:
            self.maybe_log(f"--> {response} (id: {msg_id})")
            return response

    def set_default_timeout(self, timeout):
        """Configure aiorpcx's per-request timeout and max send delay."""
        self.sent_request_timeout = timeout
        self.max_send_delay = timeout

    async def subscribe(self, method: str, params: List, queue: asyncio.Queue):
        """Register *queue* to receive notifications for (method, params).

        The current result (from cache or a fresh request) is immediately
        put on the queue, in the same [params..., result] shape notifications use.
        """
        # note: until the cache is written for the first time,
        # each 'subscribe' call might make a request on the network.
        key = self.get_hashable_key_for_rpc_call(method, params)
        self.subscriptions[key].append(queue)
        if key in self.cache:
            result = self.cache[key]
        else:
            result = await self.send_request(method, params)
            self.cache[key] = result
        await queue.put(params + [result])

    def unsubscribe(self, queue):
        """Unsubscribe a callback to free object references to enable GC."""
        # note: we can't unsubscribe from the server, so we keep receiving
        # subsequent notifications
        for v in self.subscriptions.values():
            if queue in v:
                v.remove(queue)

    @classmethod
    def get_hashable_key_for_rpc_call(cls, method, params):
        """Hashable index for subscriptions and cache"""
        return str(method) + repr(params)

    def maybe_log(self, msg: str) -> None:
        # only log when message-dumping is enabled for this interface or globally
        if not self.interface: return
        if self.interface.debug or self.interface.network.debug:
            self.interface.logger.debug(msg)

    def default_framer(self):
        # overridden so that max_size can be customized
        max_size = int(self.interface.network.config.get('network_max_incoming_msg_size',
                                                         MAX_INCOMING_MSG_SIZE))
        return NewlineFramer(max_size=max_size)

    async def close(self, *, force_after: int = None):
        """Closes the connection and waits for it to be closed.
        We try to flush buffered data to the wire, which can take some time.
        """
        if force_after is None:
            # We give up after a while and just abort the connection.
            # Note: specifically if the server is running Fulcrum, waiting seems hopeless,
            # the connection must be aborted (see https://github.com/cculianu/Fulcrum/issues/76)
            # Note: if the ethernet cable was pulled or wifi disconnected, that too might
            # wait until this timeout is triggered
            force_after = 1  # seconds
        await super().close(force_after=force_after)
class NetworkException(Exception): pass  # root of this module's network-error hierarchy
class GracefulDisconnect(NetworkException):
    """Raised to tear down a server connection in a controlled manner.

    The optional *log_level* overrides how loudly the disconnect is logged
    (see Interface.handle_disconnect).
    """
    log_level = logging.INFO

    def __init__(self, *args, log_level=None, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        if log_level is None:
            return
        # shadow the class-level default with a per-instance level
        self.log_level = log_level
class RequestTimedOut(GracefulDisconnect):
    """A request to the server did not complete in time; connection will be closed."""
    def __str__(self):
        return _("Network request timed out.")
class RequestCorrupted(Exception): pass  # server sent a malformed/inconsistent response
class ErrorParsingSSLCert(Exception): pass  # saved/pinned certificate could not be parsed
class ErrorGettingSSLCertFromServer(Exception): pass  # failed to fetch cert on first connect
class ErrorSSLCertFingerprintMismatch(Exception): pass  # cert does not match --serverfingerprint
class InvalidOptionCombination(Exception): pass  # mutually exclusive user options were given
class ConnectError(NetworkException): pass  # wraps errors raised while establishing the connection
class _RSClient(RSClient):
    """RSClient wrapper that normalizes low-level OSErrors into ConnectError."""
    async def create_connection(self):
        try:
            return await super().create_connection()
        except OSError as e:
            # note: using "from e" here will set __cause__ of ConnectError
            raise ConnectError(e) from e
class ServerAddr:
    """An Electrum server endpoint: (host, port, protocol).

    'protocol' is 's' (SSL/TLS) or 't' (plaintext TCP); host/port are
    validated and canonicalized via aiorpcx.NetAddress.
    """

    def __init__(self, host: str, port: Union[int, str], *, protocol: str = None):
        assert isinstance(host, str), repr(host)
        if protocol is None:
            protocol = 's'
        if not host:
            raise ValueError('host must not be empty')
        if host.startswith('[') and host.endswith(']'):  # bracketed IPv6 literal
            host = host[1:-1]
        try:
            net_addr = NetAddress(host, port)  # this validates host and port
        except Exception as e:
            raise ValueError(f"cannot construct ServerAddr: invalid host or port (host={host}, port={port})") from e
        if protocol not in _KNOWN_NETWORK_PROTOCOLS:
            raise ValueError(f"invalid network protocol: {protocol}")
        self.host = str(net_addr.host)  # canonical form (if e.g. IPv6 address)
        self.port = int(net_addr.port)
        self.protocol = protocol
        self._net_addr_str = str(net_addr)

    @classmethod
    def from_str(cls, s: str) -> 'ServerAddr':
        """Parse 'host:port:protocol'; the host itself may contain colons (IPv6)."""
        host, port, protocol = str(s).rsplit(':', 2)
        return ServerAddr(host=host, port=port, protocol=protocol)

    @classmethod
    def from_str_with_inference(cls, s: str) -> Optional['ServerAddr']:
        """Construct ServerAddr from str, guessing missing details.
        Ongoing compatibility not guaranteed.
        """
        if not s:
            return None
        parts = str(s).rsplit(':', 2)
        if len(parts) < 2:
            return None  # although maybe we could guess the port too?
        if len(parts) >= 3:
            protocol = parts[2]
        else:
            protocol = PREFERRED_NETWORK_PROTOCOL
        return ServerAddr(host=parts[0], port=parts[1], protocol=protocol)

    def to_friendly_name(self) -> str:
        """Compact human-readable form; the default ':s' suffix is hidden."""
        # note: this method is closely linked to from_str_with_inference
        if self.protocol != 's':
            return str(self)
        return self.net_addr_str()

    def __str__(self):
        return f"{self.net_addr_str()}:{self.protocol}"

    def to_json(self) -> str:
        return str(self)

    def __repr__(self):
        return f'<ServerAddr host={self.host} port={self.port} protocol={self.protocol}>'

    def net_addr_str(self) -> str:
        """'host:port' string, without the protocol suffix."""
        return self._net_addr_str

    def __eq__(self, other):
        if not isinstance(other, ServerAddr):
            return False
        return ((self.host, self.port, self.protocol)
                == (other.host, other.port, other.protocol))

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.host, self.port, self.protocol))
def _get_cert_path_for_host(*, config: 'SimpleConfig', host: str) -> str:
    """Return the path under the config dir where this host's certificate is stored."""
    filename = host
    try:
        parsed = ip_address(host)
    except ValueError:
        parsed = None  # not an IP address: use the hostname verbatim
    if isinstance(parsed, IPv6Address):
        # IPv6 literals contain ':' which is not filename-safe everywhere
        filename = f"ipv6_{parsed.packed.hex()}"
    return os.path.join(config.path, 'certs', filename)
class Interface(Logger):
    """A connection to a single Electrum server.

    Owns the underlying NotificationSession, tracks the server's claimed
    chain tip, and synchronizes headers into a local Blockchain.
    """
    LOGGING_SHORTCUT = 'i'
def __init__(self, *, network: 'Network', server: ServerAddr, proxy: Optional[dict]):
    """Create the interface and schedule its main loop onto network.taskgroup.

    Construction itself does no I/O; the connection is established
    asynchronously by self.run().
    """
    self.ready = network.asyncio_loop.create_future()  # resolved once tip header is known
    self.got_disconnected = asyncio.Event()
    self.server = server
    Logger.__init__(self)
    assert network.config.path
    self.cert_path = _get_cert_path_for_host(config=network.config, host=self.host)
    self.blockchain = None  # type: Optional[Blockchain]
    self._requested_chunks = set()  # type: Set[int]  # chunk indices with an in-flight request
    self.network = network
    self.session = None  # type: Optional[NotificationSession]
    self._ipaddr_bucket = None  # lazily computed by bucket_based_on_ipaddress
    # Set up proxy.
    # - for servers running on localhost, the proxy is not used. If user runs their own server
    #   on same machine, this lets them enable the proxy (which is used for e.g. FX rates).
    #   note: we could maybe relax this further and bypass the proxy for all private
    #   addresses...? e.g. 192.168.x.x
    if util.is_localhost(server.host):
        self.logger.info(f"looks like localhost: not using proxy for this server")
        proxy = None
    self.proxy = MySocksProxy.from_proxy_dict(proxy)
    # Latest block header and corresponding height, as claimed by the server.
    # Note that these values are updated before they are verified.
    # Especially during initial header sync, verification can take a long time.
    # Failing verification will get the interface closed.
    self.tip_header = None
    self.tip = 0
    self.fee_estimates_eta = {}  # type: Dict[int, int]
    # Dump network messages (only for this interface). Set at runtime from the console.
    self.debug = False
    self.taskgroup = OldTaskGroup()

    async def spawn_task():
        task = await self.network.taskgroup.spawn(self.run())
        task.set_name(f"interface::{str(server)}")
    # __init__ may run on a different thread; hand the spawn over to the network loop
    asyncio.run_coroutine_threadsafe(spawn_task(), self.network.asyncio_loop)
@property
def host(self):
    """Server hostname (or IP), delegated to self.server."""
    return self.server.host
@property
def port(self):
    """Server TCP port, delegated to self.server."""
    return self.server.port
@property
def protocol(self):
    """Transport protocol: 's' (SSL) or 't' (TCP), delegated to self.server."""
    return self.server.protocol
def diagnostic_name(self):
    """Short name used as the logging prefix for this interface."""
    return self.server.net_addr_str()
def __str__(self):
    # human-readable form, e.g. "<Interface example.com:50002>"
    return f"<Interface {self.diagnostic_name()}>"
async def is_server_ca_signed(self, ca_ssl_context):
    """Given a CA enforcing SSL context, returns True if the connection
    can be established. Returns False if the server has a self-signed
    certificate but otherwise is okay. Any other failures raise.
    """
    try:
        await self.open_session(ca_ssl_context, exit_early=True)
    except ConnectError as e:
        # _RSClient sets __cause__ to the underlying OSError/SSLError
        cause = e.__cause__
        if isinstance(cause, ssl.SSLError) and cause.reason == 'CERTIFICATE_VERIFY_FAILED':
            # failures due to self-signed certs are normal
            return False
        raise
    return True
async def _try_saving_ssl_cert_for_first_time(self, ca_ssl_context):
    """On first contact, learn how to authenticate this server and persist it.

    Writes an empty file for CA-signed servers, or pins the server's
    self-signed certificate. Raises InvalidOptionCombination if the user
    supplied --serverfingerprint for a CA-signed server.
    """
    ca_signed = await self.is_server_ca_signed(ca_ssl_context)
    if ca_signed:
        if self._get_expected_fingerprint():
            raise InvalidOptionCombination("cannot use --serverfingerprint with CA signed servers")
        with open(self.cert_path, 'w') as f:
            # empty file means this is CA signed, not self-signed
            f.write('')
    else:
        await self._save_certificate()
def _is_saved_ssl_cert_available(self):
    """Whether a usable saved certificate record exists for this server.

    Returns True when a valid record is on disk (empty file = CA-signed,
    otherwise a pinned self-signed cert). Returns False when missing or the
    pinned cert has expired (in which case the file is deleted).
    Raises ErrorParsingSSLCert / InvalidOptionCombination on bad input.
    """
    if not os.path.exists(self.cert_path):
        return False
    with open(self.cert_path, 'r') as f:
        contents = f.read()
    if contents == '':  # CA signed
        if self._get_expected_fingerprint():
            raise InvalidOptionCombination("cannot use --serverfingerprint with CA signed servers")
        return True
    # pinned self-signed cert
    try:
        b = pem.dePem(contents, 'CERTIFICATE')
    except SyntaxError as e:
        self.logger.info(f"error parsing already saved cert: {e}")
        raise ErrorParsingSSLCert(e) from e
    try:
        x = x509.X509(b)
    except Exception as e:
        self.logger.info(f"error parsing already saved cert: {e}")
        raise ErrorParsingSSLCert(e) from e
    try:
        x.check_date()
    except x509.CertificateError as e:
        self.logger.info(f"certificate has expired: {e}")
        os.unlink(self.cert_path)  # delete pinned cert only in this case
        return False
    self._verify_certificate_fingerprint(bytearray(b))
    return True
async def _get_ssl_context(self):
    """Build the SSLContext used to connect to this server.

    Returns None for plaintext ('t') servers; otherwise a context that
    either enforces the CA bundle or trusts only the pinned self-signed cert.
    """
    if self.protocol != 's':
        # using plaintext TCP
        return None
    # see if we already have cert for this server; or get it for the first time
    ca_sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)
    if not self._is_saved_ssl_cert_available():
        try:
            await self._try_saving_ssl_cert_for_first_time(ca_sslc)
        except (OSError, ConnectError, aiorpcx.socks.SOCKSError) as e:
            raise ErrorGettingSSLCertFromServer(e) from e
    # now we have a file saved in our certificate store
    siz = os.stat(self.cert_path).st_size
    if siz == 0:
        # CA signed cert
        sslc = ca_sslc
    else:
        # pinned self-signed cert
        sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.cert_path)
        # hostname does not matter for a pinned cert; we trust only this exact cert
        sslc.check_hostname = False
    return sslc
def handle_disconnect(func):
    """Decorator (applied to Interface.run below) that performs disconnect cleanup.

    Note: this is used as a decorator inside the class body, not as a method.
    It logs the reason, marks the interface disconnected, informs the
    network, and cancels the 'ready' future so waiters are released.
    """
    @functools.wraps(func)
    async def wrapper_func(self: 'Interface', *args, **kwargs):
        try:
            return await func(self, *args, **kwargs)
        except GracefulDisconnect as e:
            self.logger.log(e.log_level, f"disconnecting due to {repr(e)}")
        except aiorpcx.jsonrpc.RPCError as e:
            self.logger.warning(f"disconnecting due to {repr(e)}")
            self.logger.debug(f"(disconnect) trace for {repr(e)}", exc_info=True)
        finally:
            self.got_disconnected.set()
            await self.network.connection_down(self)
            # if was not 'ready' yet, schedule waiting coroutines:
            self.ready.cancel()
    return wrapper_func
@ignore_exceptions  # do not kill network.taskgroup
@log_exceptions
@handle_disconnect
async def run(self):
    """Main loop of this interface: prepare SSL, connect, serve until disconnect."""
    try:
        ssl_context = await self._get_ssl_context()
    except (ErrorParsingSSLCert, ErrorGettingSSLCertFromServer) as e:
        self.logger.info(f'disconnecting due to: {repr(e)}')
        return
    try:
        await self.open_session(ssl_context)
    except (asyncio.CancelledError, ConnectError, aiorpcx.socks.SOCKSError) as e:
        # make SSL errors for main interface more visible (to help servers ops debug cert pinning issues)
        if (isinstance(e, ConnectError) and isinstance(e.__cause__, ssl.SSLError)
                and self.is_main_server() and not self.network.auto_connect):
            self.logger.warning(f'Cannot connect to main server due to SSL error '
                                f'(maybe cert changed compared to "{self.cert_path}"). Exc: {repr(e)}')
        else:
            self.logger.info(f'disconnecting due to: {repr(e)}')
        return
def _mark_ready(self) -> None:
    """Resolve the 'ready' future once we know the server's tip header.

    Also selects which local Blockchain this interface follows: the chain
    the tip header checks against, or the current best chain otherwise.
    """
    if self.ready.cancelled():
        raise GracefulDisconnect('conn establishment was too slow; *ready* future was cancelled')
    if self.ready.done():
        return
    assert self.tip_header
    chain = blockchain.check_header(self.tip_header)
    if not chain:
        self.blockchain = blockchain.get_best_chain()
    else:
        self.blockchain = chain
    assert self.blockchain is not None
    self.logger.info(f"set blockchain with height {self.blockchain.height()}")
    self.ready.set_result(1)
def is_connected_and_ready(self) -> bool:
    """True once the connection became 'ready' and has not since disconnected."""
    if self.got_disconnected.is_set():
        return False
    return self.ready.done()
async def _save_certificate(self) -> None:
    """Fetch the server's (self-signed) certificate and pin it to disk.

    Retries up to 10 times, as the TLS handshake may not have completed yet;
    raises GracefulDisconnect if no certificate could be obtained.
    """
    if not os.path.exists(self.cert_path):
        # we may need to retry this a few times, in case the handshake hasn't completed
        for _ in range(10):
            dercert = await self._fetch_certificate()
            if dercert:
                self.logger.info("succeeded in getting cert")
                self._verify_certificate_fingerprint(dercert)
                with open(self.cert_path, 'w') as f:
                    cert = ssl.DER_cert_to_PEM_cert(dercert)
                    # workaround android bug
                    cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
                    f.write(cert)
                    # even though close flushes, we can't fsync when closed.
                    # and we must flush before fsyncing, cause flush flushes to OS buffer
                    # fsync writes to OS buffer to disk
                    f.flush()
                    os.fsync(f.fileno())
                break
            await asyncio.sleep(1)
        else:
            raise GracefulDisconnect("could not get certificate after 10 tries")
async def _fetch_certificate(self) -> bytes:
    """Connect with certificate verification disabled and return the peer's DER cert."""
    sslc = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)
    # verification is intentionally off: we only want to *read* the cert here
    sslc.check_hostname = False
    sslc.verify_mode = ssl.CERT_NONE
    async with _RSClient(session_factory=RPCSession,
                         host=self.host, port=self.port,
                         ssl=sslc, proxy=self.proxy) as session:
        asyncio_transport = session.transport._asyncio_transport  # type: asyncio.BaseTransport
        ssl_object = asyncio_transport.get_extra_info("ssl_object")  # type: ssl.SSLObject
        return ssl_object.getpeercert(binary_form=True)
def _get_expected_fingerprint(self) -> Optional[str]:
    """Return the user-pinned cert fingerprint (--serverfingerprint), if any.

    Only the main server can have a pinned fingerprint; for any other
    server this returns None.
    """
    if self.is_main_server():
        return self.network.config.get("serverfingerprint")
    # fix: previously fell through with an implicit None; make the return explicit
    return None
def _verify_certificate_fingerprint(self, certificate) -> None:
    """Check *certificate* (DER bytes) against the user-pinned fingerprint.

    No-op when no fingerprint is configured. Raises
    ErrorSSLCertFingerprintMismatch (and fires the 'cert_mismatch'
    callback) on mismatch.
    """
    expected = self._get_expected_fingerprint()
    if not expected:
        return
    actual = hashlib.sha256(certificate).hexdigest()
    if actual.lower() != expected.lower():
        util.trigger_callback('cert_mismatch')
        raise ErrorSSLCertFingerprintMismatch('Refusing to connect to server due to cert fingerprint mismatch')
    self.logger.info("cert fingerprint verification passed")
async def get_block_header(self, height, assert_mode):
    """Request the block header at *height*; *assert_mode* is only used for logging."""
    if not is_non_negative_integer(height):
        raise Exception(f"{repr(height)} is not a block height")
    self.logger.info(f'requesting block header {height} in mode {assert_mode}')
    # use lower timeout as we usually have network.bhi_lock here
    timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Urgent)
    res = await self.session.send_request('blockchain.block.header', [height], timeout=timeout)
    return blockchain.deserialize_header(bytes.fromhex(res), height)
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
    """Request the 2016-header chunk containing *height* and connect it locally.

    Returns (connect_result, num_headers_connected); num is 0 when the chunk
    did not connect. With can_return_early=True, returns None if a request
    for the same chunk is already in flight.
    """
    if not is_non_negative_integer(height):
        raise Exception(f"{repr(height)} is not a block height")
    index = height // 2016  # chunk index (2016 = headers per difficulty period)
    if can_return_early and index in self._requested_chunks:
        return
    self.logger.info(f"requesting chunk from height {height}")
    size = 2016
    if tip is not None:
        # don't request beyond the server's tip
        size = min(size, tip - index * 2016 + 1)
        size = max(size, 0)
    try:
        self._requested_chunks.add(index)
        res = await self.session.send_request('blockchain.block.headers', [index * 2016, size])
    finally:
        self._requested_chunks.discard(index)
    # validate the response before touching the local chain
    assert_dict_contains_field(res, field_name='count')
    assert_dict_contains_field(res, field_name='hex')
    assert_dict_contains_field(res, field_name='max')
    assert_non_negative_integer(res['count'])
    assert_non_negative_integer(res['max'])
    assert_hex_str(res['hex'])
    if len(res['hex']) != HEADER_SIZE * 2 * res['count']:
        raise RequestCorrupted('inconsistent chunk hex and count')
    # we never request more than 2016 headers, but we enforce those fit in a single response
    if res['max'] < 2016:
        raise RequestCorrupted(f"server uses too low 'max' count for block.headers: {res['max']} < 2016")
    if res['count'] != size:
        raise RequestCorrupted(f"expected {size} headers but only got {res['count']}")
    conn = self.blockchain.connect_chunk(index, res['hex'])
    if not conn:
        return conn, 0
    return conn, res['count']
def is_main_server(self) -> bool:
    """Whether this interface is the server the network currently follows."""
    if self.network.interface == self:
        return True
    # during startup the network may not have an interface yet
    return self.network.interface is None and self.network.default_server == self.server
async def open_session(self, sslc, exit_early=False):
    """Open the RPC session, negotiate the protocol version, and run subtasks.

    With exit_early=True, returns right after server.version succeeds
    (used to probe whether a CA-signed connection works).
    """
    session_factory = lambda *args, iface=self, **kwargs: NotificationSession(*args, **kwargs, interface=iface)
    async with _RSClient(session_factory=session_factory,
                         host=self.host, port=self.port,
                         ssl=sslc, proxy=self.proxy) as session:
        self.session = session  # type: NotificationSession
        self.session.set_default_timeout(self.network.get_network_timeout_seconds(NetworkTimeout.Generic))
        try:
            ver = await session.send_request('server.version', [self.client_name(), version.PROTOCOL_VERSION])
        except aiorpcx.jsonrpc.RPCError as e:
            raise GracefulDisconnect(e)  # probably 'unsupported protocol version'
        if exit_early:
            return
        if ver[1] != version.PROTOCOL_VERSION:
            raise GracefulDisconnect(f'server violated protocol-version-negotiation. '
                                     f'we asked for {version.PROTOCOL_VERSION!r}, they sent {ver[1]!r}')
        if not self.network.check_interface_against_healthy_spread_of_connected_servers(self):
            raise GracefulDisconnect(f'too many connected servers already '
                                     f'in bucket {self.bucket_based_on_ipaddress()}')
        self.logger.info(f"connection established. version: {ver}")
        try:
            # run the long-lived per-connection tasks; returning from this
            # 'async with' tears down the whole connection
            async with self.taskgroup as group:
                await group.spawn(self.ping)
                await group.spawn(self.request_fee_estimates)
                await group.spawn(self.run_fetch_blocks)
                await group.spawn(self.monitor_connection)
        except aiorpcx.jsonrpc.RPCError as e:
            if e.code in (JSONRPC.EXCESSIVE_RESOURCE_USAGE,
                          JSONRPC.SERVER_BUSY,
                          JSONRPC.METHOD_NOT_FOUND):
                raise GracefulDisconnect(e, log_level=logging.WARNING) from e
            raise
        finally:
            self.got_disconnected.set()  # set this ASAP, ideally before any awaits
async def monitor_connection(self):
    """Poll the session once a second and disconnect when it is no longer open."""
    while True:
        await asyncio.sleep(1)
        # If the session/transport is no longer open, we disconnect.
        # e.g. if the remote cleanly sends EOF, we would handle that here.
        # note: If the user pulls the ethernet cable or disconnects wifi,
        #       ideally we would detect that here, so that the GUI/etc can reflect that.
        #       - On Android, this seems to work reliably , where asyncio.BaseProtocol.connection_lost()
        #         gets called with e.g. ConnectionAbortedError(103, 'Software caused connection abort').
        #       - On desktop Linux/Win, it seems BaseProtocol.connection_lost() is not called in such cases.
        #         Hence, in practice the connection issue will only be detected the next time we try
        #         to send a message (plus timeout), which can take minutes...
        if not self.session or self.session.is_closing():
            raise GracefulDisconnect('session was closed')
async def ping(self):
    """Keep-alive: ping the server every 5 minutes."""
    while True:
        await asyncio.sleep(300)
        await self.session.send_request('server.ping')
async def request_fee_estimates(self):
    """Poll ETA-based fee estimates for all targets every 60 seconds."""
    from .simple_config import FEE_ETA_TARGETS
    while True:
        # fetch all targets concurrently; the group completes when all tasks do
        async with OldTaskGroup() as group:
            fee_tasks = []
            for i in FEE_ETA_TARGETS:
                fee_tasks.append((i, await group.spawn(self.get_estimatefee(i))))
        for nblock_target, task in fee_tasks:
            fee = task.result()
            if fee < 0: continue  # negative means the server had no estimate
            assert isinstance(fee, int)
            self.fee_estimates_eta[nblock_target] = fee
        self.network.update_fee_estimates()
        await asyncio.sleep(60)
async def close(self, *, force_after: int = None):
    """Closes the connection and waits for it to be closed.
    We try to flush buffered data to the wire, which can take some time.
    """
    if self.session:
        await self.session.close(force_after=force_after)
    # monitor_connection will cancel tasks
async def run_fetch_blocks(self):
    """Subscribe to chain-tip headers and process each new tip as it arrives."""
    header_queue = asyncio.Queue()
    await self.session.subscribe('blockchain.headers.subscribe', [], header_queue)
    while True:
        item = await header_queue.get()
        raw_header = item[0]
        height = raw_header['height']
        header = blockchain.deserialize_header(bfh(raw_header['hex']), height)
        # record the server's claimed tip before it is verified
        self.tip_header = header
        self.tip = height
        if self.tip < constants.net.max_checkpoint():
            raise GracefulDisconnect('server tip below max checkpoint')
        self._mark_ready()
        blockchain_updated = await self._process_header_at_tip()
        # header processing done
        if blockchain_updated:
            util.trigger_callback('blockchain_updated')
        util.trigger_callback('network_updated')
        await self.network.switch_unwanted_fork_interface()
        await self.network.switch_lagging_interface()
async def _process_header_at_tip(self) -> bool:
    """Returns:
    False - boring fast-forward: we already have this header as part of this blockchain from another interface,
    True - new header we didn't have, or reorg
    """
    height, header = self.tip, self.tip_header
    async with self.network.bhi_lock:
        if self.blockchain.height() >= height and self.blockchain.check_header(header):
            # another interface amended the blockchain
            self.logger.info(f"skipping header {height}")
            return False
        _, height = await self.step(height, header)
        # in the simple case, height == self.tip+1
        if height <= self.tip:
            await self.sync_until(height)
        return True
async def sync_until(self, height, next_height=None):
    """Download/connect headers from *height* up to *next_height* (default: server tip).

    Uses bulk chunk requests when far behind, single-header steps otherwise.
    Returns the last (mode, height) pair from the final iteration.
    """
    if next_height is None:
        next_height = self.tip
    last = None
    while last is None or height <= next_height:
        prev_last, prev_height = last, height
        if next_height > height + 10:
            # far behind: fetch a whole 2016-header chunk at once
            could_connect, num_headers = await self.request_chunk(height, next_height)
            if not could_connect:
                if height <= constants.net.max_checkpoint():
                    raise GracefulDisconnect('server chain conflicts with checkpoints or genesis')
                last, height = await self.step(height)
                continue
            util.trigger_callback('network_updated')
            height = (height // 2016 * 2016) + num_headers
            assert height <= next_height+1, (height, self.tip)
            last = 'catchup'
        else:
            last, height = await self.step(height)
        assert (prev_last, prev_height) != (last, height), 'had to prevent infinite loop in interface.sync_until'
    return last, height
async def step(self, height, header=None):
    """Advance header sync by one step at *height*.

    Returns a (mode, next_height) pair, where mode is 'catchup', 'no_fork'
    or 'fork'. The 'mock' branches exist so unit tests can stub out
    blockchain interactions via the header dict itself.
    """
    assert 0 <= height <= self.tip, (height, self.tip)
    if header is None:
        header = await self.get_block_header(height, 'catchup')
    chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
    if chain:
        self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
        # note: there is an edge case here that is not handled.
        # we might know the blockhash (enough for check_header) but
        # not have the header itself. e.g. regtest chain with only genesis.
        # this situation resolves itself on the next block
        return 'catchup', height+1
    can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
    if not can_connect:
        self.logger.info(f"can't connect {height}")
        # the header doesn't extend any local chain: search backwards for
        # the most recent height that still checks out
        height, header, bad, bad_header = await self._search_headers_backwards(height, header)
        chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
        can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
        assert chain or can_connect
    if can_connect:
        self.logger.info(f"could connect {height}")
        height += 1
        if isinstance(can_connect, Blockchain):  # not when mocking
            self.blockchain = can_connect
            self.blockchain.save_header(header)
        return 'catchup', height
    # header checks against a chain but can't connect: binary-search the forkpoint
    good, bad, bad_header = await self._search_headers_binary(height, bad, bad_header, chain)
    return await self._resolve_potential_chain_fork_given_forkpoint(good, bad, bad_header)
async def _search_headers_binary(self, height, bad, bad_header, chain):
    """Binary-search between a known-good height and a known-bad height.

    Narrows until good + 1 == bad, i.e. 'good' is the last height whose
    header checks against a local chain. Returns (good, bad, bad_header).
    """
    assert bad == bad_header['block_height']
    _assert_header_does_not_check_against_any_chain(bad_header)

    self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
    good = height
    while True:
        assert good < bad, (good, bad)
        height = (good + bad) // 2
        self.logger.info(f"binary step. good {good}, bad {bad}, height {height}")
        header = await self.get_block_header(height, 'binary')
        chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
        if chain:
            self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain
            good = height
        else:
            bad = height
            bad_header = header
        if good + 1 == bad:
            break

    # sanity check: bad_header must connect to the good header below it
    mock = 'mock' in bad_header and bad_header['mock']['connect'](height)
    real = not mock and self.blockchain.can_connect(bad_header, check_height=False)
    if not real and not mock:
        raise Exception('unexpected bad header during binary: {}'.format(bad_header))
    _assert_header_does_not_check_against_any_chain(bad_header)

    self.logger.info(f"binary search exited. good {good}, bad {bad}")
    return good, bad, bad_header
async def _resolve_potential_chain_fork_given_forkpoint(self, good, bad, bad_header):
    """Given the exact forkpoint (good+1 == bad), decide: plain catch-up or new fork.

    Returns ('no_fork', height) when our chain simply ends at 'good', or
    ('fork', height) after creating a new Blockchain forked at 'bad'.
    """
    assert good + 1 == bad
    assert bad == bad_header['block_height']
    _assert_header_does_not_check_against_any_chain(bad_header)
    # 'good' is the height of a block 'good_header', somewhere in self.blockchain.
    # bad_header connects to good_header; bad_header itself is NOT in self.blockchain.

    bh = self.blockchain.height()
    assert bh >= good, (bh, good)
    if bh == good:
        height = good + 1
        self.logger.info(f"catching up from {height}")
        return 'no_fork', height

    # this is a new fork we don't yet have
    height = bad + 1
    self.logger.info(f"new fork at bad height {bad}")
    forkfun = self.blockchain.fork if 'mock' not in bad_header else bad_header['mock']['fork']
    b = forkfun(bad_header)  # type: Blockchain
    self.blockchain = b
    assert b.forkpoint == bad
    return 'fork', height
async def _search_headers_backwards(self, height, header):
    """Walk back (with exponentially growing steps) until a header checks/connects.

    Returns (height, header, bad, bad_header) where (bad, bad_header) is the
    lowest height found that does NOT check against any local chain.
    """
    async def iterate():
        # fetch the header at the current height and test it; returns True
        # while we need to keep stepping backwards
        nonlocal height, header
        checkp = False
        if height <= constants.net.max_checkpoint():
            height = constants.net.max_checkpoint()
            checkp = True
        header = await self.get_block_header(height, 'backward')
        chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
        can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height)
        if chain or can_connect:
            return False
        if checkp:
            raise GracefulDisconnect("server chain conflicts with checkpoints")
        return True

    bad, bad_header = height, header
    _assert_header_does_not_check_against_any_chain(bad_header)
    with blockchain.blockchains_lock: chains = list(blockchain.blockchains.values())
    local_max = max([0] + [x.height() for x in chains]) if 'mock' not in header else float('inf')
    height = min(local_max + 1, height - 1)
    while await iterate():
        bad, bad_header = height, header
        # double the distance from the tip each round
        delta = self.tip - height
        height = self.tip - 2 * delta

    _assert_header_does_not_check_against_any_chain(bad_header)
    self.logger.info(f"exiting backward mode at {height}")
    return height, header, bad, bad_header
@classmethod
def client_name(cls) -> str:
    """Identification string this client reports to servers."""
    return 'electrum/' + version.ELECTRUM_VERSION
def is_tor(self):
    """Return True when this server is reached via a Tor hidden service."""
    hostname = self.host
    return hostname[-6:] == '.onion'
def ip_addr(self) -> Optional[str]:
    """Return the remote IP address of the current session, or None if unknown."""
    sess = self.session
    if sess:
        remote = sess.remote_address()
        if remote:
            return str(remote.host)
    return None
def bucket_based_on_ipaddress(self) -> str:
    """Group this server into a bucket based on its network address.

    Onion servers share one bucket; IPv4 servers share a bucket per /16,
    IPv6 per /48. Loopback and unparseable addresses fall into the
    catch-all '' bucket. The result is cached on the instance.
    """
    def compute() -> str:
        if self.is_tor():
            return BUCKET_NAME_OF_ONION_SERVERS
        try:
            addr = ip_address(self.ip_addr())  # type: Union[IPv4Address, IPv6Address]
        except ValueError:
            return ''
        if not addr:
            return ''
        if addr.is_loopback:  # localhost is exempt
            return ''
        if addr.version == 4:
            return str(IPv4Network(addr).supernet(prefixlen_diff=32 - 16))
        if addr.version == 6:
            return str(IPv6Network(addr).supernet(prefixlen_diff=128 - 48))
        return ''
    if not self._ipaddr_bucket:
        self._ipaddr_bucket = compute()
    return self._ipaddr_bucket
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
    """Request the merkle proof for tx_hash (tx_height is only a hint).

    The reply is validated to contain 'block_height', 'pos' and a
    'merkle' list of hex hashes before being returned unchanged.
    """
    if not is_hash256_str(tx_hash):
        raise Exception(f"{repr(tx_hash)} is not a txid")
    if not is_non_negative_integer(tx_height):
        raise Exception(f"{repr(tx_height)} is not a block height")
    # do request
    reply = await self.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
    # validate the response shape
    block_height = assert_dict_contains_field(reply, field_name='block_height')
    merkle = assert_dict_contains_field(reply, field_name='merkle')
    pos = assert_dict_contains_field(reply, field_name='pos')
    # note: tx_height was just a hint to the server, don't enforce the response to match it
    assert_non_negative_integer(block_height)
    assert_non_negative_integer(pos)
    assert_list_or_tuple(merkle)
    for branch_hash in merkle:
        assert_hash256_str(branch_hash)
    return reply
async def get_transaction(self, tx_hash: str, *, timeout=None) -> str:
    """Fetch the raw transaction hex for tx_hash from the server.

    The reply must be hex, deserializable, and hash to the requested txid;
    otherwise RequestCorrupted is raised.
    """
    if not is_hash256_str(tx_hash):
        raise Exception(f"{repr(tx_hash)} is not a txid")
    rawtx = await self.session.send_request('blockchain.transaction.get', [tx_hash], timeout=timeout)
    # validate response
    if not is_hex_str(rawtx):
        raise RequestCorrupted(f"received garbage (non-hex) as tx data (txid {tx_hash}): {rawtx!r}")
    tx = Transaction(rawtx)
    try:
        tx.deserialize()  # see if raises
    except Exception as e:
        raise RequestCorrupted(f"cannot deserialize received transaction (txid {tx_hash})") from e
    if tx.txid() != tx_hash:
        raise RequestCorrupted(f"received tx does not match expected txid {tx_hash} (got {tx.txid()})")
    return rawtx
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
    """Return the validated tx history (confirmed + mempool) for a scripthash.

    Confirmed entries must come in non-decreasing height order, mempool
    entries (height -1 or 0) must carry a fee and appear last, and all
    txids must be unique; otherwise RequestCorrupted is raised.
    """
    if not is_hash256_str(sh):
        raise Exception(f"{repr(sh)} is not a scripthash")
    # do request
    history = await self.session.send_request('blockchain.scripthash.get_history', [sh])
    # check response
    assert_list_or_tuple(history)
    prev_height = 1
    for tx_item in history:
        height = assert_dict_contains_field(tx_item, field_name='height')
        assert_dict_contains_field(tx_item, field_name='tx_hash')
        assert_integer(height)
        assert_hash256_str(tx_item['tx_hash'])
        if height in (-1, 0):
            assert_dict_contains_field(tx_item, field_name='fee')
            assert_non_negative_integer(tx_item['fee'])
            prev_height = float("inf")  # this ensures confirmed txs can't follow mempool txs
        else:
            # check monotonicity of heights
            if height < prev_height:
                raise RequestCorrupted(f'heights of confirmed txs must be in increasing order')
            prev_height = height
    txids = {item['tx_hash'] for item in history}
    if len(txids) != len(history):
        # Either server is sending garbage... or maybe if server is race-prone
        # a recently mined tx could be included in both last block and mempool?
        # Still, it's simplest to just disregard the response.
        raise RequestCorrupted(f"server history has non-unique txids for sh={sh}")
    return history
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
    """Return the validated list of unspent outputs for a scripthash."""
    if not is_hash256_str(sh):
        raise Exception(f"{repr(sh)} is not a scripthash")
    # do request
    utxos = await self.session.send_request('blockchain.scripthash.listunspent', [sh])
    # check response
    assert_list_or_tuple(utxos)
    for utxo in utxos:
        for field in ('tx_pos', 'value', 'tx_hash', 'height'):
            assert_dict_contains_field(utxo, field_name=field)
        assert_non_negative_integer(utxo['tx_pos'])
        assert_non_negative_integer(utxo['value'])
        assert_non_negative_integer(utxo['height'])
        assert_hash256_str(utxo['tx_hash'])
    return utxos
async def get_balance_for_scripthash(self, sh: str) -> dict:
    """Return the server-reported balance for a scripthash.

    'confirmed' must be a non-negative integer; 'unconfirmed' may be
    negative (outgoing mempool txs).
    """
    if not is_hash256_str(sh):
        raise Exception(f"{repr(sh)} is not a scripthash")
    # do request
    balance = await self.session.send_request('blockchain.scripthash.get_balance', [sh])
    # check response
    assert_dict_contains_field(balance, field_name='confirmed')
    assert_dict_contains_field(balance, field_name='unconfirmed')
    assert_non_negative_integer(balance['confirmed'])
    assert_integer(balance['unconfirmed'])
    return balance
async def get_txid_from_txpos(self, tx_height: int, tx_pos: int, merkle: bool):
    """Look up the txid at position tx_pos inside block tx_height.

    With merkle=True the reply is a dict carrying 'tx_hash' plus a
    'merkle' branch; otherwise it is the txid string alone.
    """
    if not is_non_negative_integer(tx_height):
        raise Exception(f"{repr(tx_height)} is not a block height")
    if not is_non_negative_integer(tx_pos):
        raise Exception(f"{repr(tx_pos)} should be non-negative integer")
    # do request
    reply = await self.session.send_request(
        'blockchain.transaction.id_from_pos',
        [tx_height, tx_pos, merkle],
    )
    # check response
    if not merkle:
        assert_hash256_str(reply)
        return reply
    assert_dict_contains_field(reply, field_name='tx_hash')
    assert_dict_contains_field(reply, field_name='merkle')
    assert_hash256_str(reply['tx_hash'])
    assert_list_or_tuple(reply['merkle'])
    for node_hash in reply['merkle']:
        assert_hash256_str(node_hash)
    return reply
async def get_fee_histogram(self) -> Sequence[Tuple[Union[float, int], int]]:
    """Return the server's mempool fee histogram as (fee, vsize) pairs.

    Fees must be strictly decreasing; otherwise RequestCorrupted is raised.
    """
    # do request
    histogram = await self.session.send_request('mempool.get_fee_histogram')
    # check response
    assert_list_or_tuple(histogram)
    last_fee = float('inf')
    for fee, vsize in histogram:
        assert_non_negative_int_or_float(fee)
        assert_non_negative_integer(vsize)
        if fee >= last_fee:  # check monotonicity
            raise RequestCorrupted(f'fees must be in decreasing order')
        last_fee = fee
    return histogram
async def get_server_banner(self) -> str:
    """Fetch the server's banner text; must come back as a string."""
    banner = await self.session.send_request('server.banner')
    if not isinstance(banner, str):
        raise RequestCorrupted(f'{banner!r} should be a str')
    return banner
async def get_donation_address(self) -> str:
    """Fetch the server's donation address, or '' if unset/unrecognized.

    An address we cannot parse is logged and downgraded to '' rather than
    failing, to tolerate future address formats.
    """
    addr = await self.session.send_request('server.donation_address')
    if not addr:  # ignore empty string
        return ''
    if not bitcoin.is_address(addr):
        # note: do not hard-fail -- allow server to use future-type
        # bitcoin address we do not recognize
        self.logger.info(f"invalid donation address from server: {repr(addr)}")
        addr = ''
    return addr
async def get_relay_fee(self) -> int:
    """Returns the min relay feerate in sat/kbyte."""
    # do request
    reply = await self.session.send_request('blockchain.relayfee')
    # check response, then convert BTC/kB -> sat/kB, clamped at zero
    assert_non_negative_int_or_float(reply)
    return max(0, int(reply * bitcoin.COIN))
async def get_estimatefee(self, num_blocks: int) -> int:
    """Returns a feerate estimate for getting confirmed within
    num_blocks blocks, in sat/kbyte.

    The server's sentinel value -1 ("no estimate available") is passed
    through unchanged.
    """
    if not is_non_negative_integer(num_blocks):
        raise Exception(f"{repr(num_blocks)} is not a num_blocks")
    # do request
    estimate = await self.session.send_request('blockchain.estimatefee', [num_blocks])
    # check response
    if estimate == -1:
        return estimate
    assert_non_negative_int_or_float(estimate)
    return int(estimate * bitcoin.COIN)
def _assert_header_does_not_check_against_any_chain(header: dict) -> None:
chain_bad = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header)
if chain_bad:
raise Exception('bad_header must not check!')
def check_cert(host, cert):
    """Parse a PEM certificate and print whether it has expired, for `host`.

    Parse failures are printed to stdout and swallowed (best-effort tool).
    """
    try:
        der = pem.dePem(cert, 'CERTIFICATE')
        x = x509.X509(der)
    # fix: bare 'except:' also swallowed SystemExit/KeyboardInterrupt
    except Exception:
        traceback.print_exc(file=sys.stdout)
        return
    try:
        x.check_date()
        expired = False
    except Exception:
        expired = True
    m = "host: %s\n" % host
    m += "has_expired: %s\n" % expired
    util.print_msg(m)
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
def test_certificates():
    """Run check_cert() over every certificate stored in the config's certs/ dir."""
    from .simple_config import SimpleConfig
    config = SimpleConfig()
    certs_dir = os.path.join(config.path, "certs")
    for fname in os.listdir(certs_dir):
        with open(os.path.join(certs_dir, fname), encoding='utf-8') as f:
            check_cert(fname, f.read())
# Allow invoking this module directly to check locally stored certificates.
if __name__ == "__main__":
    test_certificates()
| mit | fc629cb481802bc381a37c28dd9a2c3c | 41.699402 | 119 | 0.607968 | 4.01905 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/release/tests.py | 2 | 139157 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
import time
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.test.client import Client
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from . import models
from pdc.apps.bindings.models import ReleaseBugzillaMapping, ReleaseDistGitMapping
from pdc.apps.compose import models as compose_models
class BaseProductRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD tests for the base-product REST endpoint.

    assertNumChanges([...]) asserts how many changesets were recorded by the
    requests in the test and how many changes each changeset contains.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
    ]

    def test_create(self):
        args = {"name": "Our Awesome Product", "short": "product", "version": "1", "release_type": "ga"}
        response = self.client.post(reverse('baseproduct-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        # the server derives base_product_id from short + version
        args.update({'base_product_id': 'product-1'})
        self.assertEqual(args, dict(response.data))
        self.assertEqual(1, len(models.BaseProduct.objects.filter(base_product_id='product-1')))
        self.assertNumChanges([1])

    def test_create_with_invalid_short(self):
        # 'short' must be lowercase letters, numbers or dashes
        args = {"name": "Fedora", "short": "F", "version": "1", "release_type": "ga"}
        response = self.client.post(reverse('baseproduct-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertIn('Only accept lowercase letters, numbers or -', response.data['short'])

    def test_create_with_extra_fields(self):
        args = {"name": "Fedora", "short": "f", "version": "1", "release_type": "ga", "foo": "bar"}
        response = self.client.post(reverse('baseproduct-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')

    def test_create_duplicate(self):
        args = {"name": "Our Awesome Product", "short": "product", "version": "1", "release_type": "ga"}
        response = self.client.post(reverse('baseproduct-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        response = self.client.post(reverse('baseproduct-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_put_as_create_disabled(self):
        # PUT to a non-existent resource must not create it
        args = {"name": "Our Awesome Product", "short": "product", "version": "1", "release_type": "ga"}
        response = self.client.put(reverse('baseproduct-detail', args=['product']), args)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])

    def test_update(self):
        self.test_create()
        response = self.client.put(reverse('baseproduct-detail', args=['product-1']),
                                   {'short': 'product', 'name': 'OUR AWESOME PRODUCT',
                                    'version': '1', 'release_type': 'ga'},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(models.BaseProduct.objects.get(base_product_id='product-1').name,
                         'OUR AWESOME PRODUCT')
        self.assertNumChanges([1, 1])

    def test_update_missing_field(self):
        self.test_create()
        response = self.client.put(reverse('baseproduct-detail', args=['product-1']),
                                   {'short': 'product'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([1])

    def test_update_partial(self):
        self.test_create()
        response = self.client.patch(reverse('baseproduct-detail', args=['product-1']),
                                     {'name': 'Our Product'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1, 1])
        self.assertEqual(models.BaseProduct.objects.get(base_product_id='product-1').name,
                         'Our Product')

    def test_update_only_short(self):
        # changing 'short' changes the derived base_product_id
        self.test_create()
        response = self.client.patch(reverse('baseproduct-detail', args=['product-1']),
                                     {'short': 'tcudorp'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1, 1])
        response = self.client.get(reverse('baseproduct-detail', args=['product-1']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('baseproduct-detail', args=['tcudorp-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_update_only_version(self):
        # changing 'version' changes the derived base_product_id
        self.test_create()
        response = self.client.patch(reverse('baseproduct-detail', args=['product-1']),
                                     {'version': '2'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1, 1])
        response = self.client.get(reverse('baseproduct-detail', args=['product-1']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('baseproduct-detail', args=['product-2']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_query_with_multi_values(self):
        # repeated query parameters act as an OR filter
        args = {"name": "Our Awesome Product1", "short": "product", "version": "1", "release_type": "ga"}
        self.client.post(reverse('baseproduct-list'), args)
        args = {"name": "Our Awesome Product2", "short": "product", "version": "2", "release_type": "ga"}
        self.client.post(reverse('baseproduct-list'), args)
        url = reverse('baseproduct-list')
        response = self.client.get(url + '?version=1&version=2')
        self.assertEqual(2, response.data['count'])
class ProductRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Create/retrieve/list tests for the product REST endpoint.

    assertNumChanges([...]) asserts how many changesets were recorded by the
    requests in the test and how many changes each changeset contains.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/product.json",
    ]

    def test_create(self):
        args = {"name": "Fedora", "short": "f"}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'active': False, 'product_versions': [], 'allowed_push_targets': []})
        self.assertEqual(args, response.data)
        self.assertNumChanges([1])

    def test_create_invalid_short(self):
        # 'short' must be lowercase letters, numbers or dashes
        args = {'name': 'Fedora', 'short': 'F'}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertIn('Only accept lowercase letters, numbers or -', response.data['short'])

    def test_create_with_extra_field(self):
        args = {'name': 'Fedora', 'short': 'f', 'foo': 'bar'}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')

    def test_create_duplicate(self):
        args = {"name": "Fedora", "short": "f"}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_create_with_bad_field(self):
        args = {"name": "Fedora", "short": "f", "foo": "bar"}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_get(self):
        response = self.client.get(reverse('product-detail', args=['product']))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(dict(response.data),
                         {"short": "product", "name": "Test Product",
                          "product_versions": [], "active": False,
                          "allowed_push_targets": []})

    def test_all(self):
        response = self.client.get(reverse('product-list'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(2, response.data['count'])
        data = response.data['results']
        expected = [
            {'name': u'Dummy product',
             'short': u'dummy',
             'active': False,
             'allowed_push_targets': [],
             'product_versions': []},
            {'name': u'Test Product',
             'short': u'product',
             'active': False,
             'allowed_push_targets': [],
             'product_versions': []},
        ]
        # fix: sorted() on a list of dicts raises TypeError on Python 3
        # (dicts are unorderable); order both sides by the unique 'short' key.
        by_short = lambda item: item['short']
        self.assertEqual(sorted(data, key=by_short), sorted(expected, key=by_short))

    def test_get_after_create(self):
        self.test_create()
        response = self.client.get(reverse('product-detail', args=["f"]))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(response.data, {"short": "f",
                                         "name": "Fedora",
                                         "active": False,
                                         "product_versions": [],
                                         "allowed_push_targets": []})

    def test_query_with_illegal_active(self):
        response = self.client.get(reverse('product-list'), {"active": "abcd"})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_query_multi_values(self):
        # repeated query parameters act as an OR filter
        response = self.client.get(reverse('product-list') + '?short=product&short=dummy')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)
class ProductUpdateTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Update (PUT/PATCH) tests for the product REST endpoint.

    assertNumChanges([...]) asserts how many changesets were recorded by the
    requests in the test and how many changes each changeset contains.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/product.json",
    ]

    def test_update(self):
        response = self.client.put(reverse('product-detail', args=['product']),
                                   {'short': 'product', 'name': 'MY PRODUCT'},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(models.Product.objects.get(short='product').name, 'MY PRODUCT')
        self.assertNumChanges([1])

    def test_put_as_create_disabled(self):
        # PUT to a non-existent resource must not create it
        args = {"name": "Product", "short": "p"}
        response = self.client.put(reverse('product-detail', args=['p']), args)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])

    def test_update_missing_field(self):
        # PUT requires all mandatory fields
        response = self.client.put(reverse('product-detail', args=['product']),
                                   {'short': 'product'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_update_partial(self):
        response = self.client.patch(reverse('product-detail', args=['product']),
                                     {'name': 'tcudorp'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(models.Product.objects.get(short='product').name, 'tcudorp')

    def test_partial_update_empty(self):
        url = reverse('product-detail', args=['product'])
        response = self.client.patch(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_only_short(self):
        # 'short' is the lookup key, so changing it moves the resource URL
        response = self.client.patch(reverse('product-detail', args=['product']),
                                     {'short': 'tcudorp'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        response = self.client.get(reverse('product-detail', args=['product']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('product-detail', args=['tcudorp']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_patch_read_only_field(self):
        # 'active' is computed from releases and cannot be set directly
        response = self.client.patch(reverse('product-detail', args=['product']),
                                     {'active': True}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
class ProductVersionRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Create/retrieve/list tests for the product-version REST endpoint.

    assertNumChanges([...]) asserts how many changesets were recorded by the
    requests in the test and how many changes each changeset contains.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/product.json",
        "pdc/apps/release/fixtures/tests/product_version.json",
    ]

    def test_create(self):
        args = {"name": "Our Awesome Product", "short": "product",
                "version": "2", "product": "product"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        # the server derives product_version_id from short + version
        args.update({'product_version_id': 'product-2',
                     'active': False,
                     'releases': [],
                     'allowed_push_targets': [],
                     'product': 'product'})
        self.assertEqual(args, dict(response.data))
        self.assertEqual(1, models.ProductVersion.objects.filter(product_version_id='product-2').count())
        self.assertNumChanges([1])

    def test_create_without_short(self):
        # when 'short' is omitted, it is inherited from the parent product
        args = {"name": "Our Awesome Product", "version": "2", "product": "product"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'product_version_id': 'product-2',
                     'active': False,
                     'releases': [],
                     'allowed_push_targets': [],
                     'product': 'product',
                     'short': 'product'})
        self.assertDictEqual(args, dict(response.data))
        self.assertEqual(1, models.ProductVersion.objects.filter(product_version_id='product-2').count())
        self.assertNumChanges([1])

    def test_create_with_invalid_short(self):
        # 'short' must be lowercase letters, numbers or dashes
        args = {"name": "Our Awesome Product", "short": "PRODUCT",
                "version": "2", "product": "product"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(0, models.ProductVersion.objects.filter(product_version_id='product-2').count())
        self.assertNumChanges([])
        self.assertIn('Only accept lowercase letters, numbers or -', response.data['short'])

    def test_create_with_extra_field(self):
        args = {"name": "Our Awesome Product", "short": "product",
                "version": "2", "product": "product", "foo": "bar"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
        self.assertNumChanges([])

    def test_create_with_non_existing_product(self):
        args = {"name": "Our Awesome Product", "short": "product",
                "version": "2", "product": "foo"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(0, models.ProductVersion.objects.filter(product_version_id='product-2').count())
        self.assertNumChanges([])

    def test_create_duplicate(self):
        args = {"name": "Our Awesome Product", "short": "product",
                "version": "2", "product": "product"}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_get(self):
        response = self.client.get(reverse('productversion-detail', args=["product-1"]))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(dict(response.data), {"product": "product",
                                               "product_version_id": "product-1",
                                               "short": "product",
                                               "name": "Product Version",
                                               "active": False,
                                               "releases": [],
                                               "allowed_push_targets": [],
                                               "version": "1"})

    def test_all_for_dummy(self):
        response = self.client.get(reverse('product-detail', args=["dummy"]))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(response.data['product_versions'], [])

    def test_all_for_product(self):
        response = self.client.get(reverse('product-detail', args=["product"]))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(set(response.data['product_versions']),
                         set(["product-0", "product-1"]))

    def test_clone(self):
        # a GET response minus server-generated fields can be POSTed back
        response = self.client.get(reverse('productversion-detail', args=['product-1']))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        response.data['version'] = 2
        del response.data['product_version_id']
        del response.data['releases']
        del response.data['allowed_push_targets']
        del response.data['active']
        response = self.client.post(reverse('productversion-list'), response.data)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(dict(response.data),
                         {'product': 'product', 'product_version_id': 'product-2',
                          'short': 'product', 'name': 'Product Version', 'version': '2',
                          'active': False, 'releases': [], 'allowed_push_targets': []})
        self.assertNumChanges([1])

    def test_releases_are_ordered(self):
        # releases must come back in version order, not insertion order
        release_type = models.ReleaseType.objects.get(short='ga')
        pv = models.ProductVersion.objects.get(product_version_id='product-1')
        for x in range(11, 7, -1):
            models.Release.objects.create(short='product',
                                          name='Product',
                                          version='1.%d' % x,
                                          release_type=release_type,
                                          product_version=pv)
        response = self.client.get(reverse('productversion-detail', args=['product-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data.get('releases', []),
            ['product-1.8', 'product-1.9', 'product-1.10', 'product-1.11']
        )

    def test_query_with_illegal_active(self):
        response = self.client.get(reverse('productversion-list'), {"active": "abcd"})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_query_multi_values(self):
        # repeated query parameters act as an OR filter
        response = self.client.get(reverse('productversion-list') +
                                   '?product_version_id=product-1&product_version_id=product-0')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)
class ProductVersionUpdateRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Update (PUT/PATCH) tests for the product-version REST endpoint.

    assertNumChanges([...]) asserts how many changesets were recorded by the
    requests in the test and how many changes each changeset contains.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/product.json",
        "pdc/apps/release/fixtures/tests/product_version.json",
    ]

    def test_update(self):
        response = self.client.put(reverse('productversion-detail', args=['product-1']),
                                   {'short': 'product', 'name': 'TEST PRODUCT',
                                    'version': '1', 'product': 'product'},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(models.ProductVersion.objects.get(product_version_id='product-1').name,
                         'TEST PRODUCT')
        self.assertNumChanges([1])

    def test_put_as_create_disabled(self):
        # PUT to a non-existent resource must not create it
        args = {'name': 'Product', 'short': 'p', 'version': '1'}
        response = self.client.put(reverse('productversion-detail', args=['p-1']), args)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])

    def test_update_missing_field(self):
        # PUT requires all mandatory fields
        response = self.client.put(reverse('productversion-detail', args=['product-1']),
                                   {'short': 'product'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_update_partial(self):
        response = self.client.patch(reverse('productversion-detail', args=['product-1']),
                                     {'name': 'Tcudorp'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(models.ProductVersion.objects.get(product_version_id='product-1').name,
                         'Tcudorp')

    def test_partial_update_empty(self):
        url = reverse('productversion-detail', args=['product-1'])
        response = self.client.patch(url, format='json', data={})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_only_short(self):
        # changing 'short' changes the derived product_version_id
        response = self.client.patch(reverse('productversion-detail', args=['product-1']),
                                     {'short': 'tcudorp'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        response = self.client.get(reverse('productversion-detail', args=['product-1']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('productversion-detail', args=['tcudorp-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_update_only_version(self):
        # changing 'version' changes the derived product_version_id
        response = self.client.patch(reverse('productversion-detail', args=['product-1']),
                                     {'version': '2'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        response = self.client.get(reverse('productversion-detail', args=['product-1']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('productversion-detail', args=['product-2']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_change_product(self):
        # PATCHing only the product keeps the existing 'short'
        self.client.post(reverse('product-list'), {'short': 'test', 'name': 'Test'}, format='json')
        response = self.client.patch(reverse('productversion-detail', args=['product-1']),
                                     {'product': 'test'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1, 1])
        pv = models.ProductVersion.objects.get(product_version_id='product-1')
        self.assertEqual(pv.product.name, 'Test')
        self.assertEqual(pv.short, 'product')

    def test_change_short_on_put_implicitly(self):
        # PUT without 'short' inherits it from the (new) parent product
        self.client.post(reverse('product-list'), {'short': 'test', 'name': 'Test'}, format='json')
        response = self.client.put(reverse('productversion-detail', args=['product-1']),
                                   {'product': 'test', 'name': 'Test product',
                                    'version': '1'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('short'), 'test')
        self.assertEqual(response.data.get('product'), 'test')
        self.assertNumChanges([1, 1])
        pv = models.ProductVersion.objects.get(product_version_id='test-1')
        self.assertEqual(pv.product.name, 'Test')
class ActiveCountTestCase(APITestCase):
    """Model-level tests for the derived active/release-count properties.

    A product (version) is active iff at least one of its releases is active;
    the counts are computed from the fixture data, not stored.
    """
    fixtures = ["pdc/apps/release/fixtures/tests/active-filter.json"]

    def test_active_for_product_version_with_mixed(self):
        pv = models.ProductVersion.objects.get(pk=3)
        self.assertTrue(pv.active)
        self.assertEqual(pv.release_count, 2)
        self.assertEqual(pv.active_release_count, 1)

    def test_active_for_product_version_with_active_only(self):
        pv = models.ProductVersion.objects.get(pk=2)
        self.assertTrue(pv.active)
        self.assertEqual(pv.release_count, 1)
        self.assertEqual(pv.active_release_count, 1)

    def test_active_for_product_version_with_inactive_only(self):
        pv = models.ProductVersion.objects.get(pk=4)
        self.assertFalse(pv.active)
        self.assertEqual(pv.release_count, 1)
        self.assertEqual(pv.active_release_count, 0)

    def test_active_for_product_with_mixed(self):
        p = models.Product.objects.get(pk=2)
        self.assertTrue(p.active)
        self.assertEqual(p.product_version_count, 3)
        self.assertEqual(p.active_product_version_count, 2)
        self.assertEqual(p.release_count, 4)
        self.assertEqual(p.active_release_count, 2)

    def test_active_for_product_with_active_only(self):
        p = models.Product.objects.get(pk=1)
        self.assertTrue(p.active)
        self.assertEqual(p.product_version_count, 1)
        self.assertEqual(p.active_product_version_count, 1)
        self.assertEqual(p.release_count, 1)
        self.assertEqual(p.active_release_count, 1)

    def test_active_for_product_with_inactive_only(self):
        p = models.Product.objects.get(pk=3)
        self.assertFalse(p.active)
        self.assertEqual(p.product_version_count, 1)
        self.assertEqual(p.active_product_version_count, 0)
        self.assertEqual(p.release_count, 1)
        self.assertEqual(p.active_release_count, 0)
class ActiveFilterTestCase(APITestCase):
    """Tests for the ?active= query filter on release, product-version and
    product list endpoints, including rejection of non-boolean values."""
    fixtures = ["pdc/apps/release/fixtures/tests/active-filter.json"]

    def test_filter_active_releases(self):
        response = self.client.get(reverse('release-list') + '?active=True')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['release_id'] for x in response.data['results']),
                         set(['x-1.0', 'y-1.0', 'y-2.0']))

    def test_filter_inactive_releases(self):
        response = self.client.get(reverse('release-list') + '?active=False')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['release_id'] for x in response.data['results']),
                         set(['y-2.1', 'y-3.0', 'z-1.0']))

    def test_filter_active_product_versions(self):
        response = self.client.get(reverse('productversion-list') + '?active=True')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['product_version_id'] for x in response.data['results']),
                         set(['x-1', 'y-1', 'y-2']))

    def test_filter_inactive_product_versions(self):
        response = self.client.get(reverse('productversion-list') + '?active=False')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['product_version_id'] for x in response.data['results']),
                         set(['y-3', 'z-1']))

    def test_filter_product_versions_with_invalid_value(self):
        response = self.client.get(reverse('productversion-list') + '?active=foo')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_active_products(self):
        response = self.client.get(reverse('product-list') + '?active=True')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['short'] for x in response.data['results']),
                         set(['x', 'y']))

    def test_filter_inactive_products(self):
        response = self.client.get(reverse('product-list') + '?active=False')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(set(x['short'] for x in response.data['results']),
                         set(['z']))

    def test__filter_products_with_invalid_value(self):
        response = self.client.get(reverse('product-list') + '?active=foo')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ReleaseRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD, validation and query-filter tests for the release REST endpoint.

    Each mutation test also checks ``assertNumChanges`` so the changeset
    bookkeeping (one list entry per request, value = number of changes in
    that request) is verified alongside the HTTP behavior.
    """
    fixtures = [
        "pdc/apps/repository/fixtures/tests/push_target.json",
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/product.json",
        "pdc/apps/release/fixtures/tests/base_product.json",
        "pdc/apps/release/fixtures/tests/product_version.json",
        "pdc/apps/bindings/fixtures/tests/releasedistgitmapping.json",
        "pdc/apps/common/fixtures/test/sigkey.json"
    ]
    def test_create_without_product_version(self):
        """Creating a minimal release fills in defaults and derives release_id."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        # Server-populated defaults expected back in the response payload.
        args.update({"active": True, 'allow_buildroot_push': False, 'integrated_with': None,
                     'base_product': None, 'product_version': None, 'compose_set': [],
                     'dist_git': None, 'release_id': 'f-20',
                     'bugzilla': None, 'sigkey': 'ABCDEF',
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        self.assertEqual(1, models.Release.objects.filter(release_id='f-20').count())
        self.assertEqual(dict(response.data), args)
        self.assertNumChanges([1])
    def test_create_with_bugzilla_mapping(self):
        """A nested bugzilla mapping is created alongside the release (2 changes)."""
        args = {"name": u"Fedora", "short": u"f", "version": u'20', "release_type": u"ga",
                "bugzilla": {"product": u"Fedora Bugzilla Product"}}
        response = self.client.post(reverse('release-list'), args, format='json')
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({"active": True, 'allow_buildroot_push': False, 'integrated_with': None,
                     'base_product': None, 'product_version': None, 'compose_set': [],
                     'dist_git': None, 'release_id': u'f-20', 'sigkey': 'ABCDEF',
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        self.assertEqual(ReleaseBugzillaMapping.objects.count(), 1)
        self.assertDictEqual(dict(response.data.pop('bugzilla')), args.pop('bugzilla'))
        self.assertDictEqual(dict(response.data), args)
        self.assertNumChanges([2])
    def test_create_with_dist_git_mapping(self):
        """A nested dist-git mapping is created alongside the release."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga",
                "dist_git": {"branch": "dist_git_branch"}}
        response = self.client.post(reverse('release-list'), args, format='json')
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({"active": True, 'integrated_with': None,
                     'base_product': None, 'product_version': None, 'compose_set': [],
                     'release_id': 'f-20', 'bugzilla': None, 'sigkey': 'ABCDEF', 'allow_buildroot_push': False,
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        # One mapping comes from the fixture, the second from this request.
        self.assertEqual(ReleaseDistGitMapping.objects.count(), 2)
        self.assertDictEqual(dict(response.data), args)
        self.assertNumChanges([2])
    def test_create_with_invalid_active(self):
        """A non-boolean `active` value is rejected with a field error."""
        args = {"name": "Fedora", "short": "f", "version": '20',
                "release_type": "ga", "active": "yes please"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertIn(u'"yes please" is not a valid boolean', response.data['active'][0])
    def test_create_with_invalid_allow_buildroot_push(self):
        """A non-boolean `allow_buildroot_push` value is rejected."""
        args = {"name": "Fedora", "short": "f", "version": '20',
                "release_type": "ga", "allow_buildroot_push": "wrong input"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertIn(u'"wrong input" is not a valid boolean', response.data['allow_buildroot_push'][0])
    def test_create_with_invalid_short(self):
        """`short` must be lowercase letters, numbers or dashes."""
        args = {"name": "Fedora", "short": "F", "version": '20', "release_type": "ga"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertIn('Only accept lowercase letters, numbers or -', response.data['short'])
    def test_create_with_extra_fields(self):
        """Unknown fields in the payload are rejected, not silently dropped."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga", "foo": "bar"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
    def test_create_duplicate(self):
        """Creating the same release twice fails the second time."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    def test_create_with_product_version(self):
        """A release linked to a product version is filterable by it."""
        args = {"name": "Our Awesome Product", "short": "product", "version": "1.1",
                "release_type": "ga", "product_version": "product-1"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'product_version': 'product-1',
                     'release_id': 'product-1.1', 'active': True, 'base_product': None,
                     'compose_set': [], 'dist_git': None,
                     'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF',
                     'allow_buildroot_push': False,
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        self.assertEqual(args, dict(response.data))
        self.assertEqual(1, models.Release.objects.filter(release_id='product-1.1').count())
        self.assertNumChanges([1])
        response = self.client.get(reverse('release-list') + '?product_version=product-1')
        self.assertEqual(1, response.data['count'])
    def test_create_with_base_product(self):
        """A release with a base product gets an `@base` suffixed release_id."""
        args = {"name": "Supplementary", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'base_product': 'product-1',
                     'active': True, 'compose_set': [], 'dist_git': None,
                     'release_id': 'supp-1.1@product-1', 'product_version': None,
                     'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF',
                     'allow_buildroot_push': False,
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        self.assertEqual(args, dict(response.data))
        self.assertNumChanges([1])
        response = self.client.get(reverse('release-list') + '?base_product=product-1')
        self.assertEqual(1, response.data['count'])
    def test_create_with_null_integrated_with(self):
        """An explicit null `integrated_with` is accepted on creation."""
        args = {"name": "Fedora", "short": "f", "version": "20", "release_type": "ga", "integrated_with": None}
        response = self.client.post(reverse('release-list'), args, format='json')
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertNumChanges([1])
    def test_update_with_patch_null_dist_git_mapping(self):
        """PATCHing dist_git=None on a release that has no mapping is a no-op 200."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga"}
        response = self.client.post(reverse('release-list'), args)
        args = {"dist_git": None}
        response = self.client.patch(reverse('release-detail', kwargs={'release_id': 'f-20'}), args, format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        # Only the fixture's mapping remains; none was created or removed here.
        self.assertEqual(ReleaseDistGitMapping.objects.count(), 1)
        self.assertEqual(response.data['dist_git'], None)
        self.assertNumChanges([1])
    def test_update_with_patch_dist_git_mapping_to_null(self):
        """PATCHing dist_git=None removes an existing dist-git mapping."""
        args = {"dist_git": None}
        response = self.client.patch(reverse('release-detail',
                                             kwargs={'release_id': 'release-1.0'}),
                                     args, format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(ReleaseDistGitMapping.objects.count(), 0)
        self.assertEqual(response.data['dist_git'], None)
        self.assertNumChanges([1])
    def test_query_with_filter(self):
        """Each supported query filter narrows the list to the expected count."""
        url = reverse('release-list')
        response = self.client.get(url + '?release_id=release-1.0')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?name=Test%20Release')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?short=release')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?version=1.0')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?release_type=ga')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?has_base_product=False')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?has_base_product=True')
        self.assertEqual(0, response.data['count'])
        response = self.client.get(url + '?bugzilla_product=null')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?allow_buildroot_push=False')
        self.assertEqual(1, response.data['count'])
        # Reuse the creation test to get a release with a bugzilla mapping.
        self.test_create_with_bugzilla_mapping()
        response = self.client.get(url + '?bugzilla_product=Fedora Bugzilla Product')
        self.assertEqual(1, response.data['count'])
        response = self.client.get(url + '?dist_git_branch=release_branch')
        self.assertEqual(1, response.data['count'])
    def test_query_unknown_filter(self):
        """Unknown query parameters are rejected with 400."""
        response = self.client.get(reverse('release-list'), {'foo': 'bar'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual('Unknown query params: foo.', response.data.get('detail'))
    def test_query_illegal_active_filter(self):
        """A non-boolean ?active filter value is rejected with 400."""
        response = self.client.get(reverse('release-list'), {'active': 'abcd'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_query_illegal_allow_buildroot_push_filter(self):
        """A non-boolean ?allow_buildroot_push filter value is rejected with 400."""
        response = self.client.get(reverse('release-list'), {'allow_buildroot_push': 'wrongvalue'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_query_with_multi_value_filter(self):
        """Repeating release_id in the query string matches all listed values."""
        args = {"name": "Fedora", "short": "f", "version": '20', "release_type": "ga",
                "dist_git": {"branch": "dist_git_branch"}}
        self.client.post(reverse('release-list'), args, format='json')
        url = reverse('release-list')
        response = self.client.get(url + '?release_id=release-1.0&release_id=f-20')
        self.assertEqual(response.data['count'], 2)
    def test_list_ordered(self):
        """Releases are returned in natural version order (1.9 before 1.10)."""
        release_type = models.ReleaseType.objects.get(short='ga')
        # Create in descending order to prove the API sorts, not insertion order.
        for x in range(11, 7, -1):
            models.Release.objects.create(short='product',
                                          name='Product',
                                          version='1.%d' % x,
                                          release_type=release_type)
        response = self.client.get(reverse('release-list'), {'short': 'product'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            [x.get('release_id') for x in response.data.get('results', [])],
            ['product-1.8', 'product-1.9', 'product-1.10', 'product-1.11']
        )
    def test_create_list_with_sigkey(self):
        """An explicit sigkey is stored and filterable on the list endpoint."""
        args = {"name": "Supplementary", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1", 'sigkey': 'ABCDEF'}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'base_product': 'product-1',
                     'active': True, 'compose_set': [], 'dist_git': None,
                     'release_id': 'supp-1.1@product-1', 'product_version': None,
                     'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF',
                     'allow_buildroot_push': False,
                     'allowed_debuginfo_services': [],
                     'allowed_push_targets': []})
        self.assertEqual(args, dict(response.data))
        self.assertNumChanges([1])
        response = self.client.get(reverse('release-list'), {'sigkey': 'ABCDEF'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(args, response.data['results'][0])
    def test_create_with_non_exist_sigkey(self):
        """Referencing a sigkey missing from the fixtures fails with 400."""
        args = {"name": "Supplementary", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1", 'sigkey': 'ABCD'}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    def test_create_with_default_sigkey(self):
        """Omitting sigkey falls back to the configured default ('ABCDEF')."""
        args = {"name": "TestSigkey", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1"}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        response = self.client.get(reverse('release-list'), {'name': 'TestSigkey'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual("ABCDEF", response.data['results'][0]["sigkey"])
    def test_create_list_with_allowed_debuginfo(self):
        """allowed_debuginfo_services are stored and filterable."""
        args = {"name": "Supplementary", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1",
                "allowed_push_targets": [],
                "allowed_debuginfo_services": ["rhn", "ftp"]}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        args.update({'base_product': 'product-1',
                     'active': True, 'compose_set': [], 'dist_git': None,
                     'release_id': 'supp-1.1@product-1', 'product_version': None,
                     'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF', 'allow_buildroot_push': False})
        self.assertEqual(args, dict(response.data))
        self.assertNumChanges([1])
        response = self.client.get(reverse('release-list'), {"allowed_debuginfo_services": "ftp"})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(args, response.data['results'][0])
    def test_create_with_non_exist_allowed_debuginfo_services(self):
        """An unknown debuginfo service name is rejected with 400."""
        args = {"name": "Supplementary", "short": "supp", "version": "1.1",
                "release_type": "ga", "base_product": "product-1",
                "allowed_push_targets": [],
                "allowed_debuginfo_services": ["test"]}
        response = self.client.post(reverse('release-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
class ReleaseCloneTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Tests for the release-clone endpoint: cloning a release into a new
    version, optionally filtering which variant/arch trees are copied and
    adjusting the bugzilla mapping on the clone.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
        "pdc/apps/bindings/fixtures/tests/releasedistgitmapping.json",
        "pdc/apps/common/fixtures/test/sigkey.json"
    ]
    def test_clone_new_version(self):
        """Cloning to a new version copies the release plus its variant tree."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(2, models.Release.objects.count())
        self.assertEqual(2, models.Variant.objects.count())
        self.assertEqual(2, models.VariantArch.objects.count())
        release = models.Release.objects.latest('id')
        self.assertEqual(release.variant_set.count(), 1)
        self.assertEqual(release.variant_set.all()[0].variantarch_set.count(), 1)
        self.assertDictEqual(response.data,
                             {'short': 'release', 'version': '1.1', 'release_type': 'ga',
                              'name': 'Test Release', 'dist_git': {'branch': 'release_branch'},
                              'product_version': None, 'base_product': None, 'active': True,
                              'release_id': 'release-1.1', 'compose_set': [],
                              'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF',
                              'allow_buildroot_push': False,
                              'allowed_debuginfo_services': [],
                              'allowed_push_targets': []})
        # 4 changes: release, dist-git mapping, variant, variant-arch.
        self.assertNumChanges([4])
    def test_clone_extra_fields(self):
        """Unknown fields on the clone request are rejected with no changes."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1', 'foo': 'bar'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
        self.assertNumChanges([])
    def test_clone_bad_variant_format_no_period(self):
        """include_trees entries must be 'Variant.arch'; no dot fails."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': ['sparkly-unicorn']},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Release.objects.filter(release_id='release-1.1').count(),
                         0)
    def test_clone_bad_variant_format_two_periods(self):
        """include_trees entries must be 'Variant.arch'; two dots fail."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': ['ponies.and.rainbows']},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Release.objects.filter(release_id='release-1.1').count(),
                         0)
    def test_clone_bad_variant_format_not_a_list(self):
        """include_trees must be a list; both '' and a bare string fail."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': ''},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('not a list', response.data['detail'][0])
        self.assertNumChanges([])
        self.assertEqual(models.Release.objects.filter(release_id='release-1.1').count(),
                         0)
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': 'not-a-list'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('not a list', response.data['detail'][0])
        self.assertNumChanges([])
        self.assertEqual(models.Release.objects.filter(release_id='release-1.1').count(),
                         0)
    def test_clone_variant_not_in_original(self):
        """Requesting a tree the source release does not have fails."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': ['Foo.Bar']},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Release.objects.filter(release_id='release-1.1').count(),
                         0)
    def test_clone_with_filter_variants(self):
        """include_trees restricts the clone to the listed variant.arch pairs."""
        # Add a second variant and arch so filtering has something to exclude.
        v = models.Variant.objects.create(
            release=models.Release.objects.get(release_id='release-1.0'),
            variant_uid='Client',
            variant_id='Client',
            variant_name='Client',
            variant_type=models.VariantType.objects.get(name='variant'),
        )
        models.VariantArch.objects.create(variant=v, arch_id=1)
        models.VariantArch.objects.create(
            variant=models.Variant.objects.get(variant_uid='Server'),
            arch_id=1
        )
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': ['Server.x86_64']},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(2, models.Release.objects.count())
        self.assertEqual(3, models.Variant.objects.count())
        self.assertEqual(4, models.VariantArch.objects.count())
        release = models.Release.objects.latest('id')
        self.assertEqual(release.variant_set.count(), 1)
        self.assertEqual(release.variant_set.all()[0].variantarch_set.count(), 1)
        # NOTE(review): assertItemsEqual exists only on Python 2's unittest;
        # Python 3 renamed it to assertCountEqual — confirm target interpreter.
        self.assertItemsEqual(release.trees, ['Server.x86_64'])
        self.assertNumChanges([4])
    def test_clone_with_explicit_empty_trees(self):
        """An explicitly empty include_trees clones the release without variants."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'include_trees': []},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(2, models.Release.objects.count())
        self.assertEqual(1, models.Variant.objects.count())
        self.assertEqual(1, models.VariantArch.objects.count())
        release = models.Release.objects.latest('id')
        self.assertEqual(release.variant_set.count(), 0)
        self.assertDictEqual(response.data,
                             {'short': 'release', 'version': '1.1', 'release_type': 'ga',
                              'name': 'Test Release', 'dist_git': {'branch': 'release_branch'},
                              'product_version': None, 'base_product': None, 'active': True,
                              'release_id': 'release-1.1', 'compose_set': [],
                              'bugzilla': None, 'integrated_with': None, 'sigkey': 'ABCDEF',
                              'allow_buildroot_push': False,
                              'allowed_debuginfo_services': [],
                              'allowed_push_targets': []})
        self.assertNumChanges([2])
    def test_clone_not_unique(self):
        """Cloning without changing any identifying field is rejected."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'active': False},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(1, models.Release.objects.count())
        self.assertNumChanges([])
    def test_clone_missing_old_release_id(self):
        """old_release_id is mandatory for a clone request."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'foo': 'bar'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(1, models.Release.objects.count())
    def test_clone_bad_param(self):
        """A clone override referencing an unknown product version fails."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'product_version': 'no'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(1, models.Release.objects.count())
    def test_clone_non_existing_release(self):
        """Cloning a release that does not exist yields 404."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-2.0', 'version': '2.1'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(1, models.Release.objects.count())
        self.assertNumChanges([])
    def test_clone_create_bugzilla_mapping(self):
        """A bugzilla mapping supplied in the clone request is created on the clone."""
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'bugzilla': {'product': 'Test Release 1'}},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get('bugzilla', {}).get('product'),
                         'Test Release 1')
        self.assertEqual(1, ReleaseBugzillaMapping.objects.count())
        self.assertNumChanges([5])
    def test_clone_old_bugzilla_mapping(self):
        """The source release's bugzilla mapping is copied onto the clone."""
        ReleaseBugzillaMapping.objects.create(
            release=models.Release.objects.get(release_id='release-1.0'),
            bugzilla_product='Test Release 1'
        )
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get('bugzilla', {}).get('product'),
                         'Test Release 1')
        self.assertEqual(2, ReleaseBugzillaMapping.objects.count())
        self.assertNumChanges([5])
    def test_clone_remove_bugzilla_mapping(self):
        """bugzilla=None in the clone request suppresses copying the mapping."""
        ReleaseBugzillaMapping.objects.create(
            release=models.Release.objects.get(release_id='release-1.0'),
            bugzilla_product='Test Release 1'
        )
        response = self.client.post(reverse('releaseclone-list'),
                                    {'old_release_id': 'release-1.0', 'version': '1.1',
                                     'bugzilla': None},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get('bugzilla'), None)
        self.assertEqual(1, ReleaseBugzillaMapping.objects.count())
        self.assertNumChanges([4])
class ReleaseRPMMappingViewSetTestCase(APITestCase):
    """Tests for the per-release RPM mapping detail view, which derives a
    package's variant/arch mapping from the release's latest compose and
    any override-RPM records.
    """
    fixtures = [
        "pdc/apps/common/fixtures/test/sigkey.json",
        "pdc/apps/package/fixtures/test/rpm.json",
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/compose/fixtures/tests/variant.json",
        "pdc/apps/compose/fixtures/tests/variant_arch.json",
        "pdc/apps/release/fixtures/tests/variant.json",
        "pdc/apps/release/fixtures/tests/variant_arch.json",
        "pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
        "pdc/apps/compose/fixtures/tests/compose.json",
        "pdc/apps/compose/fixtures/tests/compose_composerpm.json",
    ]
    def test_get_for_single_compose(self):
        """With exactly one compose, its mapping for the package is returned."""
        expected_data = {
            'compose': 'compose-1',
            'mapping': {
                'Server': {
                    'x86_64': {
                        'bash': ['x86_64'],
                    }
                }
            }
        }
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['release-1.0', 'bash']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected_data)
    def test_get_for_no_compose(self):
        """A release with no composes has no mapping -> 404."""
        compose_models.Compose.objects.filter(release__release_id='product-1.0').delete()
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['product-1.0', 'bash']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_get_for_no_compose_without_include(self):
        """No compose and no include-overrides means nothing to report -> 404."""
        compose_models.Compose.objects.filter(release__release_id='release-1.0').delete()
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['release-1.0', 'bash']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_get_for_no_compose_with_include(self):
        """An include-override alone yields a mapping even with no compose."""
        override = compose_models.OverrideRPM.objects.get(id=1)
        override.include = True
        override.save()
        compose_models.Compose.objects.filter(release__release_id='release-1.0').delete()
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['release-1.0', 'bash']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {"compose": None, "mapping": {"Server": {"x86_64": {"bash-doc": ["x86_64"]}}}})
    def test_get_for_nonexisting_release(self):
        """An unknown release id yields 404."""
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['product-1.1', 'bash']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_get_for_nonexisting_package(self):
        """An unknown package name yields 404."""
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['product-1.0', 'ponies']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_get_for_more_compose(self):
        # There is compose-1 with some rpms, and newer ComposeWithNoRPMs with
        # no rpms. The view grabs the newest mapping, finds there is nothing to
        # show as mapping and returns 404.
        release = models.Release.objects.get(release_id='release-1.0')
        compose_models.Compose.objects.create(
            release=release,
            compose_respin=0,
            compose_date='2015-01-30',
            compose_id='ComposeWithNoRPMs',
            compose_type=compose_models.ComposeType.objects.get(name='production'),
            acceptance_testing=compose_models.ComposeAcceptanceTestingState.objects.get(name='untested'),
        )
        response = self.client.get(reverse('releaserpmmapping-detail',
                                           args=['release-1.0', 'ponies']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_options_on_list_url(self):
        """OPTIONS on the release list endpoint is allowed (200)."""
        response = self.client.options(reverse('release-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class ReleaseUpdateRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
'pdc/apps/release/fixtures/tests/release.json',
"pdc/apps/bindings/fixtures/tests/releasedistgitmapping.json"
]
    def setUp(self):
        """Cache the detail URL, the release object and a full serialized
        payload for PUT requests; individual tests mutate copies of these.
        """
        self.url = reverse('release-detail', args=['release-1.0'])
        self.release = models.Release.objects.get(release_id='release-1.0')
        # Complete representation matching the fixture, used as a PUT baseline.
        self.serialized_release = {
            'short': 'release',
            'version': '1.0',
            'name': 'Test Release',
            'active': True,
            'dist_git': {'branch': 'release_branch'},
            'release_type': 'ga'
        }
def test_update(self):
response = self.client.put(self.url,
{'short': 'product', 'version': '1.0', 'release_type': 'ga',
'name': 'Our Product'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('name'), 'Our Product')
self.assertEqual(models.Release.objects.get(release_id='product-1.0').name,
'Our Product')
self.assertNumChanges([2])
def test_partial_update_empty(self):
url = reverse('release-detail', args=['product-1.0'])
response = self.client.patch(url, format='json', data={})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_update_missing_optional_fields_are_erased(self):
        """A full PUT that omits optional fields resets them to None/defaults."""
        # Attach both a product version and a base product so the PUT has
        # something to erase.
        self.release.product_version = models.ProductVersion.objects.create(
            product=models.Product.objects.create(short='p', name='Product'),
            short='p',
            version=1,
            name='Product Version'
        )
        release_type = models.ReleaseType.objects.get(short="ga")
        self.release.base_product = models.BaseProduct.objects.create(
            name='Base Product',
            short='bp',
            version='1',
            release_type=release_type,
        )
        self.release.save()
        response = self.client.put(reverse('release-detail', args=[self.release.release_id]),
                                   {'short': 'release',
                                    'version': '3.0',
                                    'release_type': 'ga',
                                    'name': 'update',
                                    },
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('release_id'), 'release-3.0')
        self.assertIsNone(response.data.get('base_product'))
        self.assertIsNone(response.data.get('product_version'))
        self.assertIsNone(response.data.get('dist_git'))
        self.assertIsNone(response.data.get('bugzilla'))
        self.assertIsNone(response.data.get('integrated_with'))
        self.assertEqual(response.data.get('active'), True)
        release = models.Release.objects.get(release_id='release-3.0')
        self.assertIsNone(release.dist_git_branch)
        self.assertIsNone(release.base_product)
        self.assertIsNone(release.product_version)
        self.assertNumChanges([2])
    def test_update_can_explicitly_erase_optional_field(self):
        """A PUT with an explicit dist_git=None removes the dist-git mapping."""
        response = self.client.put(self.url,
                                   {'short': 'release', 'version': '1.0', 'release_type': 'ga',
                                    'name': 'Test Release', 'dist_git': None},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsNone(response.data.get('dist_git'))
        self.assertIsNone(models.Release.objects.get(release_id='release-1.0').dist_git_branch)
        self.assertNumChanges([2])
    def test_update_can_reset_base_product(self):
        """PATCHing base_product=None detaches it and reverts the release_id."""
        release_type = models.ReleaseType.objects.get(short="ga")
        self.release.base_product = models.BaseProduct.objects.create(
            name='Base Product',
            short='bp',
            version='1',
            release_type=release_type,
        )
        self.release.save()
        # With a base product attached, the detail URL uses the @bp-1 suffix.
        response = self.client.patch(reverse('release-detail', args=['release-1.0@bp-1']),
                                     {'base_product': None}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The dist-git mapping mentioned in changelog because release_id changes.
        self.assertNumChanges([2])
        self.assertIsNone(response.data['base_product'])
        release = models.Release.objects.get(release_id='release-1.0')
        self.assertIsNone(release.base_product)
    def test_update_can_reset_product_version(self):
        """PATCHing product_version=None detaches it (release_id unchanged)."""
        self.release.product_version = models.ProductVersion.objects.create(
            name='Base Product',
            short='p',
            version='1',
            product=models.Product.objects.create(name='Product', short='p')
        )
        self.release.save()
        response = self.client.patch(reverse('release-detail', args=['release-1.0']),
                                     {'product_version': None}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertIsNone(response.data['product_version'])
        release = models.Release.objects.get(release_id='release-1.0')
        self.assertIsNone(release.product_version)
def test_update_can_explicitly_erase_optional_field_via_patch(self):
response = self.client.patch(self.url, {'dist_git': None}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsNone(response.data.get('dist_git'))
self.assertIsNone(models.Release.objects.get(release_id='release-1.0').dist_git_branch)
self.assertNumChanges([1])
def test_update_single_field(self):
response = self.client.patch(self.url, {'active': False}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertFalse(response.data.get('active'))
self.assertFalse(models.Release.objects.get(release_id='release-1.0').active)
self.assertNumChanges([1])
    def test_update_product_version(self):
        """PATCHing a product_version links the release to it bidirectionally."""
        # Create the product and product version the patch will reference.
        self.client.post(reverse('product-list'),
                         {'name': 'Test Release', 'short': 'release'},
                         format='json')
        self.client.post(reverse('productversion-list'),
                         {'name': 'Test Release', 'short': 'release', 'version': '1', 'product': 'release'},
                         format='json')
        response = self.client.patch(self.url, {'product_version': 'release-1'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('product_version'), 'release-1')
        self.assertNumChanges([1, 1, 1])
        response = self.client.get(reverse('productversion-detail', args=['release-1']))
        # NOTE(review): assertItemsEqual exists only on Python 2's unittest;
        # Python 3 renamed it to assertCountEqual — confirm target interpreter.
        self.assertItemsEqual(response.data.get('releases'), ['release-1.0'])
    def test_update_to_change_release_id(self):
        """Changing release_type rewrites release_id; old URL stops resolving."""
        response = self.client.patch(self.url, {'release_type': 'eus'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('release_type'), 'eus')
        self.assertEqual(response.data.get('release_id'), 'release-1.0-eus')
        # Dist git mapping is not changed as such, only its readable
        # representation returned by export().
        self.assertNumChanges([2])
        response = self.client.get(reverse('release-detail', args=['release-1.0']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('release-detail', args=['release-1.0-eus']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_add_bugzilla_mapping(self):
        """PATCHing a bugzilla product creates the mapping record."""
        response = self.client.patch(self.url,
                                     {'bugzilla': {'product': 'Test Product'}},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('bugzilla', {}).get('product'),
                         'Test Product')
        self.assertEqual(ReleaseBugzillaMapping.objects.get(release__release_id='release-1.0').bugzilla_product,
                         'Test Product')
        self.assertNumChanges([1])
    def test_update_bugzilla_mapping(self):
        """PATCHing a new bugzilla product replaces the existing mapping."""
        ReleaseBugzillaMapping.objects.create(release=self.release, bugzilla_product='Old product')
        response = self.client.patch(self.url, {'bugzilla': {'product': 'New product'}}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('bugzilla', {}).get('product'), 'New product')
        self.assertEqual(ReleaseBugzillaMapping.objects.get(release__release_id='release-1.0').bugzilla_product,
                         'New product')
        self.assertNumChanges([1])
def test_remove_bugzilla_mapping(self):
    """PUTting the serialized release with bugzilla=None drops the mapping.

    Fix: the original follow-up queries used release id 'product-1.0',
    which does not match this class's fixture release 'release-1.0'
    (see the other tests here), so the GET returned 404 and both
    assertions passed vacuously. They now query the real release.
    """
    ReleaseBugzillaMapping.objects.create(release=self.release, bugzilla_product='Old product')
    self.serialized_release['bugzilla'] = None
    response = self.client.put(self.url, self.serialized_release, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data.get('bugzilla'), None)
    response = self.client.get(reverse('release-detail', args=['release-1.0']))
    self.assertEqual(response.data.get('bugzilla'), None)
    self.assertEqual(ReleaseBugzillaMapping.objects.filter(release__release_id='release-1.0').count(), 0)
    self.assertNumChanges([2])
def test_remove_bugzilla_mapping_and_switch_active(self):
    """One PUT can both remove the bugzilla mapping and deactivate the release.

    Fix: the original follow-up queries used release id 'product-1.0',
    which does not exist in this class's fixture ('release-1.0'), so the
    GET returned 404 and the assertions passed vacuously. They now query
    the real release.
    """
    ReleaseBugzillaMapping.objects.create(release=self.release, bugzilla_product='Old product')
    self.serialized_release['bugzilla'] = None
    self.serialized_release['active'] = False
    response = self.client.put(self.url, self.serialized_release, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data.get('bugzilla'), None)
    self.assertFalse(response.data.get('active'))
    response = self.client.get(reverse('release-detail', args=['release-1.0']))
    self.assertEqual(response.data.get('bugzilla'), None)
    self.assertFalse(response.data.get('active'))
    self.assertEqual(ReleaseBugzillaMapping.objects.filter(release__release_id='release-1.0').count(), 0)
    self.assertNumChanges([2])
def test_missing_bugzilla_mapping_should_be_removed(self):
    """PUT without a bugzilla key removes an existing mapping entirely.

    Fix: the original follow-up queries used release id 'product-1.0',
    which does not match this class's fixture release 'release-1.0', so
    the assertions were vacuous. They now query the real release.
    """
    ReleaseBugzillaMapping.objects.create(release=self.release, bugzilla_product='Old product')
    response = self.client.put(self.url, self.serialized_release, format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIsNone(response.data.get('bugzilla'))
    response = self.client.get(reverse('release-detail', args=['release-1.0']))
    self.assertIsNone(response.data.get('bugzilla'))
    self.assertEqual(ReleaseBugzillaMapping.objects.filter(release__release_id='release-1.0').count(), 0)
    self.assertNumChanges([2])
def test_put_as_create_is_disabled(self):
    """PUT to a non-existing release id must 404, not create a release."""
    payload = {'short': 'test',
               'version': '3.1',
               'release_type': 'ga',
               'name': 'release'}
    resp = self.client.put(reverse('release-detail', args=['i-do-not-exist']),
                           payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    self.assertNumChanges([])
def test_update_bugzilla_with_wrong_field(self):
    """Unknown keys inside the bugzilla sub-object are rejected with 400."""
    url = reverse('release-detail', args=[self.release.release_id])
    resp = self.client.patch(url,
                             {'bugzilla': {'king': 'Richard III.'}},
                             format='json')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(resp.data, {'detail': 'Unknown fields: "king".'})
    self.assertNumChanges([])
def test_update_dist_git_with_wrong_field(self):
    """Unknown keys inside the dist_git sub-object are rejected with 400."""
    url = reverse('release-detail', args=[self.release.release_id])
    resp = self.client.patch(url,
                             {'dist_git': {'leaf': 'maple'}},
                             format='json')
    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(resp.data, {'detail': 'Unknown fields: "leaf".'})
    self.assertNumChanges([])
def test_patch_integrated_with(self):
    """integrated_with can be PATCHed to point at another existing release."""
    models.Release.objects.create(short='release', version='2.0', name='Test release',
                                  release_type=self.release.release_type)
    url = reverse('release-detail', args=[self.release.release_id])
    resp = self.client.patch(url, {'integrated_with': 'release-2.0'}, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data.get('integrated_with'), 'release-2.0')
    self.assertNumChanges([1])
def test_update_sigkey(self):
    """PUT can set the signing key alongside the mandatory fields."""
    payload = {'short': 'product', 'version': '1.0', 'release_type': 'ga',
               'name': 'Our Product', 'sigkey': 'ABCDEF'}
    resp = self.client.put(self.url, payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data.get('name'), 'Our Product')
    self.assertEqual(resp.data.get('sigkey'), 'ABCDEF')
def test_update_allowed_debuginfo(self):
    """PUT can set the allowed debuginfo services list."""
    payload = {'short': 'product', 'version': '1.0', 'release_type': 'ga',
               'name': 'Our Product',
               'allowed_push_targets': [],
               'allowed_debuginfo_services': ['rhn']}
    resp = self.client.put(self.url, payload, format='json')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    self.assertEqual(resp.data.get('name'), 'Our Product')
    self.assertEqual(resp.data.get('allowed_debuginfo_services'), ["rhn"])
class ReleaseLatestComposeTestCase(APITestCase):
    """Ordering and filtering behaviour of Release.get_latest_compose()."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/release.json",
    ]

    def setUp(self):
        self.release = models.Release.objects.get(release_id='release-1.0')
        self.ct_test = compose_models.ComposeType.objects.get(name='test')
        self.ct_prod = compose_models.ComposeType.objects.get(name='production')
        self.ct_nightly = compose_models.ComposeType.objects.get(name='nightly')
        self.untested = compose_models.ComposeAcceptanceTestingState.objects.get(name='untested')

    def _make_compose(self, compose_id, date, compose_type, respin=0, deleted=False):
        # All composes in these tests share the same release and
        # acceptance-testing state; only the ordering keys vary.
        return compose_models.Compose.objects.create(
            compose_id=compose_id,
            compose_date=date,
            compose_type=compose_type,
            compose_respin=respin,
            release=self.release,
            acceptance_testing=self.untested,
            deleted=deleted)

    def test_compare_by_date(self):
        """The later compose date wins."""
        self._make_compose('compose-1', '2015-02-10', self.ct_test)
        self._make_compose('compose-2', '2015-02-09', self.ct_test)
        self.assertEqual(self.release.get_latest_compose().compose_id, 'compose-1')

    def test_compare_by_date_then_respin(self):
        """On equal dates, the higher respin wins."""
        self._make_compose('compose-1', '2015-02-09', self.ct_test, respin=1)
        self._make_compose('compose-2', '2015-02-09', self.ct_test)
        self.assertEqual(self.release.get_latest_compose().compose_id, 'compose-1')

    def test_compare_by_date_then_respin_then_compose_type(self):
        """On equal date and respin, production beats test."""
        self._make_compose('compose-1', '2015-02-09', self.ct_prod)
        self._make_compose('compose-2', '2015-02-09', self.ct_test)
        self.assertEqual(self.release.get_latest_compose().compose_id, 'compose-1')

    def test_exclude_deleted(self):
        """Deleted composes are skipped even if they would sort first."""
        self._make_compose('compose-1', '2015-02-09', self.ct_prod, deleted=True)
        self._make_compose('compose-2', '2015-02-09', self.ct_test)
        self.assertEqual(self.release.get_latest_compose().compose_id, 'compose-2')
class ReleaseComposeLinkingTestCase(APITestCase):
    """Composes linked across variants are visible on all linked releases."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/compose_release_linking.json",
    ]

    def _release_detail(self, release_id):
        # Fetch a release detail and assert the request itself succeeded.
        response = self.client.get(reverse('release-detail', args=[release_id]))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response

    def test_linking_visible_in_rest(self):
        self.assertItemsEqual(
            self._release_detail('product-1.0-eus').data.get('compose_set'),
            ['compose-1'])
        self.assertItemsEqual(
            self._release_detail('product-1.0').data.get('compose_set'),
            ['compose-1', 'compose-2'])
        self.assertItemsEqual(
            self._release_detail('product-1.0-updates').data.get('compose_set'),
            ['compose-1', 'compose-2'])

    def test_deleted_compose_does_not_show_up(self):
        compose = compose_models.Compose.objects.get(compose_id='compose-1')
        compose.deleted = True
        compose.save()
        self.assertItemsEqual(
            self._release_detail('product-1.0-eus').data.get('compose_set'),
            [])
        self.assertItemsEqual(
            self._release_detail('product-1.0-updates').data.get('compose_set'),
            ['compose-2'])

    def test_linking_visible_in_web_ui(self):
        client = Client()
        page = client.get('/compose/1/')
        self.assertEqual(page.status_code, 200)
        body = str(page)
        self.assertIn('product-1.0', body)
        self.assertIn('product-1.0-updates', body)
        self.assertIn('product-1.0-eus', body)
        page = client.get('/compose/2/')
        self.assertEqual(page.status_code, 200)
        body = str(page)
        self.assertIn('product-1.0', body)
        self.assertIn('product-1.0-updates', body)
        self.assertNotIn('product-1.0-eus', body)

    def test_release_rpm_mapping_only_includes_variants_from_release(self):
        resp = self.client.get(reverse('releaserpmmapping-detail',
                                       args=['product-1.0-updates', 'bash']))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertDictEqual(resp.data,
                             {'compose': 'compose-1',
                              'mapping': {'Server': {'x86_64': {'bash': ['x86_64']}}}})

    def test_release_rpm_mapping_uses_overrides_from_linked_release(self):
        resp = self.client.get(reverse('releaserpmmapping-detail',
                                       args=['product-1.0-eus', 'bash']))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertDictEqual(resp.data,
                             {'compose': 'compose-1',
                              'mapping': {'Server': {'x86_64': {'bash': ['x86_64'],
                                                                'bash-doc': ['x86_64']},
                                                     'src': {'bash': ['x86_64']}},
                                          'Client': {'x86_64': {'bash': ['x86_64']}}}})
class ReleaseImportTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Importing composeinfo JSON creates products, releases and variants."""

    def test_import_correct_data(self):
        """A valid composeinfo import creates the whole object graph.

        The fixture describes a base product ``tp`` with an integrated
        layered product ``sap``: both products, product versions, releases
        and their variants/arches must be created, the integration links
        must point both ways, and re-posting the same payload at the end
        must succeed again (the import is idempotent).
        """
        with open('pdc/apps/release/fixtures/tests/composeinfo-0.3.json', 'r') as f:
            data = json.loads(f.read())
        response = self.client.post(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get('url'), '/rest_api/v1/releases/tp-1.0/')
        # One changeset covering all eleven created objects.
        self.assertNumChanges([11])
        self.assertEqual(models.Product.objects.count(), 2)
        self.assertEqual(models.ProductVersion.objects.count(), 2)
        self.assertEqual(models.Release.objects.count(), 2)
        self.assertEqual(models.Variant.objects.count(), 4)
        self.assertEqual(models.VariantArch.objects.count(), 6)
        self.assertEqual(models.BaseProduct.objects.count(), 1)
        # Base product side of the import.
        response = self.client.get(reverse('product-detail', args=['tp']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('product_versions'), ['tp-1'])
        response = self.client.get(reverse('productversion-detail', args=['tp-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('releases'), ['tp-1.0'])
        response = self.client.get(reverse('release-detail', args=['tp-1.0']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertDictEqual(dict(response.data),
                             {'short': 'tp', 'release_id': 'tp-1.0', 'version': '1.0',
                              'name': 'Test Product', 'product_version': 'tp-1',
                              'base_product': None, 'compose_set': [],
                              'integrated_with': None, 'bugzilla': None,
                              'active': True, 'release_type': 'ga', 'dist_git': None,
                              'sigkey': None, 'allow_buildroot_push': False,
                              'allowed_debuginfo_services': [],
                              'allowed_push_targets': []})
        release = models.Release.objects.get(release_id='tp-1.0')
        self.assertItemsEqual(release.trees,
                              ['Client.x86_64', 'Server.x86_64', 'Server.s390x',
                               'Server.ppc64', 'Server-SAP.x86_64'])
        # The SAP variant is integrated from the layered product's release.
        self.assertEqual(release.variant_set.get(variant_uid='Server-SAP').integrated_from.release_id,
                         'sap-1.0@tp-1')
        # Layered product side of the import.
        response = self.client.get(reverse('product-detail', args=['sap']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('product_versions'), ['sap-1'])
        response = self.client.get(reverse('productversion-detail', args=['sap-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('releases'), ['sap-1.0@tp-1'])
        response = self.client.get(reverse('release-detail', args=['sap-1.0@tp-1']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertDictEqual(dict(response.data),
                             {'short': 'sap', 'release_id': 'sap-1.0@tp-1', 'version': '1.0',
                              'name': 'SAP', 'product_version': 'sap-1',
                              'base_product': 'tp-1', 'compose_set': [],
                              'integrated_with': 'tp-1.0', 'bugzilla': None,
                              'active': True, 'release_type': 'ga', 'dist_git': None,
                              'sigkey': None, 'allow_buildroot_push': False,
                              'allowed_debuginfo_services': [],
                              'allowed_push_targets': []})
        release = models.Release.objects.get(release_id='sap-1.0@tp-1')
        self.assertItemsEqual(release.trees, ['Server-SAP.x86_64'])
        # And the reverse link: integrated_to points back to the base release.
        self.assertEqual(release.variant_set.get(variant_uid='Server-SAP').integrated_to.release_id,
                         'tp-1.0')
        # Re-importing the identical payload must still succeed.
        response = self.client.post(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data.get('url'), '/rest_api/v1/releases/tp-1.0/')

    def test_import_via_get(self):
        """GET on the import endpoint is not allowed."""
        data = {'garbage': 'really'}
        response = self.client.get(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_import_garbage(self):
        """A payload that is not composeinfo is rejected with 400."""
        data = {'garbage': 'really'}
        response = self.client.post(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_import_incorrect_layered_product_version_mismatch(self):
        """A re-import whose layered product version no longer matches fails."""
        with open('pdc/apps/release/fixtures/tests/composeinfo-0.3.json', 'r') as f:
            data = json.loads(f.read())
        # Import version 1.0
        response = self.client.post(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Bump release version and import again. Note that layered product
        # version remained the same.
        data['payload']['product']['version'] = '1.1'
        response = self.client.post(reverse('releaseimportcomposeinfo-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('version mismatch', response.content)
        self.assertIn('sap-1.0@tp-1', response.content)
class ReleaseTypeTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Read-only queries against the release type endpoint."""

    def test_list(self):
        resp = self.client.get(reverse("releasetype-list"))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 6)

    def test_filter(self):
        # Substring match on name, exact match on short.
        for query, expected in (({"name": "re"}, 2), ({"short": "ga"}, 1)):
            resp = self.client.get(reverse("releasetype-list"), data=query)
            self.assertEqual(resp.status_code, status.HTTP_200_OK)
            self.assertEqual(resp.data['count'], expected)

    def test_filter_multi_value(self):
        # Repeating the same query parameter ORs the values together.
        resp = self.client.get(reverse("releasetype-list") + '?short=ga&short=updates')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 2)
class VariantRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD, filtering and arch-manipulation coverage for the variant endpoint.

    The fixture provides 3 variants with 4 variant-arches in total; the
    object-count assertions throughout compare against that baseline.
    Variant detail URLs use the composite '<release_id>/<variant_uid>' key.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/variants_standalone.json",
    ]

    def test_create(self):
        """POSTing a complete variant creates it plus its arches."""
        args = {
            'uid': 'Variant-UID',
            'id': 'Variant-ID',
            'release': 'release-1.0',
            'name': 'Variant',
            'type': 'variant',
            'arches': ['x86_64']
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # The response echoes the input plus server-side defaults.
        expected = args.copy()
        expected.update({
            'arches': ['x86_64'],
            'variant_version': None,
            'variant_release': None,
            'allowed_push_targets': [],
        })
        self.assertEqual(response.data, expected)
        self.assertNumChanges([1])
        self.assertEqual(models.Variant.objects.count(), 4)
        self.assertEqual(models.VariantArch.objects.count(), 5)

    def test_create_missing_fields(self):
        """POST without uid/id/arches is rejected and creates nothing."""
        args = {
            'release': 'release-1.0',
            'name': 'Variant',
            'type': 'variant'
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_create_bad_release(self):
        """POST referencing a non-existing release is rejected."""
        args = {
            'uid': 'Variant-UID',
            'id': 'Variant-ID',
            'release': 'release-2.0',
            'name': 'Variant',
            'type': 'variant',
            'arches': ['x86_64'],
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_create_bad_variant_type(self):
        """POST with an unknown variant type is rejected."""
        args = {
            'uid': 'Variant-UID',
            'id': 'Variant-ID',
            'release': 'release-1.0',
            'name': 'Variant',
            'type': 'bad-type',
            'arches': ['x86_64'],
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_create_duplicit(self):
        """POSTing a variant that already exists in the release is rejected."""
        args = {
            'uid': 'Server-UID',
            'id': 'Server',
            'release': 'release-1.0',
            'name': 'Server name',
            'type': 'variant',
            'arches': ['x86_64'],
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_create_extra_fields(self):
        """POST with unrecognised keys is rejected, naming the bad key."""
        args = {
            'uid': 'Server-UID',
            'id': 'Server',
            'release': 'release-1.0',
            'name': 'Server name',
            'type': 'variant',
            'arches': ['x86_64'],
            'foo': 'bar',
        }
        response = self.client.post(reverse('variant-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_list(self):
        """All fixture variants are listed."""
        response = self.client.get(reverse('variant-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)

    def test_filter_id(self):
        response = self.client.get(reverse('variant-list'), {'id': 'Server'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_filter_uid(self):
        response = self.client.get(reverse('variant-list'), {'uid': 'Server-UID'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_filter_name(self):
        response = self.client.get(reverse('variant-list'), {'name': 'Server name'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_filter_type(self):
        response = self.client.get(reverse('variant-list'), {'type': 'variant'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_filter_release(self):
        # Filtering on a non-existing release yields an empty result, not 404.
        response = self.client.get(reverse('variant-list'), {'release': 'foo'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_update(self):
        """A full PUT replaces every mutable field of the variant."""
        args = {
            'uid': u'Workstation-UID',
            'id': u'Workstation',
            'release': u'release-1.0',
            'name': u'Workstation variant',
            'type': u'variant',
            'arches': ['ppc64', 'x86_64'],
            'variant_version': None,
            'variant_release': None,
            'allowed_push_targets': [],
        }
        response = self.client.put(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Arches are compared order-insensitively, everything else exactly.
        self.assertItemsEqual(response.data.pop('arches'), args.pop('arches'))
        self.assertDictEqual(dict(response.data), args)
        self.assertNumChanges([1])

    def test_update_missing_field(self):
        """PUT without the 'id' field is rejected."""
        args = {
            'uid': 'Workstation-UID',
            'release': 'release-1.0',
            'name': 'Workstation variant',
            'type': 'variant',
            'arches': ['ppc64', 'x86_64'],
        }
        response = self.client.put(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_update_bad_release(self):
        """PUT addressed to a variant of a non-existing release is 404."""
        args = {
            'uid': 'Workstation-UID',
            'id': 'Workstation',
            'release': 'release-1.0',
            'name': 'Workstation variant',
            'type': 'variant',
            'arches': ['ppc64', 'x86_64'],
        }
        response = self.client.put(reverse('variant-detail', args=['release-2.0/foo']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([])

    def test_patch(self):
        """PATCH can change a single field in isolation."""
        args = {
            'name': 'Workstation variant',
        }
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['name'], args['name'])
        self.assertNumChanges([1])

    def test_patch_bad_variant_type(self):
        args = {
            'type': 'whatever',
        }
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_patch_change_arches(self):
        """PATCHing 'arches' replaces the whole arch set."""
        args = {'arches': ['ia64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['arches'], ['ia64'])
        self.assertNumChanges([1])

    def test_patch_add_arches(self):
        """'add_arches' extends the existing arch set."""
        args = {'add_arches': ['ia64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['arches'], ['ia64', 'ppc64', 'x86_64'])
        self.assertNumChanges([1])
        self.assertEqual(models.VariantArch.objects.count(), 5)

    def test_patch_add_duplicit_arches(self):
        """Adding an arch the variant already has is an error."""
        args = {'add_arches': ['x86_64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_patch_remove_arches(self):
        """'remove_arches' deletes the named arches from the variant."""
        args = {'remove_arches': ['ppc64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['arches'], ['x86_64'])
        self.assertNumChanges([1])
        self.assertEqual(models.VariantArch.objects.count(), 3)

    def test_patch_add_and_remove_arches(self):
        """Adding and removing in one PATCH is allowed and counts as one change."""
        args = {'remove_arches': ['ppc64'], 'add_arches': ['ia64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['arches'], ['ia64', 'x86_64'])
        self.assertNumChanges([1])
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_patch_can_not_set_and_add_or_remove_arches(self):
        """Replacing 'arches' cannot be combined with add_arches."""
        args = {'arches': ['ppc64'], 'add_arches': ['ia64']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
        self.assertEqual(models.VariantArch.objects.count(), 4)

    def test_patch_bad_arch_value(self):
        """Both unknown arch names and non-string values are rejected."""
        args = {'arches': ['foo']}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        args = {'arches': [{'this': ['is', 'not', 'a', 'string']}]}
        response = self.client.patch(reverse('variant-detail', args=['release-1.0/Server-UID']),
                                     args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_retrieve(self):
        """The detail view serializes all variant fields."""
        response = self.client.get(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected = {
            'uid': 'Server-UID',
            'id': 'Server',
            'release': 'release-1.0',
            'name': 'Server name',
            'type': 'variant',
            'variant_version': None,
            'variant_release': None,
            'allowed_push_targets': [],
        }
        self.assertItemsEqual(response.data.pop('arches'), ['x86_64', 'ppc64'])
        self.assertDictEqual(dict(response.data), expected)

    def test_retrieve_non_existing(self):
        """Unknown variant, unknown release and malformed key all 404."""
        response = self.client.get(reverse('variant-detail', args=['release-1.0/foo']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.get(reverse('variant-detail', args=['release-2.0/foo']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        # It is impossible to construct following URL directly by reverse
        response = self.client.get(reverse('variant-list') + 'abc-def')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete(self):
        response = self.client.delete(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.Variant.objects.count(), 2)
        self.assertNumChanges([1])

    def test_delete_non_existing(self):
        response = self.client.delete(reverse('variant-detail', args=['release-1.0/foo']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.delete(reverse('variant-detail', args=['release-2.0/foo']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_bulk_delete(self):
        """DELETE on the list URL with a list of keys removes each variant."""
        response = self.client.delete(reverse('variant-list'),
                                      ['release-1.0/Client-UID', 'release-1.0/Server-UID'],
                                      format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.Variant.objects.count(), 1)
        self.assertNumChanges([2])

    def test_bulk_delete_bad_identifier(self):
        """A malformed key aborts the bulk delete without deleting anything."""
        response = self.client.delete(reverse('variant-list'),
                                      ['/release-1.0/Client-UID'],
                                      format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data,
                         {'detail': 'Not found.',
                          'id_of_invalid_data': '/release-1.0/Client-UID'})
        self.assertEqual(models.Variant.objects.count(), 3)
        self.assertNumChanges([])

    def test_bulk_partial_update_empty_data(self):
        """Bulk PATCH with an empty change set for a key is rejected."""
        response = self.client.patch(reverse('variant-list'),
                                     {'release-1.0/Server-UID': {}},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data,
                         {'detail': 'Partial update with no changes does not make much sense.',
                          'id_of_invalid_data': 'release-1.0/Server-UID'})
        self.assertNumChanges([])
class CPERESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD coverage for the CPE endpoint (fixture ships two CPEs)."""
    fixtures = [
        "pdc/apps/release/fixtures/tests/cpes.json",
    ]

    def test_list_cpe(self):
        resp = self.client.get(reverse('cpe-list'), format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 2)
        self.assertEqual(models.CPE.objects.count(), 2)

    def test_get_cpe(self):
        resp = self.client.get(reverse('cpe-detail', args=[1]))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # Drop the server-assigned primary key before comparing.
        del resp.data['id']
        self.assertEqual(resp.data, {"cpe": "cpe:test1", "description": "CPE Test 1"})

    def test_filter_cpe(self):
        name = "cpe:test1"
        resp = self.client.get(reverse('cpe-list'), {"cpe": name}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 1)
        self.assertEqual(resp.data['results'][0]['cpe'], name)

    def test_add_cpe(self):
        payload = {"cpe": "cpe:test-new", "description": "Test New"}
        resp = self.client.post(reverse('cpe-list'), payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        del resp.data['id']
        self.assertEqual(payload, resp.data)
        self.assertNumChanges([1])

    def test_add_cpe_without_description(self):
        # A missing description defaults to the empty string.
        payload = {"cpe": "cpe:test-new"}
        resp = self.client.post(reverse('cpe-list'), payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        del resp.data['id']
        payload['description'] = ''
        self.assertEqual(payload, resp.data)
        self.assertNumChanges([1])

    def test_add_bad_cpe(self):
        resp = self.client.post(reverse('cpe-list'), {"cpe": "not-cpe:"}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(resp.data.get('cpe', {}).get('detail'), 'CPE must start with "cpe:"')

    def test_add_duplicate(self):
        resp = self.client.post(reverse('cpe-list'), {"cpe": "cpe:test1"}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(resp.data, {'detail': ['CPE "cpe:test1" already exists.']})
        self.assertNumChanges([])

    def test_delete_cpe(self):
        resp = self.client.delete(reverse('cpe-detail', args=[1]))
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.CPE.objects.count(), 1)
        self.assertNumChanges([1])
class VariantCPERESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Coverage for variant-to-CPE bindings.

    The fixtures provide three standalone variants, two CPEs and one
    existing binding (id=1: release-1.0 / Client-UID / cpe:test1).
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/variants_standalone.json",
        "pdc/apps/release/fixtures/tests/cpes.json",
        "pdc/apps/release/fixtures/tests/variant_cpes.json",
    ]

    def test_list(self):
        response = self.client.get(reverse('variantcpe-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)

    def test_detail(self):
        """The detail view exposes the binding's release, variant and CPE."""
        data = {
            'id': 1,
            'release': 'release-1.0',
            'variant_uid': 'Client-UID',
            'cpe': 'cpe:test1',
        }
        response = self.client.get(reverse('variantcpe-detail', args=[1]))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, data)

    def test_filter(self):
        """Bindings can be filtered by release, variant_uid and cpe."""
        response = self.client.get(reverse('variantcpe-list'), {'release': 'release-1.0'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertEqual(response.data.get('results')[0].get('release'), 'release-1.0')
        response = self.client.get(reverse('variantcpe-list'), {'variant_uid': 'Client-UID'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertEqual(response.data.get('results')[0].get('variant_uid'), 'Client-UID')
        response = self.client.get(reverse('variantcpe-list'), {'cpe': 'cpe:test1'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertEqual(response.data.get('results')[0].get('cpe'), 'cpe:test1')

    def test_add_cpe(self):
        """A binding can be created once; re-posting it is rejected."""
        args = {
            'release': 'release-1.0',
            'variant_uid': 'Server-UID',
            'cpe': 'cpe:test1',
        }
        response = self.client.post(reverse('variantcpe-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        del response.data['id']
        self.assertEqual(args, response.data)
        self.assertNumChanges([1])
        # Add duplicate
        response = self.client.post(reverse('variantcpe-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data,
            {'detail': ['CPE(cpe:test1) binding for variant "release-1.0/Server-UID" already exists.']})
        self.assertNumChanges([1])
        # Add another cpe with same variant
        args['cpe'] = 'cpe:test2'
        response = self.client.post(reverse('variantcpe-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        del response.data['id']
        self.assertEqual(args, response.data)
        self.assertNumChanges([1, 1])

    def test_missing_cpe(self):
        """Binding to a CPE that does not exist is rejected."""
        args = {
            'release': 'release-1.0',
            'variant_uid': 'Server-UID',
            'cpe': 'cpe:test99',
        }
        response = self.client.post(reverse('variantcpe-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('detail'), 'cpe "cpe:test99" does not exist')

    def test_add_cpe_and_remove_variant(self):
        """Deleting a variant cascades to its CPE bindings."""
        args = {
            'release': 'release-1.0',
            'variant_uid': 'Server-UID',
            'cpe': 'cpe:test2',
        }
        response = self.client.post(reverse('variantcpe-list'), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(models.VariantCPE.objects.count(), 2)
        response = self.client.delete(reverse('variant-detail', args=['release-1.0/Server-UID']))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.VariantCPE.objects.count(), 1)

    def test_prevent_delete_used_cpe(self):
        """A CPE referenced by a binding cannot be deleted."""
        response = self.client.delete(reverse('cpe-detail', args=[1]))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(models.VariantCPE.objects.count(), 1)
        self.assertEqual(models.CPE.objects.count(), 2)
        self.assertNumChanges([])

    def test_patch_release(self):
        """PATCHing the release rebinds the CPE to the same uid in it."""
        url = reverse('variantcpe-detail', args=[1])
        args = {'release': 'release2-1.0'}
        response = self.client.patch(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        args = {
            'id': 1,
            'release': 'release2-1.0',
            'variant_uid': 'Client-UID',
            'cpe': 'cpe:test1',
        }
        self.assertEqual(args, response.data)
        self.assertNumChanges([1])

    def test_patch_variant_uid(self):
        """PATCHing the variant uid rebinds the CPE within the release."""
        url = reverse('variantcpe-detail', args=[1])
        args = {'variant_uid': 'Server-UID'}
        response = self.client.patch(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        args = {
            'id': 1,
            'release': 'release-1.0',
            'variant_uid': 'Server-UID',
            'cpe': 'cpe:test1',
        }
        self.assertEqual(args, response.data)
        self.assertNumChanges([1])

    def test_patch_non_existing_release(self):
        response = None  # NOTE(review): placeholder removed — see below
        url = reverse('variantcpe-detail', args=[1])
        args = {'release': 'bad_release'}
        response = self.client.patch(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data,
            {'detail': 'variant (release=bad_release, uid=Client-UID) does not exist'})
        self.assertNumChanges([])

    def test_patch_non_existing_variant(self):
        url = reverse('variantcpe-detail', args=[1])
        args = {'variant_uid': 'BAD-UID'}
        response = self.client.patch(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data,
            {'detail': 'variant (release=release-1.0, uid=BAD-UID) does not exist'})
        self.assertNumChanges([])
class ReleaseGroupRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD, ordering and validation tests for the release group API.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """
    fixtures = [
        "pdc/apps/release/fixtures/tests/release_group_types.json",
        "pdc/apps/release/fixtures/tests/release_groups.json",
        "pdc/apps/release/fixtures/tests/release.json"
    ]

    def test_list(self):
        url = reverse("releasegroups-list")
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)

    def test_retrieve_with_name(self):
        response = self.client.get(reverse("releasegroups-detail", args=["rhel_test"]))
        expect_result = {'active': True, 'type': u'Async',
                         'name': u'rhel_test', 'releases': [u'release-1.0'],
                         'description': u'test'}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expect_result)

    def test_override_ordering_by_description_key(self):
        # Default ordering puts 'rhel_test' first ...
        response = self.client.get(reverse("releasegroups-list"), format='json')
        expect_result = {'active': True, 'type': u'Async',
                         'name': u'rhel_test', 'releases': [u'release-1.0'],
                         'description': u'test'}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('results')[0], expect_result)
        # ... but ordering by description puts 'rhel_test1' ("good") first.
        response1 = self.client.get(reverse("releasegroups-list"), {'ordering': 'description'},
                                    format='json')
        expect_result1 = {'active': True, 'type': u'QuarterlyUpdate',
                          'name': u'rhel_test1', 'releases': [u'release-1.0'],
                          'description': u'good'}
        self.assertEqual(response1.status_code, status.HTTP_200_OK)
        self.assertEqual(response1.data.get('results')[0], expect_result1)

    def test_override_ordering_with_both_character(self):
        response = self.client.get(reverse("releasegroups-list"), format='json')
        expect_result = {'active': True, 'type': u'Async',
                         'name': u'rhel_test', 'releases': [u'release-1.0'],
                         'description': u'test'}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('results')[0], expect_result)
        url = reverse("releasegroups-list")
        # The first ordering key wins; '-description' only breaks ties.
        response = self.client.get(url + '?ordering=type,-description')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('results')[0], expect_result)
        response = self.client.get(url + '?ordering=description,-type')
        expect_result = {'active': True, 'type': u'QuarterlyUpdate',
                         'name': u'rhel_test1', 'releases': [u'release-1.0'],
                         'description': u'good'}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('results')[0], expect_result)

    def test_retrieve_with_description_para(self):
        # NOTE(review): `args=...` is not a query-string parameter of
        # ``client.get`` — the filter is effectively ignored on the detail
        # view, which is what this test demonstrates.
        response = self.client.get(reverse("releasegroups-detail", args=["rhel_test"]),
                                   args={'description': 'good'}, format='json')
        expect_result = {'active': True, 'type': u'Async',
                         'name': u'rhel_test', 'releases': [u'release-1.0'],
                         'description': u'test'}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expect_result)

    def test_create(self):
        args = {'type': 'Zstream', 'name': 'test', 'description': 'test_create',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])

    def test_create_without_name(self):
        args = {'type': 'Zstream', 'description': 'test_create',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_with_duplicate_name(self):
        args = {'type': 'Zstream', 'name': 'test', 'description': 'test_create',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Same name with a different type must still be rejected.
        args = {'type': 'QuarterlyUpdate', 'name': 'test', 'description': 'test',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_with_error_type(self):
        args = {'type': 'stream', 'name': 'test', 'description': 'test_create',
                'releases': ['release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_without_type(self):
        args = {'name': 'test', 'description': 'test_create',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_without_description(self):
        args = {'type': 'Zstream', 'name': 'test', 'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_with_error_key(self):
        # Unknown keys are rejected by the strict serializer.
        args = {'Error_key': 'test', 'type': 'Zstream', 'name': 'test', 'description': 'test_create',
                'releases': [u'release-1.0']}
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_bulk_create(self):
        args1 = {'type': 'Zstream', 'name': 'test_bulk1', 'description': 'test1',
                 'releases': [u'release-1.0']}
        args2 = {'type': 'Zstream', 'name': 'test_bulk2', 'description': 'test2',
                 'releases': [u'release-1.0']}
        args = [args1, args2]
        url = reverse("releasegroups-list")
        response = self.client.post(url, args, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([2])

    def test_update(self):
        args = {'type': 'QuarterlyUpdate', 'name': 'test_update', 'description': 'good',
                'releases': [u'release-1.0']}
        response = self.client.put(reverse("releasegroups-detail", args=['rhel_test']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])

    def test_bulk_update(self):
        args1 = {'type': 'Zstream', 'name': 'test_update1', 'description': 'test1'}
        args2 = {'type': 'Zstream', 'name': 'test_update2', 'description': 'test2'}
        data = {'rhel_test': args1, 'rhel_test1': args2}
        response = self.client.put(reverse("releasegroups-list"), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([2])

    def test_update_without_type(self):
        # PUT is a full update: omitting any required field must fail.
        self.test_create()
        args = {'name': 'test_update', 'description': 'good',
                'releases': [u'release-1.0']}
        response = self.client.put(reverse("releasegroups-detail", args=['test']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_without_name(self):
        self.test_create()
        args = {'type': 'QuarterlyUpdate', 'description': 'good',
                'releases': [u'release-1.0']}
        response = self.client.put(reverse("releasegroups-detail", args=['test']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_without_description(self):
        self.test_create()
        args = {'type': 'QuarterlyUpdate', 'name': 'test_update',
                'releases': [u'release-1.0']}
        response = self.client.put(reverse("releasegroups-detail", args=['test']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_with_error_release(self):
        args = {'type': 'QuarterlyUpdate', 'name': 'test_update', 'description': 'good',
                'releases': [u'release']}
        response = self.client.put(reverse("releasegroups-detail", args=['rhel_test']),
                                   args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete(self):
        response = self.client.delete(reverse('releasegroups-detail', args=['rhel_test']))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.ReleaseGroup.objects.count(), 1)
        self.assertNumChanges([1])

    def test_bulk_delete(self):
        response = self.client.delete(reverse('releasegroups-list'),
                                      ['rhel_test', 'rhel_test1'], format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(models.ReleaseGroup.objects.count(), 0)
        self.assertNumChanges([2])
class ReleaseLastModifiedResponseTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """The Last-Modified header on release endpoints must advance whenever a
    release is created or updated."""
    fixtures = [
        "pdc/apps/common/fixtures/test/sigkey.json",
        "pdc/apps/release/fixtures/tests/release.json",
        "pdc/apps/release/fixtures/tests/product.json",
        "pdc/apps/release/fixtures/tests/base_product.json",
        "pdc/apps/release/fixtures/tests/product_version.json",
        "pdc/apps/bindings/fixtures/tests/releasedistgitmapping.json"
    ]

    def _get_last_modified_epoch(self, response):
        # Turn the HTTP date header into an integer epoch timestamp.
        header_value = response.get('Last-Modified')
        parsed = time.strptime(header_value, "%a, %d %b %Y %H:%M:%S %Z")
        return int(time.mktime(parsed))

    def test_after_create_last_modified_time_should_change(self):
        list_url = reverse('release-list') + '?active=True'
        detail_url = reverse('release-detail', args=['release-1.0'])
        # Creating a release must bump the header on the list endpoint.
        epoch_before = self._get_last_modified_epoch(self.client.get(list_url))
        time.sleep(2)
        response = self.client.post(reverse('release-list'),
                                    {"name": "Fedora", "short": "f",
                                     "version": '20', "release_type": "ga"})
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(list_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 2)
        # ... and on the detail endpoint of an unrelated release as well.
        epoch_before = self._get_last_modified_epoch(self.client.get(detail_url))
        time.sleep(3)
        response = self.client.post(reverse('release-list'),
                                    {"name": "Fedora", "short": "f",
                                     "version": '21', "release_type": "ga"})
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(detail_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 3)

    def test_after_update_last_modified_time_should_change(self):
        list_url = reverse('release-list') + '?active=True'
        detail_url = reverse('release-detail', args=['release-1.0'])
        patch_url = reverse('release-detail', kwargs={'release_id': 'release-1.0'})
        # Updating a release must bump the header on the list endpoint.
        epoch_before = self._get_last_modified_epoch(self.client.get(list_url))
        time.sleep(2)
        response = self.client.patch(patch_url, {"active": False}, format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(list_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 2)
        # ... and on the detail endpoint too.
        epoch_before = self._get_last_modified_epoch(self.client.get(detail_url))
        time.sleep(3)
        response = self.client.patch(patch_url, {"name": 'test_name'}, format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(detail_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 3)
class ProductLastModifiedResponseTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """The Last-Modified header of the product list must advance when a
    product is created or modified (directly or via its product versions)."""
    fixtures = [
        "pdc/apps/common/fixtures/test/sigkey.json",
        "pdc/apps/release/fixtures/tests/product.json",
        "pdc/apps/release/fixtures/tests/base_product.json",
        "pdc/apps/release/fixtures/tests/product_version.json"
    ]

    def _get_last_modified_epoch(self, response):
        # Turn the HTTP date header into an integer epoch timestamp.
        header_value = response.get('Last-Modified')
        parsed = time.strptime(header_value, "%a, %d %b %Y %H:%M:%S %Z")
        return int(time.mktime(parsed))

    def test_after_create_last_modified_time_should_change(self):
        list_url = reverse('product-list')
        epoch_before = self._get_last_modified_epoch(self.client.get(list_url))
        time.sleep(2)
        response = self.client.post(list_url, {"name": "Fedora", "short": "f"})
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(list_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 2)

    def test_after_update_last_modified_time_should_change(self):
        list_url = reverse('product-list')
        epoch_before = self._get_last_modified_epoch(self.client.get(list_url))
        time.sleep(2)
        self.client.patch(reverse('product-detail', args=['product']),
                          {'name': 'changed_name'}, format='json')
        epoch_after = self._get_last_modified_epoch(self.client.get(list_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 2)

    def test_change_product_verion_modified_time_should_change(self):
        # NOTE(review): "verion" typo in the method name is kept so the test
        # id stays stable for any tooling that selects tests by name.
        list_url = reverse('product-list')
        epoch_before = self._get_last_modified_epoch(self.client.get(list_url))
        time.sleep(3)
        # Adding a product version should also touch the parent product.
        payload = {"name": "Our Awesome Product", "short": "product",
                   "version": "2", "product": "product"}
        response = self.client.post(reverse('productversion-list'), payload)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        epoch_after = self._get_last_modified_epoch(self.client.get(list_url))
        self.assertGreaterEqual(epoch_after - epoch_before, 3)
class AllowedPushTargetsRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Tests for ``allowed_push_targets`` filtering and inheritance across the
    product -> product version -> release -> variant hierarchy.

    The repeated GET/assert sequences are factored into two helpers; the
    filter checks are also strengthened to verify *every* returned result
    (the original only checked the first one or two).
    """
    fixtures = [
        "pdc/apps/common/fixtures/test/sigkey.json",
        "pdc/apps/repository/fixtures/tests/push_target.json",
        "pdc/apps/release/fixtures/tests/allowed_push_targets.json",
    ]

    def _check_filtered_list(self, url_name, push_target, expected_count):
        # Helper: GET a list endpoint filtered by push target and verify the
        # status, result count, and that each result allows the push target.
        response = self.client.get(reverse(url_name), {'allowed_push_targets': push_target})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), expected_count)
        for result in response.data.get('results'):
            self.assertIn(push_target, result.get('allowed_push_targets'))

    def _check_detail_push_targets(self, url_name, url_args, expected):
        # Helper: GET a detail endpoint and verify the exact set of
        # (possibly inherited) allowed push targets.
        response = self.client.get(reverse(url_name, args=url_args))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(expected, set(response.data.get('allowed_push_targets')))

    def test_filter_product_by_push_target(self):
        for push_target in ['rhn-live', 'rhn-stage', 'rhn-qa']:
            self._check_filtered_list('product-list', push_target, 1)

    def test_filter_product_version_by_push_target(self):
        self._check_filtered_list('productversion-list', 'rhn-qa', 1)
        self._check_filtered_list('productversion-list', 'rhn-live', 2)

    def test_filter_release_by_push_target(self):
        self._check_filtered_list('release-list', 'rhn-qa', 1)
        self._check_filtered_list('release-list', 'rhn-live', 2)

    def test_filter_variant_by_push_target(self):
        self._check_filtered_list('variant-list', 'rhn-qa', 1)
        self._check_filtered_list('variant-list', 'rhn-live', 1)

    def test_create_product_with_valid_push_targets(self):
        allowed_push_targets = ['rhn-live', 'rhn-stage', 'rhn-qa']
        args = {'name': 'Fedora', 'short': 'f', 'allowed_push_targets': allowed_push_targets}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(set(allowed_push_targets), set(response.data.get('allowed_push_targets')))
        self.assertNumChanges([1])

    def test_create_product_with_non_existing_push_targets(self):
        args = {'name': 'Fedora', 'short': 'f', 'allowed_push_targets': ['rhn-test']}
        response = self.client.post(reverse('product-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(
            response.data.get('allowed_push_targets'), ['Object with name=rhn-test does not exist.'])
        self.assertNumChanges([])

    def test_get_product_version_inherited_allowed_push_targets(self):
        self._check_detail_push_targets('productversion-detail', ['product-1'],
                                        {'rhn-live', 'rhn-stage'})

    def test_get_release_inherited_allowed_push_targets(self):
        self._check_detail_push_targets('release-detail', ['product-1.0'],
                                        {'rhn-live', 'rhn-stage'})

    def test_get_variant_inherited_allowed_push_targets(self):
        self._check_detail_push_targets('variant-detail', ['product-1.0/Server'],
                                        {'rhn-live', 'rhn-stage'})

    def test_update_product_propagates_allowed_push_targets(self):
        allowed_push_targets = {'rhn-live', 'rhn-stage', 'rhn-qa'}
        response = self.client.patch(
            reverse('product-detail', args=['product']), {'allowed_push_targets': list(allowed_push_targets)})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(allowed_push_targets, set(response.data.get('allowed_push_targets')))
        self.assertNumChanges([1])
        # The update is visible on every level below the product.
        self._check_detail_push_targets('productversion-detail', ['product-1'], allowed_push_targets)
        self._check_detail_push_targets('release-detail', ['product-1.0'], allowed_push_targets)
        self._check_detail_push_targets('variant-detail', ['product-1.0/Server'], allowed_push_targets)

    def test_update_product_version_propagates_allowed_push_targets(self):
        old_allowed_push_targets = {'rhn-live', 'rhn-stage'}
        allowed_push_targets = {'rhn-live'}
        response = self.client.patch(
            reverse('productversion-detail', args=['product-1']), {'allowed_push_targets': list(allowed_push_targets)})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(allowed_push_targets, set(response.data.get('allowed_push_targets')))
        self.assertNumChanges([1])
        # Parent product keeps its original value; children inherit the new one.
        self._check_detail_push_targets('product-detail', ['product'], old_allowed_push_targets)
        self._check_detail_push_targets('release-detail', ['product-1.0'], allowed_push_targets)
        self._check_detail_push_targets('variant-detail', ['product-1.0/Server'], allowed_push_targets)

    def test_update_release_propagates_allowed_push_targets(self):
        old_allowed_push_targets = {'rhn-live', 'rhn-stage'}
        allowed_push_targets = {'rhn-live'}
        response = self.client.patch(
            reverse('release-detail', args=['product-1.0']), {'allowed_push_targets': list(allowed_push_targets)})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(allowed_push_targets, set(response.data.get('allowed_push_targets')))
        self.assertNumChanges([1])
        # Ancestors keep their original values; only the variant inherits.
        self._check_detail_push_targets('product-detail', ['product'], old_allowed_push_targets)
        self._check_detail_push_targets('productversion-detail', ['product-1'], old_allowed_push_targets)
        self._check_detail_push_targets('variant-detail', ['product-1.0/Server'], allowed_push_targets)

    def test_create_product_version_with_valid_allowed_push_targets(self):
        allowed_push_targets = ['rhn-live']
        args = {'name': 'Our Awesome Product', 'short': 'product',
                'version': '3', 'product': 'product',
                'allowed_push_targets': allowed_push_targets}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(response.data.get('allowed_push_targets'), allowed_push_targets)
        self.assertNumChanges([1])

    def test_create_product_version_with_not_allowed_push_targets(self):
        # Push targets outside the parent product's set are rejected.
        args = {'name': 'Our Awesome Product', 'short': 'product',
                'version': '3', 'product': 'product',
                'allowed_push_targets': ['rhn-qa']}
        response = self.client.post(reverse('productversion-list'), args)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data.get('detail'), ["Push targets must be allowed in parent product: [u'rhn-qa']"])
        self.assertNumChanges([])

    def test_patch_bad_variant_allowed_push_targets(self):
        # Push targets outside the parent release's set are rejected.
        args = {'allowed_push_targets': ['rhn-live', 'rhn-qa']}
        response = self.client.patch(reverse('variant-detail', args=['product-1.0/Server']), args, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('detail'), ["Push targets must be allowed in parent release: [u'rhn-qa']"])
        self.assertNumChanges([])
| mit | e130bd544f2173479d2acbdce94449d5 | 51.001868 | 119 | 0.596765 | 3.873111 | false | true | false | false |
product-definition-center/product-definition-center | pdc/apps/repository/views.py | 2 | 13569 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from rest_framework import mixins, viewsets, status
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from . import models
from . import serializers
from . import filters
from pdc.apps.auth.permissions import APIPermission
from pdc.apps.common.constants import PUT_OPTIONAL_PARAM_WARNING
from pdc.apps.common.viewsets import (StrictQueryParamMixin,
PDCModelViewSet)
from pdc.apps.release.models import Release
from pdc.apps.common import hacks
from pdc.apps.common.serializers import StrictSerializerMixin
class RepoViewSet(PDCModelViewSet):
    """
    An API endpoint providing access to content delivery repositories.
    """
    # Full CRUD viewset; create/update/delete are change-tracked by PDCModelViewSet.
    queryset = models.Repo.objects.all().select_related().order_by('id')
    serializer_class = serializers.RepoSerializer
    filter_class = filters.RepoFilter
    # Extra macros substituted into the doc_* templates when docs are rendered.
    docstring_macros = PUT_OPTIONAL_PARAM_WARNING
    # The doc_* attributes below are templates for the generated API
    # documentation; $LINK:...$ and %(...)s placeholders are expanded by the
    # documentation framework, so their content must stay exactly as served.
    doc_create = """
    __Method__: `POST`
    __URL__: $LINK:contentdeliveryrepos-list$
    __Data__:
        %(WRITABLE_SERIALIZER)s
    * *content_category*: $LINK:contentdeliverycontentcategory-list$
    * *content_format*: $LINK:contentdeliverycontentformat-list$
    * *repo_family*: $LINK:contentdeliveryrepofamily-list$
    * *service*: $LINK:contentdeliveryservice-list$
    __Response__: Same as input data.
    """
    doc_retrieve = """
    __Method__: `GET`
    __URL__: $LINK:contentdeliveryrepos-detail:id$
    __Response__:
    %(SERIALIZER)s
    """
    doc_list = """
    __Method__: `GET`
    __URL__: $LINK:contentdeliveryrepos-list$
    __Query params__:
    %(FILTERS)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_update = """
    %(PUT_OPTIONAL_PARAM_WARNING)s
    __Method__: `PUT`, `PATCH`
    __URL__: $LINK:contentdeliveryrepos-detail:id$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_destroy = """
    __Method__: `DELETE`
    __URL__: $LINK:contentdeliveryrepos-detail:id$
    """
class RepoCloneViewSet(StrictQueryParamMixin, viewsets.GenericViewSet):
    """
    Please access this endpoint by $LINK:cdreposclone-list$.
    Endpoint $LINK:repoclone-list$ is deprecated.
    """
    queryset = models.Repo.objects.none()  # Required for permissions
    permission_classes = (APIPermission,)

    def create(self, request):
        """
        Clone all content delivery repositories from one release under another release.
        The call is atomic, i.e. either all content delivery repositories are cloned or nothing
        is done.
        If the source and target releases do not have the same variants, the
        cloning will silently ignore content delivery repositories with Variant.Arch that is
        present in source release but not in target release. It is not a
        problem if the target release has additional variants.
        __Method__: `POST`
        __URL__: $LINK:cdreposclone-list$
        __Data__:
            {
                "release_id_from": string,
                "release_id_to": string
                "include_service": [string], # optional
                "include_repo_family": [string], # optional
                "include_content_format": [string], # optional
                "include_content_category": [string], # optional
                "include_shadow": bool, # optional
                "include_product_id": int # optional
            }
        The `include_*` keys are used to filter which releases should be
        cloned. If any key is omitted, all values for that attribute will be
        cloned.
        __Response__:
        The call returns a list of content delivery repositories created under target release.
            [
                {
                    "shadow": bool,
                    "release_id": string,
                    "variant_uid": string,
                    "arch": string,
                    "service": string,
                    "repo_family": string,
                    "content_format": string,
                    "content_category": string,
                    "name": string,
                    "product_id": int
                },
                ...
            ]
        """
        data = request.data
        required_keys = set(['release_id_from', 'release_id_to'])
        # Maps each optional request key to (queryset filter name, value transform).
        arg_filter_map = {'include_service': ('service__name__in', hacks.as_list),
                          'include_repo_family': ('repo_family__name__in', hacks.as_list),
                          'include_content_format': ('content_format__name__in', hacks.as_list),
                          'include_content_category': ('content_category__name__in', hacks.as_list),
                          'include_shadow': ('shadow', hacks.convert_str_to_bool),
                          'include_product_id': ('product_id', hacks.convert_str_to_int)}
        # list(arg_filter_map) instead of .keys() keeps this working on both
        # Python 2 and Python 3 (py3 dict views can not be concatenated).
        allowed_keys = list(required_keys) + list(arg_filter_map)
        missing_keys = required_keys - set(data.keys())
        if missing_keys:
            errors = dict((key, ['This field is required.']) for key in missing_keys)
            return Response(status=status.HTTP_400_BAD_REQUEST, data=errors)
        extra_keys = set(data.keys()) - set(allowed_keys)
        StrictSerializerMixin.maybe_raise_error(extra_keys)
        # Both releases must exist; only the target object is needed later.
        get_object_or_404(Release, release_id=data['release_id_from'])
        target_release = get_object_or_404(Release, release_id=data['release_id_to'])
        kwargs = {
            'variant_arch__variant__release__release_id': data['release_id_from']
        }
        # `filter_key` (not `filter`) avoids shadowing the builtin; .items()
        # works on both Python 2 and 3 (iteritems() is Python 2 only).
        for arg, (filter_key, transform) in arg_filter_map.items():
            arg_data = request.data.get(arg)
            if arg_data:
                kwargs[filter_key] = transform(arg_data, name=arg)
        repos = models.Repo.objects.filter(**kwargs)
        # Skip repos from nonexisting trees. (An empty `repos` queryset also
        # yields an empty list here, so a single emptiness check suffices.)
        repos_in_target_release = [repo for repo in repos if repo.tree in target_release.trees]
        if not repos_in_target_release:
            return Response(status=status.HTTP_400_BAD_REQUEST,
                            data={'detail': 'No repos to clone.'})
        serializer = serializers.RepoSerializer(repos_in_target_release, many=True)
        copy = serializer.data
        for repo in copy:
            # The serializer will reject read-only fields, so we need to drop the id.
            del repo['id']
            repo['release_id'] = target_release.release_id
        new_repos = serializers.RepoSerializer(data=copy, many=True)
        if not new_repos.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST,
                            data={'detail': dict((repo['name'], err)
                                                 for repo, err in zip(copy, new_repos.errors)
                                                 if err)})
        # Record every created repo in the request changeset for auditing.
        for raw_repo, repo_obj in zip(copy, new_repos.save()):
            request.changeset.add('Repo', repo_obj.pk,
                                  'null', json.dumps(raw_repo))
        return Response(status=status.HTTP_200_OK, data=new_repos.data)
class RepoFamilyViewSet(StrictQueryParamMixin,
                        mixins.ListModelMixin,
                        viewsets.GenericViewSet):
    """
    ##Overview##
    This page shows the usage of the **ContentDeliveryRepoFamily API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # Read-only: only the list action is exposed (ListModelMixin, no detail route).
    queryset = models.RepoFamily.objects.all().order_by('id')
    serializer_class = serializers.RepoFamilySerializer
    filter_class = filters.RepoFamilyFilter
    permission_classes = (APIPermission,)
    # Template rendered into the browsable API documentation; placeholders
    # are expanded by the documentation framework, so keep content as served.
    doc_list = """
    __Method__: `GET`
    __URL__: $LINK:contentdeliveryrepofamily-list$
    __Query params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    __Example__:
        $ curl "$URL:contentdeliveryrepofamily-list$
        {
            "count": 3,
            "next": null,
            "previous": null,
            "results": [
                {
                    "name": "dist",
                    "description": "Production content delivery repositories"
                },
                {
                    "name": "beta",
                    "description": "Beta (pre-production) content delivery repositories"
                },
                {
                    "name": "htb",
                    "description": "Content delivery repositories for High Touch Beta (HTB) customers"
                }
            ]
        }
    """
class ContentCategoryViewSet(StrictQueryParamMixin,
                             mixins.ListModelMixin,
                             viewsets.GenericViewSet):
    """
    API endpoint that allows content_category to be viewed.
    """
    # Read-only: only the list action is exposed (ListModelMixin, no detail route).
    serializer_class = serializers.ContentCategorySerializer
    queryset = models.ContentCategory.objects.all().order_by('id')
    permission_classes = (APIPermission,)
    # Template rendered into the browsable API documentation.
    doc_list = """
    __Method__: GET
    __URL__: $LINK:contentdeliverycontentcategory-list$
    __Response__:
    %(SERIALIZER)s
    """
class ContentFormatViewSet(StrictQueryParamMixin,
                           mixins.ListModelMixin,
                           viewsets.GenericViewSet):
    """
    API endpoint that allows content_format to be viewed.
    """
    # Read-only: only the list action is exposed (ListModelMixin, no detail route).
    serializer_class = serializers.ContentFormatSerializer
    queryset = models.ContentFormat.objects.all().order_by('id')
    permission_classes = (APIPermission,)
    # Template rendered into the browsable API documentation.
    doc_list = """
    __Method__: GET
    __URL__: $LINK:contentdeliverycontentformat-list$
    __Response__:
    %(SERIALIZER)s
    """
class ServiceViewSet(StrictQueryParamMixin,
                     mixins.ListModelMixin,
                     viewsets.GenericViewSet):
    """
    Read-only API endpoint for listing content delivery services.
    """
    # Deterministic ordering keeps paginated listings stable between requests.
    queryset = models.Service.objects.all().order_by('id')
    serializer_class = serializers.ServiceSerializer
    permission_classes = (APIPermission,)
    # Rendered into the generated API docs; placeholders expanded elsewhere.
    doc_list = """
    __Method__: GET
    __URL__: $LINK:contentdeliveryservice-list$
    __Response__:
    %(SERIALIZER)s
    """
class PushTargetViewSet(PDCModelViewSet):
    """
    Push targets for products, product versions, releases and release variants.
    """
    # Full CRUD viewset; PDCModelViewSet supplies create/retrieve/list/update/destroy.
    queryset = models.PushTarget.objects.all()
    serializer_class = serializers.PushTargetSerializer
    filter_class = filters.PushTargetFilter
    permission_classes = (APIPermission,)
    # The doc_* strings below are rendered into the browsable API documentation;
    # $LINK/$URL markers and %(...)s placeholders are expanded by the doc machinery.
    doc_create = """
    __Method__: POST
    __URL__: $LINK:pushtarget-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_retrieve = """
    __Method__: GET
    __URL__: $LINK:pushtarget-detail:instance_pk$
    __Response__:
    %(SERIALIZER)s
    """
    doc_list = """
    __Method__: GET
    __URL__: $LINK:pushtarget-list$
    __Query params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    """
    doc_update = """
    __Method__: PUT, PATCH
    __URL__: $LINK:pushtarget-detail:instance_pk$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_destroy = """
    __Method__: `DELETE`
    __URL__: $LINK:pushtarget-detail:instance_pk$
    __Response__:
    On success, HTTP status code is 204 and the response has no content.
    """
class MultiDestinationViewSet(PDCModelViewSet):
    """
    Multi-destinations (multi-product) for mapping global component files
    from an origin repository to a destination repository.
    """
    # select_related() pre-fetches the related FK rows in one query to avoid
    # per-object lookups during serialization.
    queryset = models.MultiDestination.objects.all().select_related()
    serializer_class = serializers.MultiDestinationSerializer
    filter_class = filters.MultiDestinationFilter
    permission_classes = (APIPermission,)
    # The doc_* strings below feed the generated API documentation;
    # $LINK/$URL markers and %(...)s placeholders are expanded by the doc machinery.
    doc_create = """
    __Method__: POST
    __URL__: $LINK:multidestination-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_retrieve = """
    __Method__: GET
    __URL__: $LINK:multidestination-detail:instance_pk$
    __Response__:
    %(SERIALIZER)s
    """
    doc_list = """
    __Method__: GET
    __URL__: $LINK:multidestination-list$
    __Query params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    """
    doc_update = """
    __Method__: PUT, PATCH
    __URL__: $LINK:multidestination-detail:instance_pk$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_destroy = """
    __Method__: `DELETE`
    __URL__: $LINK:multidestination-detail:instance_pk$
    __Response__:
    On success, HTTP status code is 204 and the response has no content.
    """
| mit | df32541f4b345ce82af8dab658b70cb8 | 27.26875 | 106 | 0.558626 | 4.339303 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/auth/middleware.py | 2 | 2657 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.contrib import auth
from django.conf import settings
from django.contrib.auth import load_backend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core.exceptions import ImproperlyConfigured
class RemoteUserMiddleware(RemoteUserMiddleware):
    """
    Variant of Django's RemoteUserMiddleware that tolerates a missing
    REMOTE_USER header, so anonymous access and Kerberos login can coexist.

    Deliberately shadows the imported class name so settings referring to
    this module pick up the customized behaviour.
    """
    def process_request(self, request):
        # Overrides process_request from django.contrib.auth.middleware
        # because the stock version forces a user logout when the
        # REMOTE_USER header is not present, which is a problem when
        # deploying with Kerberos authentication and we need to enable
        # both anonymous access and Kerberos login.
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        # Development convenience: fake the remote-user header from settings.
        if settings.DEBUG and getattr(settings, "DEBUG_USER", None):
            request.META[self.header] = settings.DEBUG_USER
        try:
            username = request.META[self.header]
        except KeyError:
            # When a page which requires Kerberos login was redirected from
            # the Kerberos login entrance, the 'REMOTE_USER' header is lost
            # in request meta; the stock RemoteUserMiddleware would push it
            # into a redirect loop. Treat the request as anonymous instead.
            return
        # If the user is already authenticated and that user is the user we are
        # getting passed in the headers, then the correct user is already
        # persisted in the session and we don't need to continue.
        if request.user.is_authenticated:
            if request.user.get_username() == self.clean_username(username, request):
                return
        # We are seeing this user for the first time in this session, attempt
        # to authenticate the user.
        user = auth.authenticate(remote_user=username, request=request)
        if user:
            # User is valid. Set request.user and persist user in the session
            # by logging the user in.
            request.user = user
            request.session['auth_backend'] = user.backend
            backend = load_backend(user.backend)
            # Some backends (e.g. SAML/OIDC) opt out of session persistence
            # via a save_login=False class attribute.
            if getattr(backend, 'save_login', True):
                auth.login(request, user)
| mit | 4614226f01be29290554fbc5399fa4f8 | 43.283333 | 85 | 0.659767 | 4.822142 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/contact/views.py | 2 | 13697 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.views.decorators.cache import never_cache
from pdc.apps.common import viewsets
from pdc.apps.common.constants import PUT_OPTIONAL_PARAM_WARNING
from .models import (Person, Maillist, ContactRole,
GlobalComponentContact, ReleaseComponentContact)
from .serializers import (PersonSerializer, MaillistSerializer, ContactRoleSerializer,
GlobalComponentContactSerializer, ReleaseComponentContactSerializer)
from .filters import (PersonFilterSet, MaillistFilterSet, ContactRoleFilterSet,
GlobalComponentContactFilter, ReleaseComponentContactFilter)
# Create your views here.
class PersonViewSet(viewsets.PDCModelViewSet):
    """
    ##Overview##
    This page shows the usage of the **Person API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # The doc_* strings below are rendered into the browsable API documentation;
    # $LINK/$URL markers and %(...)s placeholders are expanded by the doc machinery.
    doc_create = """
    ### CREATE
    __Method__:
    POST
    __URL__: $LINK:person-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" -X POST -d '{"username": "test", "email": "test@example.com"}' $URL:person-list$
        # output
        {"id": 1, "username": "test", "email": "test@example.com"}
    """
    doc_list = """
    ### LIST
    __Method__:
    GET
    __URL__: $LINK:person-list$
    __Query Params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    """
    doc_retrieve = """
    ### RETRIEVE
    __Method__:
    GET
    __URL__: $LINK:person-detail:instance_pk$
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" $URL:person-detail:1$
        # output
        {"id": 1, "username": "test", "email": "test@example.com"}
    """
    doc_update = """
    ### UPDATE
    __Method__: `PUT`, `PATCH`
    __URL__: $LINK:person-detail:instance_pk$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        PUT:
            curl -X PUT -d '{"username": "new_name", "email": "new_email"}' -H "Content-Type: application/json" $URL:person-detail:1$
            # output
            {"id": 1, "username": "new_name", "email": "new_email"}
        PATCH:
            curl -X PATCH -d '{"email": "new_email"}' -H "Content-Type: application/json" $URL:person-detail:1$
            # output
            {"id": 1, "username": "name", "email": "new_email"}
    """
    doc_destroy = """
    ### DELETE
    __Method__:
    DELETE
    __URL__: $LINK:person-detail:instance_pk$
    __Response__:
    STATUS: 204 NO CONTENT
    __Example__:
        curl -X DELETE -H "Content-Type: application/json" $URL:person-detail:1$
    """
    # Standard model-viewset wiring; ordering by id keeps pagination stable.
    serializer_class = PersonSerializer
    queryset = Person.objects.all().order_by('id')
    filter_class = PersonFilterSet
class MaillistViewSet(viewsets.PDCModelViewSet):
    """
    ##Overview##
    This page shows the usage of the **Mailing list API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # The doc_* strings below are rendered into the browsable API documentation;
    # $LINK/$URL markers and %(...)s placeholders are expanded by the doc machinery.
    doc_create = """
    ### CREATE
    __Method__:
    POST
    __URL__: $LINK:maillist-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" -X POST -d '{"mail_name": "test", "email": "test@example.com"}' $URL:maillist-list$
        # output
        {"id": 1, "mail_name": "test", "email": "test@example.com"}
    """
    doc_list = """
    ### LIST
    __Method__:
    GET
    __URL__: $LINK:maillist-list$
    __Query Params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    __Example__:
        With query params:
            curl -H "Content-Type: application/json" -G $URL:maillist-list$ --data-urlencode "mail_name=test"
            # output
            {
                "count": 1,
                "next": null,
                "previous": null,
                "results": [
                    {
                        "id": int,
                        "mail_name": "test",
                        "email": "test@example.com"
                    }
                ]
            }
    """
    doc_retrieve = """
    ### RETRIEVE
    __Method__:
    GET
    __URL__: $LINK:maillist-detail:instance_pk$
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" $URL:maillist-detail:1$
        # output
        {"id": 1, "mail_name": "test", "email": "test@example.com"}
    """
    doc_update = """
    ### UPDATE
    __Method__: `PUT`, `PATCH`
    PUT: for full fields update
        {'mail_name': 'new_name', 'email': 'new_email'}
    PATCH: for partial update
        {'mail_name': 'new_name'}
        or
        {'email': 'new_email'}
        or
        {'mail_name': 'new_name', 'email': 'new_email'}
    __URL__: $LINK:maillist-detail:instance_pk$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        PUT:
            curl -X PUT -d '{"mail_name": "new_name", "email": "new_email"}' -H "Content-Type: application/json" $URL:maillist-detail:1$
            # output
            {"id": 1, "mail_name": "new_name", "email": "new_email"}
        PATCH:
            curl -X PATCH -d '{"email": "new_email"}' -H "Content-Type: application/json" $URL:maillist-detail:1$
            # output
            {"id": 1, "mail_name": "name", "email": "new_email"}
    """
    doc_destroy = """
    ### DELETE
    __Method__:
    DELETE
    __URL__: $LINK:maillist-detail:instance_pk$
    __Response__:
    STATUS: 204 NO CONTENT
    __Example__:
        curl -X DELETE -H "Content-Type: application/json" $URL:maillist-detail:1$
    """
    # Standard model-viewset wiring; ordering by id keeps pagination stable.
    serializer_class = MaillistSerializer
    queryset = Maillist.objects.all().order_by('id')
    filter_class = MaillistFilterSet
class ContactRoleViewSet(viewsets.PDCModelViewSet):
    """
    ##Overview##
    This page shows the usage of the **Contact Role API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # Extra macros merged into the doc_* strings when rendering documentation.
    docstring_macros = PUT_OPTIONAL_PARAM_WARNING
    doc_create = """
    ### CREATE
    __Method__:
    POST
    __URL__: $LINK:contactrole-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" -X POST -d '{"name": "test"}' $URL:contactrole-list$
        # output
        {"name": "test", "count_limit": 1}
    """
    doc_list = """
    ### LIST
    __Method__:
    GET
    __URL__: $LINK:contactrole-list$
    __Query Params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" -X GET $URL:contactrole-list$
        # output
        {
            "count": 4,
            "next": null,
            "previous": null,
            "results": [
                {
                    "name": "qe_leader",
                    "count_limit": 1
                },
                {
                    "name": "qe_group",
                    "count_limit": 1
                },
                ...
            ]
        }
        With query params:
            curl -H "Content-Type: application/json" -G $URL:contactrole-list$ --data-urlencode "name=test"
            # output
            {
                "count": 1,
                "next": null,
                "previous": null,
                "results": [
                    {
                        "name": "test",
                        "count_limit": 1
                    }
                ]
            }
    """
    doc_retrieve = """
    ### RETRIEVE
    __Method__:
    GET
    __URL__: $LINK:contactrole-detail:role_name$
    __Response__:
    %(SERIALIZER)s
    __Example__:
        curl -H "Content-Type: application/json" $URL:contactrole-detail:QE_Leader$
        # output
        {"name": "QE_Leader", "count_limit": 1}
    """
    doc_update = """
    ### UPDATE
    %(PUT_OPTIONAL_PARAM_WARNING)s
    __Method__: `PUT`, `PATCH`
    __URL__: $LINK:contactrole-detail:role_name$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    __Example__:
        PUT:
            curl -X PUT -d '{"name": "new_name"}' -H "Content-Type: application/json" $URL:contactrole-detail:QE_Ack$
            # output
            {"name": "new_name", "count_limit": 1}
        PATCH:
            curl -X PATCH -d '{"count_limit": "unlimited"}' -H "Content-Type: application/json" $URL:contactrole-detail:QE_Ack$
            # output
            {"name": "new_name", "count_limit": "unlimited"}
    """
    doc_destroy = """
    ### DELETE
    __Method__:
    DELETE
    __URL__: $LINK:contactrole-detail:role_name$
    __Response__:
    STATUS: 204 NO CONTENT
    __Example__:
        curl -X DELETE -H "Content-Type: application/json" $URL:contactrole-detail:QE_Group$
    """
    serializer_class = ContactRoleSerializer
    queryset = ContactRole.objects.all().order_by('id')
    filter_class = ContactRoleFilterSet
    # URLs address roles by name rather than numeric primary key.
    lookup_field = 'name'
    overwrite_lookup_field = False
class _BaseContactViewSet(viewsets.PDCModelViewSet):
    """
    Shared base for global/release component contact viewsets. Subclasses
    supply queryset, serializer, filter and the docstring_macros used to
    fill in the %(BASENAME)s placeholders below.
    """
    doc_list = """
    __Method__: `GET`
    __URL__: $LINK:%(BASENAME)s-list$
    __Query params__:
    %(FILTERS)s
    The value of `contact` filter should either be a username or mailling
    list name.
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    """
    doc_retrieve = """
    __Method__: `GET`
    __URL__: $LINK:%(BASENAME)s-detail:pk$
    __Response__:
    %(SERIALIZER)s
    """
    doc_destroy = """
    __Method__: `DELETE`
    __URL__: $LINK:%(BASENAME)s-detail:pk$
    __Response__: Nothing on success.
    """
    doc_update = """
    Please note that if you change the `contact` field here, only the single
    updated relationship between contact and component will be updated.
    Specifically, no other component will be affected.
    If you update with new contact details and such contact does not exist
    yet, it will be automatically created. The specific type will be chosen
    based on whether `username` or `mail_name` was used.
    __Method__: `PUT`, `PATCH`
    __URL__: $LINK:%(BASENAME)s-detail:pk$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    %(WRITABLE_DATA_COMMENT)s
    View [list of available contact roles]($URL:contactrole-list$).
    __Response__:
    %(SERIALIZER)s
    """
    doc_create = """
    If the contact does not exist, it will be created automatically.
    __Method__: `POST`
    __URL__: $LINK:%(BASENAME)s-list$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    %(WRITABLE_DATA_COMMENT)s
    Depending on whether `username` or `mail_name` is used, a person or
    mailling list will be linked to the component.
    View [list of available contact roles]($URL:contactrole-list$).
    __Response__:
    %(SERIALIZER)s
    """
    # Contact data changes frequently; disable HTTP caching on reads so
    # clients always see current assignments.
    @never_cache
    def list(self, *args, **kwargs):
        return super(_BaseContactViewSet, self).list(*args, **kwargs)
    @never_cache
    def retrieve(self, *args, **kwargs):
        return super(_BaseContactViewSet, self).retrieve(*args, **kwargs)
class GlobalComponentContactViewSet(_BaseContactViewSet):
    """Contacts (person or mailing list + role) attached to global components."""
    serializer_class = GlobalComponentContactSerializer
    filter_class = GlobalComponentContactFilter
    # select_related avoids a query per related contact/role/component row.
    queryset = GlobalComponentContact.objects.all().select_related().order_by('id')
    # Fills the %(BASENAME)s / %(WRITABLE_DATA_COMMENT)s placeholders in the
    # doc_* strings inherited from _BaseContactViewSet.
    docstring_macros = {
        'WRITABLE_DATA_COMMENT': '',
        'BASENAME': 'globalcomponentcontacts',
    }
class ReleaseComponentContactViewSet(_BaseContactViewSet):
    """Contacts (person or mailing list + role) attached to release components."""
    serializer_class = ReleaseComponentContactSerializer
    filter_class = ReleaseComponentContactFilter
    # select_related avoids a query per related contact/role/component row.
    queryset = ReleaseComponentContact.objects.all().select_related().order_by('id')
    # Fills the %(BASENAME)s / %(WRITABLE_DATA_COMMENT)s placeholders in the
    # doc_* strings inherited from _BaseContactViewSet.
    docstring_macros = {
        'BASENAME': 'releasecomponentcontacts',
        'WRITABLE_DATA_COMMENT': ('The component can be alternatively specified '
                                  'by its id as `{"id": "int"}`.'),
    }
| mit | d21c2c69cb72bb47e349ffbbc4220325 | 22.656304 | 137 | 0.517486 | 4.04997 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/auth/permissions.py | 3 | 3111 | import re
from restfw_composed_permissions.base import BasePermissionComponent, BaseComposedPermision
from restfw_composed_permissions.generic.components import AllowAll
from django.conf import settings
from pdc.apps.auth.models import Resource, GroupResourcePermission
from pdc.apps.utils.utils import read_permission_for_all
class APIPermissionComponent(BasePermissionComponent):
    """
    Permission component that checks a user's group-based resource
    permissions (GroupResourcePermission records) for the requested API
    endpoint. Superusers, unknown HTTP methods and unmanaged resources are
    always allowed; reads may be globally allowed via settings.
    """

    # Maps lower-case HTTP methods to internal permission names.
    # Built once at class level instead of on every call.
    _METHOD_TO_PERMISSION = {'patch': 'update',
                             'put': 'update',
                             'get': 'read',
                             'delete': 'delete',
                             'post': 'create'}

    def has_permission(self, permission, request, view):
        """Return True when the requesting user may perform this request."""
        # Superusers bypass resource checks, as does a deployment that
        # disabled them via DISABLE_RESOURCE_PERMISSION_CHECK.
        if request.user.is_superuser or getattr(
                settings, 'DISABLE_RESOURCE_PERMISSION_CHECK', False):
            return True
        # Strip the API prefix (e.g. "/rest_api/v1/") to get the resource name.
        api_name = request.path.replace("%s%s/" % (settings.REST_API_URL, settings.REST_API_VERSION), '').strip('/')
        internal_permission = self._convert_permission(request.method)
        # Unknown methods (e.g. OPTIONS/HEAD) map to None and are allowed;
        # reads may be allowed for everyone by configuration.
        if not internal_permission or (read_permission_for_all() and internal_permission == 'read'):
            return True
        return self._has_permission(internal_permission, request.user, str(view.__class__), api_name)

    def _has_permission(self, internal_permission, user, view, api_name):
        """Check GroupResourcePermission records for the resolved resource."""
        resources = Resource.objects.filter(view=view).all()
        resource = None
        if len(resources) == 1:
            resource = resources[0]
        elif len(resources) > 1:
            # Multiple API resources map to one view class; disambiguate by name.
            resources = list(Resource.objects.filter(view=view, name=api_name))
            if len(resources) == 1:
                resource = resources[0]
            else:
                # The stored resource name may be a regular expression.
                resource = self._try_regexp_resource_match(api_name, resources)
        if not resource:
            # Do not restrict access to resources that are not under
            # permission control.
            result = True
        else:
            group_id_list = [group.id for group in user.groups.all()]
            result = GroupResourcePermission.objects.filter(
                group__id__in=group_id_list, resource_permission__resource=resource,
                resource_permission__permission__name=internal_permission).exists()
        return result

    @staticmethod
    def _try_regexp_resource_match(api_name, resources):
        """Return the first resource whose name (as regexp) matches api_name."""
        result = None
        api_str_list = api_name.split('/')
        # Only multi-segment API names can match the regexp-style resources.
        if len(api_str_list) > 1 and resources:
            for resource_obj in resources:
                if re.match(resource_obj.name, api_name):
                    result = resource_obj
                    break
        return result

    @staticmethod
    def _convert_permission(in_method):
        """Map an HTTP method to an internal permission name (None if unknown)."""
        return APIPermissionComponent._METHOD_TO_PERMISSION.get(in_method.lower())
class APIPermission(BaseComposedPermision):
    # Note: "Permision" spelling comes from the restfw_composed_permissions API.
    def global_permission_set(self):
        """Request-level checks: delegate to the resource permission component."""
        return APIPermissionComponent
    def object_permission_set(self):
        """Object-level checks: no additional restrictions beyond the above."""
        return AllowAll
| mit | 6e168364790d0c5758df1c9929bbaabb | 39.402597 | 116 | 0.614915 | 4.406516 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/osbs/views.py | 2 | 2581 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import mixins
from rest_framework import viewsets
from pdc.apps.common import viewsets as common_viewsets
from pdc.apps.auth.permissions import APIPermission
from . import filters
from . import models
from . import serializers
class OSBSViewSet(common_viewsets.NotificationMixin,
                  common_viewsets.StrictQueryParamMixin,
                  common_viewsets.ChangeSetUpdateModelMixin,
                  mixins.ListModelMixin,
                  common_viewsets.MultiLookupFieldMixin,
                  mixins.RetrieveModelMixin,
                  viewsets.GenericViewSet):
    """
    ## Metadata for OpenShift Build Service
    This viewset provides a list of all components relevant to OSBS. This
    connection is realized through the `has_osbs` flag on [release component
    types]($URL:releasecomponenttype-list$). The components appear in this API
    automatically when they are assigned the proper type. Records here can only
    be changed, they can't be created or deleted.
    Currently there is just one flag tracked here:
    * `autorebuild`: This flag indicates whether the component should be
      automatically rebuilt when its dependencies change. If the value in PDC
      is `null`, it indicates that the client should use its default value.
    """
    # List/retrieve/update only -- no create or destroy mixins on purpose:
    # rows exist exactly for components whose type has has_osbs=True.
    queryset = models.OSBSRecord.objects.filter(component__type__has_osbs=True).order_by('component__id')
    serializer_class = serializers.OSBSSerializer
    filter_class = filters.OSBSFilter
    permission_classes = (APIPermission,)
    # Records are addressed by release id + component name instead of pk.
    lookup_fields = (('component__release__release_id', r'[^/]+'),
                     ('component__name', r'[^/]+'))
    # The doc_* strings below feed the generated API documentation.
    doc_retrieve = """
    __Method__: `GET`
    __URL__: $LINK:osbs-detail:release_id}/{component_name$
    __Response__:
    %(SERIALIZER)s
    """
    doc_list = """
    __Method__: `GET`
    __URL__: $LINK:osbs-list$
    __Query params__:
    %(FILTERS)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_update = """
    __Method__: `PUT`
    __URL__: $LINK:osbs-detail:release_id}/{component_name$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
    doc_partial_update = """
    __Method__: `PATCH`
    __URL__: $LINK:osbs-detail:release_id}/{component_name$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
| mit | d1fcb49aed43a88b2e960148dc5b6b6f | 25.885417 | 105 | 0.620302 | 4.123003 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/auth/backends.py | 2 | 4142 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2016 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import ldap
from django.conf import settings
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.models import Group
def get_ldap_groups(l, login):
    """Return a sorted list of LDAP group names the given login belongs to."""
    entries = l.search_s(settings.LDAP_GROUPS_DN,
                         ldap.SCOPE_SUBTREE,
                         "(memberUid=%s)" % login,
                         ['cn'])
    names = set()
    for _dn, attrs in entries:
        names.update(attrs["cn"])
    # Each account has a private group named after the login; drop it.
    names.discard(login)
    return sorted(names)
def get_ldap_user(l, login):
    """Look up a user entry in LDAP by uid.

    Returns a dict with ``login``, ``full_name`` and ``email`` keys, or
    ``None`` when no entry matches.
    """
    matches = l.search_s(settings.LDAP_USERS_DN, ldap.SCOPE_SUBTREE, "(uid=%s)" % login)
    if not matches:
        return None
    _dn, attrs = matches[0]
    return {
        "login": attrs["uid"][0],
        "full_name": attrs["givenName"][0] + ' ' + attrs["sn"][0],
        "email": attrs["mail"][0],
    }
def update_user_from_ldap(user, conn=None):
    """
    Sync given user with LDAP. Use `conn` as connection if supplied, otherwise
    create a new connection and unbind it when syncing is done. Passed
    connection is not closed.
    """
    if "/" in user.username:
        # Host principal (e.g. host/fqdn) -> no record in LDAP.
        return
    if not getattr(settings, "LDAP_URI", None):
        # LDAP syncing is not configured for this deployment.
        return
    # Create the connection *before* the try block: if ldap.initialize()
    # raised inside it, the finally clause would reference the unbound
    # local `ldap_object` and mask the original error with an
    # UnboundLocalError.
    ldap_object = conn or ldap.initialize(settings.LDAP_URI)
    try:
        user_data = get_ldap_user(ldap_object, user.username)
        groups = get_ldap_groups(ldap_object, user.username)
    finally:
        # Only unbind connections we created ourselves.
        if not conn:
            ldap_object.unbind()
    if user_data:
        user.full_name = user_data["full_name"]
        user.email = user_data["email"]
    # Mirror LDAP group membership, creating missing Django groups on the fly.
    group_ids = set()
    for group_name in groups:
        group, _ = Group.objects.get_or_create(name=group_name)
        group_ids.add(group.id)
    user.groups = group_ids
    user.save()
def update_user_from_auth_mellon(user, request):
    """Refresh the user's name, e-mail and groups from mod_auth_mellon headers."""
    meta = request.META
    user.full_name = meta['MELLON_fullname']
    user.email = meta['MELLON_email']
    # Every MELLON_groups_<n> variable carries one group name; create any
    # missing Django groups on the fly and assign the full set.
    user.groups = {
        Group.objects.get_or_create(name=value)[0].id
        for key, value in meta.items()
        if key.startswith('MELLON_groups_')
    }
    user.save()
def update_user_from_auth_oidc(user, request):
    """Refresh the user's name, e-mail and groups from OIDC claim headers."""
    meta = request.META
    user.full_name = meta['OIDC_CLAIM_name']
    user.email = meta['OIDC_CLAIM_email']
    # The groups claim is a comma-separated list; create any missing Django
    # groups on the fly and assign the full set.
    user.groups = {
        Group.objects.get_or_create(name=name)[0].id
        for name in meta['OIDC_CLAIM_groups'].split(',')
    }
    user.save()
class KerberosUserBackend(RemoteUserBackend):
    """Remote-user backend for Kerberos logins; syncs user details from LDAP."""
    # TODO:
    # * handle inactive users (mark inactive, remove groups)
    # * sync daily all users (cron-job?)

    def authenticate(self, request, remote_user):
        return super(KerberosUserBackend, self).authenticate(request, remote_user)

    def clean_username(self, username):
        """Strip the trailing @REALM part from a Kerberos principal."""
        return username.partition('@')[0]

    def configure_user(self, user):
        """Fetch user data from LDAP and update the newly created user."""
        user = super(KerberosUserBackend, self).configure_user(user)
        update_user_from_ldap(user)
        # Password login is never used with Kerberos.
        user.set_unusable_password()
        user.save()
        return user
class AuthMellonUserBackend(RemoteUserBackend):
    """Remote-user backend for mod_auth_mellon (SAML) deployments."""
    # The custom RemoteUserMiddleware checks this flag and skips persisting
    # the login in the session.
    save_login = False
    logout_url = '/saml2/logout?ReturnTo='

    def authenticate(self, request, remote_user):
        user = super(AuthMellonUserBackend, self).authenticate(request, remote_user)
        if not user:
            return user
        # Refresh name/e-mail/groups from the SAML attributes on every login.
        update_user_from_auth_mellon(user, request)
        return user
class AuthOIDCUserBackend(RemoteUserBackend):
    """Remote-user backend for OpenID Connect (mod_auth_openidc) deployments."""
    # The custom RemoteUserMiddleware checks this flag and skips persisting
    # the login in the session.
    save_login = False
    logout_url = '/oidc_redirect?logout='

    def authenticate(self, request, remote_user):
        user = super(AuthOIDCUserBackend, self).authenticate(request, remote_user)
        if not user:
            return user
        # Refresh name/e-mail/groups from the OIDC claims on every login.
        update_user_from_auth_oidc(user, request)
        return user
| mit | 2c076c60597c71379a1f47bb14747826 | 28.375887 | 85 | 0.626026 | 3.579948 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/changeset/views.py | 2 | 6605 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.conf import settings
from django.core.exceptions import FieldError, ValidationError
from django.views.generic import ListView, DetailView
from rest_framework import viewsets, status
from rest_framework.response import Response
from pdc.apps.auth.permissions import APIPermission
from pdc.apps.common.viewsets import StrictQueryParamMixin
from . import models
from .filters import ChangesetFilterSet
from .serializers import ChangesetSerializer
class ChangesetListView(ListView):
    """Plain Django view rendering a paginated HTML list of changesets."""
    template_name = 'changeset_list.html'
    context_object_name = 'changeset_list'
    # Newest changesets first.
    queryset = models.Changeset.objects.all().order_by('-committed_on')
    # Render the page even when no changesets exist yet.
    allow_empty = True
    paginate_by = settings.ITEMS_PER_PAGE
class ChangesetDetailView(DetailView):
    """Plain Django view rendering a single changeset."""
    template_name = "changeset_detail.html"
    model = models.Changeset
    # The URL pattern captures the primary key as "id" instead of "pk".
    pk_url_kwarg = "id"
class ChangesetViewSet(StrictQueryParamMixin,
                       viewsets.ReadOnlyModelViewSet):
    """
    PDC tracks every modification that was made through any of the API
    end-points. This provides an auditable trail of who changed what and when.
    Each request to the API creates one `Changeset`, which contains one or more
    `Change`s.
    Each `ChangeSet` carries metadata about author and date. Optionally, there
    can also be a comment, which is an arbitrary string. It is extracted from
    the `PDC-Change-Comment` HTTP header in the request.
    A `Change` has information about which database model was changed, its
    primary key and old and new value (provided as a JSON). If both the values
    are provided, the `Change` represents an update in some of the fields. If
    only new value is provided, the `Change` represents creation of new entity.
    If only old value is non-null, an entity was deleted.
    This page shows the usage of the **Changeset API**, please see the
    following for more details. The access to this data is read-only. It is
    possible to either request all changesets satisfying given criteria, or
    view detail of a particular changeset.
    """
    def list(self, request, *args, **kwargs):
        """
        __Method__:
        GET
        __URL__: $LINK:changeset-list$
        __Query Params__:
        %(FILTERS)s
        The dates for `changed_since` and `changed_until` should have one these
        formats:
            Format               | Example
            ---------------------+---------------------------
            %%Y-%%m-%%d %%H:%%M:%%S    | 2006-10-25 14:30:59
            %%Y-%%m-%%d %%H:%%M:%%S.%%f | 2006-10-25 14:30:59.000200
            %%Y-%%m-%%d %%H:%%M       | 2006-10-25 14:30
            %%Y-%%m-%%d             | 2006-10-25
        Resource names for `resource` should be specified in all lower case.
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        The unit for duration is second.
        __Example__:
            curl -H "Content-Type: application/json" -X GET $URL:changeset-list$
            # output
            {
                "count": 84,
                "next": "$URL:changeset-list$?page=2",
                "previous": null,
                "results": [
                    {
                        {
                            "author": "xxx",
                            "requested_on": "2015-02-03T05:50:49.387Z",
                            "committed_on": "2015-02-03T05:51:17.262Z",
                            "duration": "27.875",
                            "changes": [
                                {
                                    "id": 1
                                    "resource": "person",
                                    "resource_id": "2",
                                    "old_value": "old",
                                    "new_value": "new"
                                }
                            ],
                            "comment": "xxx"
                        }
                    },
                    ...
                ]
            }
            With query params:
                curl -H "Content-Type: application/json" -G $URL:changeset-list$ --data-urlencode "resource=test"
                # output
                {
                    "count": 1,
                    "next": null,
                    "previous": null,
                    "results": [
                        {
                            "id": 1
                            "author": "xxx",
                            "requested_on": "2015-02-03T05:50:49.387Z",
                            "committed_on": "2015-02-03T05:51:17.262Z",
                            "duration": "27.875",
                            "changes": [
                               {
                                    "resource": "person",
                                    "resource_id": "2",
                                    "old_value": "old",
                                    "new_value": "new"
                               }
                            ],
                            "comment": "xxx"
                        }
                    ]
                }
        """
        try:
            return super(ChangesetViewSet, self).list(request, *args, **kwargs)
        except (FieldError, ValidationError) as exc:
            # Bad filter values raise deep inside the ORM; surface them as a
            # client error instead of a 500. ValidationError carries a list of
            # messages, FieldError only a plain string.
            msg = exc.messages if hasattr(exc, 'messages') else str(exc)
            return Response({'detail': msg},
                            status=status.HTTP_400_BAD_REQUEST)
    doc_retrieve = """
    __Method__:
    GET
    __URL__: $LINK:changeset-detail:instance_pk$
    __Response__:
    %(SERIALIZER)s
    The unit for duration is second.
    __Example__:
        curl -H "Content-Type: application/json" $URL:changeset-detail:1$
        # output
        {
            "id": 1,
            "author": "xxx",
            "requested_on": "2015-02-03T05:50:49.387Z",
            "committed_on": "2015-02-03T05:51:17.262Z",
            "duration": "27.875",
            "changes": [
               {
                    "resource": "person",
                    "resource_id": "2",
                    "old_value": "old",
                    "new_value": "new"
               }
            ],
            "comment": "xxx"
        }
    """
    # Read-only wiring; newest changesets first.
    serializer_class = ChangesetSerializer
    queryset = models.Changeset.objects.all().order_by('-committed_on')
    filter_class = ChangesetFilterSet
    permission_classes = (APIPermission,)
| mit | 316c1165f2b174bc680d889966c8b8a1 | 33.581152 | 110 | 0.479788 | 4.558316 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/contact/migrations/0003_auto_20151001_1309.py | 6 | 3413 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches contact-related foreign keys
    # to on_delete=PROTECT and renames the *RoleContact models to *Contact.
    # NOTE(review): the final RenameModel pair swaps Release<->Global
    # (ReleaseComponentRoleContact -> GlobalComponentContact and vice versa),
    # with the FK AlterFields afterwards re-pointing each model at the matching
    # component table -- presumably correcting previously swapped names;
    # confirm against migration 0002 before touching this.
    dependencies = [
        ('contact', '0002_auto_20151001_1239'),
    ]
    operations = [
        migrations.AlterField(
            model_name='globalcomponentrolecontact',
            name='component',
            field=models.ForeignKey(to='component.GlobalComponent', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='globalcomponentrolecontact',
            name='contact',
            field=models.ForeignKey(to='contact.Contact', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='globalcomponentrolecontact',
            name='contact_role',
            field=models.ForeignKey(to='contact.ContactRole', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='releasecomponentrolecontact',
            name='component',
            field=models.ForeignKey(to='component.ReleaseComponent', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='releasecomponentrolecontact',
            name='contact',
            field=models.ForeignKey(to='contact.Contact', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='releasecomponentrolecontact',
            name='contact_role',
            field=models.ForeignKey(to='contact.ContactRole', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterUniqueTogether(
            name='globalcomponentrolecontact',
            unique_together=set([('contact_role', 'component')]),
        ),
        migrations.AlterUniqueTogether(
            name='releasecomponentrolecontact',
            unique_together=set([('contact_role', 'component')]),
        ),
        migrations.RenameField(
            model_name='globalcomponentrolecontact',
            old_name='contact_role',
            new_name='role',
        ),
        migrations.RenameField(
            model_name='releasecomponentrolecontact',
            old_name='contact_role',
            new_name='role',
        ),
        migrations.AlterUniqueTogether(
            name='globalcomponentrolecontact',
            unique_together=set([('role', 'component')]),
        ),
        migrations.AlterUniqueTogether(
            name='releasecomponentrolecontact',
            unique_together=set([('role', 'component')]),
        ),
        migrations.RenameModel(
            old_name='ReleaseComponentRoleContact',
            new_name='GlobalComponentContact',
        ),
        migrations.RenameModel(
            old_name='GlobalComponentRoleContact',
            new_name='ReleaseComponentContact',
        ),
        migrations.AlterField(
            model_name='globalcomponentcontact',
            name='component',
            field=models.ForeignKey(to='component.GlobalComponent', on_delete=django.db.models.deletion.PROTECT),
        ),
        migrations.AlterField(
            model_name='releasecomponentcontact',
            name='component',
            field=models.ForeignKey(to='component.ReleaseComponent', on_delete=django.db.models.deletion.PROTECT),
        ),
    ]
| mit | 225f68cd3098732c5d986eccc3f5f99e | 37.348315 | 114 | 0.608849 | 4.746871 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/contact/serializers.py | 2 | 4777 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from pdc.apps.common.serializers import StrictSerializerMixin
from pdc.apps.component.models import GlobalComponent, ReleaseComponent
from pdc.apps.component.serializers import ReleaseComponentField
from .models import (ContactRole, Person, Maillist,
GlobalComponentContact, ReleaseComponentContact)
class LimitField(serializers.IntegerField):
    """Integer field that renders one sentinel value as the string "unlimited".

    The sentinel (``unlimited_value``) is supplied by the caller and is mapped
    to/from the literal string ``"unlimited"`` on (de)serialization.
    """
    UNLIMITED_STR = 'unlimited'
    doc_format = '"{}"|int'.format(UNLIMITED_STR)

    def __init__(self, unlimited_value, **kwargs):
        # Limits can never be negative; enforce that on the parent field.
        kwargs['min_value'] = 0
        super(LimitField, self).__init__(**kwargs)
        self.unlimited_value = unlimited_value

    def to_representation(self, obj):
        if obj != self.unlimited_value:
            return super(LimitField, self).to_representation(obj)
        return type(self).UNLIMITED_STR

    def to_internal_value(self, value):
        if value != type(self).UNLIMITED_STR:
            return super(LimitField, self).to_internal_value(value)
        return self.unlimited_value
class ContactRoleSerializer(StrictSerializerMixin,
                            serializers.HyperlinkedModelSerializer):
    """API serializer for contact roles: role name plus per-component contact limit."""
    # Slug field so the role name can be used directly in URLs.
    name = serializers.SlugField()
    # Accepts a non-negative integer or the literal string "unlimited" (see LimitField).
    count_limit = LimitField(required=False, unlimited_value=ContactRole.UNLIMITED, default=1)
    class Meta:
        model = ContactRole
        fields = ('name', 'count_limit')
class PersonSerializer(StrictSerializerMixin,
                       serializers.HyperlinkedModelSerializer):
    """API serializer for person contacts (username and email)."""
    class Meta:
        model = Person
        fields = ('id', 'username', 'email')
class MaillistSerializer(StrictSerializerMixin,
                         serializers.HyperlinkedModelSerializer):
    """API serializer for mailing-list contacts (mail_name and email)."""
    class Meta:
        model = Maillist
        fields = ('id', 'mail_name', 'email')
class ContactField(serializers.DictField):
    """Polymorphic contact field: serializes either a Person or a Maillist.

    On input, the presence of a ``username`` or ``mail_name`` key decides which
    concrete contact model the data belongs to; the contact is fetched or
    created on the fly and any creation is recorded in the request changeset.
    """
    doc_format = '{"id": "int", "email": "email address", "username|mail_name": "string"}'
    writable_doc_format = '{"email": "email address", "username|mail_name": "string"}'
    child = serializers.CharField()
    field_to_class = {
        "username": Person,
        "mail_name": Maillist,
    }
    class_to_serializer = {
        "Person": PersonSerializer,
        "Maillist": MaillistSerializer,
    }

    def to_representation(self, value):
        # Resolve the concrete subclass behind the base Contact row, then
        # delegate to the serializer registered for that subclass.
        concrete = value.as_leaf_class()
        serializer_class = self.class_to_serializer.get(type(concrete).__name__, None)
        if serializer_class is None:
            raise serializers.ValidationError("Unsupported Contact: %s" % value)
        return serializer_class(context=self.context).to_representation(concrete)

    def to_internal_value(self, data):
        validated = super(ContactField, self).to_internal_value(data)
        for discriminator, model_class in self.field_to_class.items():
            if discriminator not in validated:
                continue
            contact, created = model_class.objects.get_or_create(**validated)
            if created:
                # Record the newly created contact in the audit changeset.
                request = self.context.get('request', None)
                model_name = ContentType.objects.get_for_model(contact).model
                if request:
                    request.changeset.add(model_name,
                                          contact.id,
                                          'null',
                                          json.dumps(contact.export()))
            return contact
        raise serializers.ValidationError('Could not determine type of contact.')
class GlobalComponentContactSerializer(StrictSerializerMixin, serializers.ModelSerializer):
    """API serializer linking a contact, in a given role, to a global component."""
    # Component and role are referenced by their names rather than primary keys.
    component = serializers.SlugRelatedField(slug_field='name', read_only=False,
                                             queryset=GlobalComponent.objects.all())
    role = serializers.SlugRelatedField(slug_field='name', read_only=False,
                                        queryset=ContactRole.objects.all())
    # Polymorphic: accepts/renders either a Person or a Maillist.
    contact = ContactField()
    class Meta:
        model = GlobalComponentContact
        fields = ('id', 'component', 'role', 'contact')
class ReleaseComponentContactSerializer(StrictSerializerMixin, serializers.ModelSerializer):
    """API serializer linking a contact, in a given role, to a release component."""
    component = ReleaseComponentField(read_only=False,
                                      queryset=ReleaseComponent.objects.all())
    role = serializers.SlugRelatedField(slug_field='name', read_only=False,
                                        queryset=ContactRole.objects.all())
    # Polymorphic: accepts/renders either a Person or a Maillist.
    contact = ContactField()
    class Meta:
        model = ReleaseComponentContact
        fields = ('id', 'component', 'role', 'contact')
| mit | fd396c35f7b4baa26a34dae398e2f4fd | 36.912698 | 94 | 0.622985 | 4.502356 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/componentbranch/models.py | 2 | 2274 | #
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.db import models
from pdc.apps.component.models import GlobalComponent, ReleaseComponentType
class ComponentBranch(models.Model):
    """A branch of a global component (e.g. a dist-git branch), typed by component type."""
    global_component = models.ForeignKey(GlobalComponent)
    name = models.CharField(max_length=300)
    type = models.ForeignKey(ReleaseComponentType)
    # TODO: Should we include a the dist-git URL?
    # dist_git_url = models.CharField(max_length=500, blank=True)
    critical_path = models.BooleanField(default=False)
    class Meta:
        # A component can only have one branch of a given name and type.
        unique_together = [
            ('global_component', 'name', 'type'),
        ]
    def __unicode__(self):
        return u'{0}: {1} ({2})'.format(
            self.global_component.name, self.name, self.type)
    def export(self):
        # JSON-serializable snapshot of this branch, including its SLA
        # subscriptions (via the `slas` reverse relation).
        return {
            'global_component_name': self.global_component.name,
            'name': self.name,
            'type_name': self.type.name,
            'slas': [{'name': sla_to_branch.sla.name,
                      'eol': sla_to_branch.eol.strftime('%Y-%m-%d')}
                     for sla_to_branch in self.slas.all()],
            'critical_path': self.critical_path
        }
class SLA(models.Model):
    """A named service-level agreement that component branches can subscribe to."""
    name = models.CharField(max_length=300, unique=True)
    description = models.TextField(blank=True)
    def __unicode__(self):
        return u'{0}'.format(self.name)
    def export(self):
        # JSON-serializable snapshot of this SLA.
        return {
            'name': self.name,
            'description': self.description,
        }
class SLAToComponentBranch(models.Model):
    """Subscription of a component branch to an SLA, with an end-of-life date."""
    sla = models.ForeignKey(SLA, on_delete=models.CASCADE)
    branch = models.ForeignKey(ComponentBranch, related_name='slas',
                               on_delete=models.CASCADE)
    eol = models.DateField()
    class Meta:
        # One SLA can be attached to a given branch at most once.
        unique_together = [
            ('sla', 'branch'),
        ]
    def __unicode__(self):
        return u'{0} support for {1} {2} ({3})'.format(
            self.sla.name, self.branch.global_component.name, self.branch.name,
            self.branch.type.name)
    def export(self):
        # JSON-serializable snapshot of this subscription.
        return {
            'sla': self.sla.name,
            'branch': self.branch.name,
            'eol': self.eol.strftime('%Y-%m-%d')
        }
| mit | 977e3cf12e1fdea415b9f58a6a20e4ee | 28.921053 | 79 | 0.585312 | 3.73399 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/compose/lib.py | 2 | 17655 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import os
import json
import productmd
from productmd.rpms import Rpms
from django.db import transaction, connection
from django.db.models import Q
from rest_framework import serializers
from pdc.apps.package.models import RPM
from pdc.apps.common import hacks as common_hacks
from pdc.apps.common import models as common_models
from pdc.apps.package import models as package_models
from pdc.apps.repository import models as repository_models
from pdc.apps.release import models as release_models
from pdc.apps.release import lib
from pdc.apps.compose import models
from pdc.apps.compose.serializers import ComposeTreeSerializer
from pdc.apps.release.models import Release
from pdc.apps.component.models import ReleaseComponent
from pdc.apps.repository.models import ContentCategory
from pdc.apps.utils.rpm import parse_nvr
def _maybe_raise_inconsistency_error(composeinfo, manifest, name):
"""Raise ValidationError if compose id is not the same in both files.
The name should describe the kind of manifest.
"""
if composeinfo.compose.id != manifest.compose.id:
raise serializers.ValidationError(
{'detail': ['Inconsistent data: different compose id in composeinfo and {0} file.'.format(name)]})
def get_or_insert_rpm(rpms_in_db, cursor, rpm_nevra, srpm_nevra, filename):
    """Return the id of the RPM identified by `rpm_nevra`, inserting it if missing.

    :param rpms_in_db: dict cache mapping NEVRA string -> database id; updated
        in place when a new row is inserted
    :param cursor: database cursor passed through to the bulk insert
    :param rpm_nevra: NEVRA string of the binary RPM
    :param srpm_nevra: NEVRA string of the source RPM it was built from
    :param filename: file name of the RPM
    :return: database id of the (possibly newly inserted) RPM
    """
    rpm_id = rpms_in_db.get(rpm_nevra)
    # Check `is None` rather than truthiness so that a (theoretical) id of 0
    # in the cache does not trigger a duplicate insert.
    if rpm_id is None:
        rpm_id = package_models.RPM.bulk_insert(cursor, rpm_nevra, filename, srpm_nevra)
        rpms_in_db[rpm_nevra] = rpm_id
    return rpm_id
def insert_compose_rpms_if_nonexist(compose_rpms_in_db, cursor,
                                    variant_arch_id, rpm_id,
                                    content_category_id, sigkey_id, path_id):
    """Insert a ComposeRPM row unless the (variant_arch, rpm) pair is already cached.

    `compose_rpms_in_db` is a set of "variant_arch_id/rpm_id" keys and is
    updated in place after a successful insert.
    """
    cache_key = "%s/%s" % (variant_arch_id, rpm_id)
    if cache_key in compose_rpms_in_db:
        return
    models.ComposeRPM.bulk_insert(cursor,
                                  variant_arch_id,
                                  rpm_id,
                                  content_category_id,
                                  sigkey_id,
                                  path_id)
    compose_rpms_in_db.add(cache_key)
def _link_compose_to_integrated_product(request, compose, variant):
"""
If the variant belongs to an integrated layered product, update the compose
so that it is linked to the release for that product. Note that the variant
argument should be variant retrieved from compose info, not a PDC model.
"""
release = variant.release
if release.name:
integrated_from_release = lib.get_or_create_integrated_release(
request,
compose.release,
release
)
compose.linked_releases.add(integrated_from_release)
def _add_compose_create_msg(request, compose_obj):
"""
Add compose create message to request._messagings.
"""
msg = {'action': 'create',
'compose_id': compose_obj.compose_id,
'compose_date': compose_obj.compose_date.isoformat(),
'compose_type': compose_obj.compose_type.name,
'compose_respin': compose_obj.compose_respin}
request._request._messagings.append(('.compose', msg))
def _add_import_msg(request, compose_obj, attribute, count):
"""
Add import message to request._messagings.
- `attribute` should be something like 'images' or 'rpms'.
- `count` should indicate the number of those entities which were imported.
"""
msg = {'attribute': attribute,
'count': count,
'action': 'import',
'compose_id': compose_obj.compose_id,
'compose_date': compose_obj.compose_date.isoformat(),
'compose_type': compose_obj.compose_type.name,
'compose_respin': compose_obj.compose_respin}
request._request._messagings.append(('.' + attribute, msg))
def _store_relative_path_for_compose(compose_obj, variants_info, variant, variant_obj, add_to_changelog):
    """Persist per-arch relative paths for one variant of a compose.

    Deserializes the 'paths' mapping for the variant from raw composeinfo data
    and creates ComposeRelPath (and, if needed, PathType) rows. Newly created
    objects are appended to `add_to_changelog` so the caller can log them.
    """
    vp = productmd.composeinfo.VariantPaths(variant)
    # NOTE(review): variants_info appears to be keyed by variant name here,
    # while other compose data is keyed by variant uid -- confirm against the
    # composeinfo schema.
    common_hacks.deserialize_wrapper(vp.deserialize, variants_info.get(variant.name, {}).get('paths', {}))
    for path_type in vp._fields:
        path_type_obj, created = models.PathType.objects.get_or_create(name=path_type)
        if created:
            add_to_changelog.append(path_type_obj)
        for arch in variant.arches:
            field_value = getattr(vp, path_type)
            # Only store a path if this path type has an entry for this arch.
            if field_value and field_value.get(arch, None):
                arch_obj = common_models.Arch.objects.get(name=arch)
                crp_obj, created = models.ComposeRelPath.objects.get_or_create(arch=arch_obj, variant=variant_obj,
                                                                               compose=compose_obj, type=path_type_obj,
                                                                               path=field_value[arch])
                if created:
                    add_to_changelog.append(crp_obj)
@transaction.atomic(savepoint=False)
def compose__import_rpms(request, release_id, composeinfo, rpm_manifest):
    """Import RPMs from a compose's rpm manifest into the database.

    Parses the raw composeinfo and rpm manifest, gets or creates the Compose
    (emitting a creation message when applicable), then bulk-inserts RPM rows
    and ComposeRPM links per variant/arch. Runs in a single transaction.

    :param request: API request; used for changeset logging and messaging
    :param release_id: id of the release the compose belongs to
    :param composeinfo: deserialized composeinfo JSON data
    :param rpm_manifest: deserialized rpm manifest JSON data
    :return: tuple (compose_id, number of imported ComposeRPM links)
    """
    release_obj = release_models.Release.objects.get(release_id=release_id)
    ci = productmd.composeinfo.ComposeInfo()
    common_hacks.deserialize_wrapper(ci.deserialize, composeinfo)
    rm = Rpms()
    common_hacks.deserialize_wrapper(rm.deserialize, rpm_manifest)
    # Both files must agree on the compose id.
    _maybe_raise_inconsistency_error(ci, rm, 'rpms')
    # Compose date arrives as YYYYMMDD; reformat to ISO YYYY-MM-DD.
    compose_date = "%s-%s-%s" % (ci.compose.date[:4], ci.compose.date[4:6], ci.compose.date[6:])
    compose_type = models.ComposeType.objects.get(name=ci.compose.type)
    acceptance_status = models.ComposeAcceptanceTestingState.objects.get(name='untested')
    compose_obj, created = lib._logged_get_or_create(
        request, models.Compose,
        release=release_obj,
        compose_id=ci.compose.id,
        compose_date=compose_date,
        compose_type=compose_type,
        compose_respin=ci.compose.respin,
        compose_label=ci.compose.label or None,
        acceptance_testing=acceptance_status,
    )
    if created and hasattr(request._request, '_messagings'):
        # Announce the new compose on the message bus.
        _add_compose_create_msg(request, compose_obj)
    # Pre-load all known RPMs into an in-memory NEVRA -> id cache so the
    # import loop below avoids one query per package.
    rpms_in_db = {}
    qs = package_models.RPM.objects.all()
    for rpm in qs.iterator():
        key = "%s-%s:%s-%s.%s" % (rpm.name, rpm.epoch, rpm.version, rpm.release, rpm.arch)
        rpms_in_db[key] = rpm.id
    cursor = connection.cursor()
    add_to_changelog = []
    imported_rpms = 0
    variants_info = composeinfo['payload']['variants']
    for variant in ci.get_variants(recursive=True):
        _link_compose_to_integrated_product(request, compose_obj, variant)
        variant_type = release_models.VariantType.objects.get(name=variant.type)
        variant_obj, created = models.Variant.objects.get_or_create(
            compose=compose_obj,
            variant_id=variant.id,
            variant_uid=variant.uid,
            variant_name=variant.name,
            variant_type=variant_type
        )
        if created:
            add_to_changelog.append(variant_obj)
        _store_relative_path_for_compose(compose_obj, variants_info, variant, variant_obj, add_to_changelog)
        for arch in variant.arches:
            arch_obj = common_models.Arch.objects.get(name=arch)
            var_arch_obj, _ = models.VariantArch.objects.get_or_create(arch=arch_obj,
                                                                       variant=variant_obj)
            # Cache existing ComposeRPM links for this variant/arch so
            # re-imports do not create duplicates.
            compose_rpms_in_db = set()
            qs = models.ComposeRPM.objects.filter(variant_arch=var_arch_obj).values_list('variant_arch_id',
                                                                                         'rpm_id')
            for (variant_arch_id, rpm_id) in qs.iterator():
                key = "%s/%s" % (variant_arch_id, rpm_id)
                compose_rpms_in_db.add(key)
            sources = set()
            # Manifest is nested: variant uid -> arch -> srpm nevra -> rpms.
            for srpm_nevra, rpms in rm.rpms.get(variant.uid, {}).get(arch, {}).iteritems():
                sources.add(srpm_nevra)
                for rpm_nevra, rpm_data in rpms.iteritems():
                    imported_rpms += 1
                    path, filename = os.path.split(rpm_data['path'])
                    rpm_id = get_or_insert_rpm(rpms_in_db, cursor, rpm_nevra, srpm_nevra, filename)
                    sigkey_id = common_models.SigKey.get_cached_id(rpm_data["sigkey"], create=True)
                    path_id = models.Path.get_cached_id(path, create=True)
                    content_category = rpm_data["category"]
                    content_category_id = repository_models.ContentCategory.get_cached_id(content_category)
                    insert_compose_rpms_if_nonexist(compose_rpms_in_db, cursor,
                                                    var_arch_obj.id, rpm_id,
                                                    content_category_id, sigkey_id, path_id)
    for obj in add_to_changelog:
        lib._maybe_log(request, True, obj)
    # Record a summary notice of the import in the changeset.
    request.changeset.add('notice', 0, 'null',
                          json.dumps({
                              'compose': compose_obj.compose_id,
                              'num_linked_rpms': imported_rpms,
                          }))
    if hasattr(request._request, '_messagings'):
        _add_import_msg(request, compose_obj, 'rpms', imported_rpms)
    return compose_obj.compose_id, imported_rpms
@transaction.atomic(savepoint=False)
def compose__import_images(request, release_id, composeinfo, image_manifest):
    """Import images from a compose's image manifest into the database.

    Parses the raw composeinfo and image manifest, gets or creates the Compose
    (emitting a creation message when applicable), then creates Image and
    ComposeImage rows per variant/arch. Runs in a single transaction.

    :param request: API request; used for changeset logging and messaging
    :param release_id: id of the release the compose belongs to
    :param composeinfo: deserialized composeinfo JSON data
    :param image_manifest: deserialized image manifest JSON data
    :return: tuple (compose_id, number of imported images)
    """
    release_obj = release_models.Release.objects.get(release_id=release_id)
    ci = productmd.composeinfo.ComposeInfo()
    common_hacks.deserialize_wrapper(ci.deserialize, composeinfo)
    im = productmd.images.Images()
    common_hacks.deserialize_wrapper(im.deserialize, image_manifest)
    # Both files must agree on the compose id.
    _maybe_raise_inconsistency_error(ci, im, 'images')
    # Compose date arrives as YYYYMMDD; reformat to ISO YYYY-MM-DD.
    compose_date = "%s-%s-%s" % (ci.compose.date[:4], ci.compose.date[4:6], ci.compose.date[6:])
    compose_type = models.ComposeType.objects.get(name=ci.compose.type)
    compose_obj, created = lib._logged_get_or_create(
        request, models.Compose,
        release=release_obj,
        compose_id=ci.compose.id,
        compose_date=compose_date,
        compose_type=compose_type,
        compose_respin=ci.compose.respin,
        compose_label=ci.compose.label or None,
    )
    if created and hasattr(request._request, '_messagings'):
        # Announce the new compose on the message bus.
        _add_compose_create_msg(request, compose_obj)
    add_to_changelog = []
    imported_images = 0
    variants_info = composeinfo['payload']['variants']
    for variant in ci.get_variants(recursive=True):
        _link_compose_to_integrated_product(request, compose_obj, variant)
        variant_type = release_models.VariantType.objects.get(name=variant.type)
        variant_obj, created = models.Variant.objects.get_or_create(
            compose=compose_obj,
            variant_id=variant.id,
            variant_uid=variant.uid,
            variant_name=variant.name,
            variant_type=variant_type
        )
        if created:
            add_to_changelog.append(variant_obj)
        _store_relative_path_for_compose(compose_obj, variants_info, variant, variant_obj, add_to_changelog)
        for arch in variant.arches:
            arch_obj = common_models.Arch.objects.get(name=arch)
            var_arch_obj, created = models.VariantArch.objects.get_or_create(arch=arch_obj, variant=variant_obj)
            # Manifest is nested: variant uid -> arch -> list of images.
            for i in im.images.get(variant.uid, {}).get(arch, []):
                path, file_name = os.path.split(i.path)
                path_id = models.Path.get_cached_id(path, create=True)
                # Images are identified by file name + sha256; the remaining
                # metadata is only set when the row is first created.
                image, _ = package_models.Image.objects.get_or_create(
                    file_name=file_name, sha256=i.checksums["sha256"],
                    defaults={
                        'image_format_id': package_models.ImageFormat.get_cached_id(i.format),
                        'image_type_id': package_models.ImageType.get_cached_id(i.type),
                        'disc_number': i.disc_number,
                        'disc_count': i.disc_count,
                        'arch': i.arch,
                        'mtime': i.mtime,
                        'size': i.size,
                        'bootable': i.bootable,
                        'implant_md5': i.implant_md5,
                        'volume_id': i.volume_id,
                        'md5': i.checksums.get("md5", None),
                        'sha1': i.checksums.get("sha1", None),
                        'subvariant': getattr(i, 'subvariant', None),
                    }
                )
                mi, created = models.ComposeImage.objects.get_or_create(
                    variant_arch=var_arch_obj,
                    image=image,
                    path_id=path_id)
                imported_images += 1
    for obj in add_to_changelog:
        lib._maybe_log(request, True, obj)
    # Record a summary notice of the import in the changeset.
    request.changeset.add('notice', 0, 'null',
                          json.dumps({
                              'compose': compose_obj.compose_id,
                              'num_linked_images': imported_images,
                          }))
    if hasattr(request._request, '_messagings'):
        _add_import_msg(request, compose_obj, 'images', imported_images)
    return compose_obj.compose_id, imported_images
def _set_compose_tree_location(request, compose_id, composeinfo, location, url, scheme):
    """Create or update ComposeTree locations for every variant/arch of a compose.

    For each variant/arch pair in the composeinfo, a ComposeTree entry with
    the given location/url/scheme is created (or updated if one already exists
    for that compose/variant/arch/location). All known content categories are
    marked as synced.

    :return: number of locations created or updated
    """
    ci = productmd.composeinfo.ComposeInfo()
    common_hacks.deserialize_wrapper(ci.deserialize, composeinfo)
    num_set_locations = 0
    synced_content = [item.name for item in ContentCategory.objects.all()]
    for variant in ci.get_variants(recursive=True):
        variant_uid = variant.uid
        variant_obj = models.Variant.objects.get(compose__compose_id=compose_id, variant_uid=variant_uid)
        for arch_name in variant.arches:
            data = {'compose': compose_id,
                    'variant': variant_uid,
                    'arch': arch_name,
                    'location': location,
                    'url': url,
                    'scheme': scheme,
                    'synced_content': synced_content}
            request.data['compose'] = compose_id
            try:
                obj = models.ComposeTree.objects.get(compose__compose_id=compose_id, variant=variant_obj,
                                                     arch__name=arch_name, location__short=location)
                # update
                serializer = ComposeTreeSerializer(obj, data=data, many=False, context={'request': request})
            except models.ComposeTree.DoesNotExist:
                # create
                serializer = ComposeTreeSerializer(data=data, many=False, context={'request': request})
            if serializer.is_valid(raise_exception=True):
                serializer.save()
                num_set_locations += 1
    # Record a summary notice in the changeset.
    request.changeset.add('notice', 0, 'null',
                          json.dumps({
                              'compose': compose_id,
                              'num_set_locations': num_set_locations,
                          }))
    return num_set_locations
@transaction.atomic(savepoint=False)
def compose__full_import(request, release_id, composeinfo, rpm_manifest, image_manifest, location, url, scheme):
    """Import RPMs and images for a compose and set its tree locations, atomically.

    :return: tuple (compose_id, imported rpm count, imported image count,
        number of set locations)
    """
    compose_id, imported_rpms = compose__import_rpms(request, release_id, composeinfo, rpm_manifest)
    # if compose__import_images return successfully, it should return same compose id
    _, imported_images = compose__import_images(request, release_id, composeinfo, image_manifest)
    set_locations = _set_compose_tree_location(request, compose_id, composeinfo, location, url, scheme)
    return compose_id, imported_rpms, imported_images, set_locations
def _find_composes_srpm_name_with_rpm_nvr(nvr):
    """Return (composes queryset, srpm name) for an rpm NVR string.

    Raises ValueError when the NVR cannot be parsed or no matching RPM exists.
    """
    try:
        parsed = parse_nvr(nvr)
    except ValueError:
        raise ValueError("Invalid NVR: %s" % nvr)
    # Composes that contain an RPM with exactly this name/version/release.
    compose_filter = (Q(variant__variantarch__composerpm__rpm__name=parsed["name"]) &
                      Q(variant__variantarch__composerpm__rpm__version=parsed["version"]) &
                      Q(variant__variantarch__composerpm__rpm__release=parsed["release"]))
    rpms = RPM.objects.filter(name=parsed["name"], version=parsed["version"],
                              release=parsed["release"])
    srpm_name = None
    if rpms:
        srpm_name = list(set([rpm.srpm_name for rpm in rpms.distinct()]))[0]
    if srpm_name is None:
        raise ValueError("not found")
    return models.Compose.objects.filter(compose_filter).distinct(), srpm_name
def find_bugzilla_products_and_components_with_rpm_nvr(nvr):
    """Return unique bugzilla product/component dicts for an rpm NVR string."""
    composes, srpm_name = _find_composes_srpm_name_with_rpm_nvr(nvr)
    releases = [Release.objects.get(release_id=compose.release)
                for compose in composes]
    result = []
    for release in releases:
        component_names = common_hacks.srpm_name_to_component_names(srpm_name)
        matching_components = ReleaseComponent.objects.filter(
            release=release,
            name__in=component_names).distinct()
        entry = dict()
        entry['bugzilla_product'] = release.bugzilla_product
        entry['bugzilla_component'] = [rc.bugzilla_component.export()
                                       for rc in matching_components
                                       if rc.bugzilla_component]
        # De-duplicate: several composes may map to the same release data.
        if entry not in result:
            result.append(entry)
    return result
| mit | aac3e7c2c547c4ec2b98268258db64e7 | 42.272059 | 119 | 0.599037 | 3.836375 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/component/migrations/0002_auto_20150525_1410.py | 8 | 2168 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
class Migration(migrations.Migration):
    """Attach contacts/labels/upstream to components and add uniqueness constraints."""
    dependencies = [
        ('contact', '0001_initial'),
        ('component', '0001_initial'),
        ('common', '0001_initial'),
        ('release', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='releasecomponent',
            name='contacts',
            field=models.ManyToManyField(to='contact.RoleContact', blank=True),
        ),
        migrations.AddField(
            model_name='releasecomponent',
            name='global_component',
            field=models.ForeignKey(to='component.GlobalComponent'),
        ),
        migrations.AddField(
            model_name='releasecomponent',
            name='release',
            field=models.ForeignKey(to='release.Release'),
        ),
        migrations.AddField(
            model_name='globalcomponent',
            name='contacts',
            field=models.ManyToManyField(to='contact.RoleContact', blank=True),
        ),
        migrations.AddField(
            model_name='globalcomponent',
            name='labels',
            field=models.ManyToManyField(to='common.Label', blank=True),
        ),
        migrations.AddField(
            model_name='globalcomponent',
            name='upstream',
            field=models.OneToOneField(null=True, blank=True, to='component.Upstream'),
        ),
        migrations.AddField(
            model_name='bugzillacomponent',
            name='parent_component',
            field=mptt.fields.TreeForeignKey(related_name='children', blank=True, to='component.BugzillaComponent', null=True),
        ),
        migrations.AlterUniqueTogether(
            name='releasecomponent',
            unique_together=set([('release', 'global_component', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='bugzillacomponent',
            unique_together=set([('name', 'parent_component')]),
        ),
    ]
| mit | 06755e540dc8c4b402a8500ebda78cfa | 31.848485 | 127 | 0.582565 | 4.632479 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/common/hacks.py | 2 | 3348 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import re
from django.db import connection
from django.conf import settings
from django.core.exceptions import ValidationError
from rest_framework import serializers
from pkg_resources import parse_version
def deserialize_wrapper(func, data):
    """
    Convert generic productmd exceptions into validation errors.

    Calls ``func(data)`` and rewraps any exception it raises as a DRF
    ValidationError with a human-readable reason.
    """
    try:
        func(data)
    except KeyError as e:
        # e.args[0] is the missing key; using it instead of the Python-2-only
        # `e.message` keeps this working on Python 3 as well.
        raise serializers.ValidationError(
            {'detail': 'Error parsing productmd metadata.',
             'reason': 'Missing key %s' % e.args[0]}
        )
    except Exception as e:
        raise serializers.ValidationError(
            {'detail': 'Error parsing productmd metadata.',
             'reason': str(e)}
        )
def add_returning(sql):
    """
    Add SQL clause required to return id of inserted item if the backend needs
    it. The suffix is created only once and then cached.
    """
    # The suffix is cached as an attribute on the function object itself, so
    # the database backend is only consulted on the first call.
    if not hasattr(add_returning, '_returning'):
        add_returning._returning = ""
        r_fmt = connection.ops.return_insert_id()
        if r_fmt:
            add_returning._returning = " " + r_fmt[0] % "id"
    return sql + add_returning._returning
def bool_from_native(value):
    """Convert value to bool, treating common 'false' string spellings as False."""
    falsy_strings = ('false', 'f', 'False', '0')
    return False if value in falsy_strings else bool(value)
def convert_str_to_bool(value, name=None):
    """
    Try to strictly convert a string value to boolean or raise ValidationError.

    `name` is only used to make the error message more descriptive.
    """
    truthy = (True, 'true', 't', 'True', '1')
    falsy = (False, 'false', 'f', 'False', '0')
    if value in truthy:
        return True
    if value in falsy:
        return False
    ident = ' of %s' % name if name else ''
    raise serializers.ValidationError('Value [%s]%s is not a boolean' % (value, ident))
def as_instance(arg, type, name=None):
    """Return arg if it is an instance of type, otherwise raise ValidationError."""
    if isinstance(arg, type):
        return arg
    ident = '%s: ' % name if name else ''
    raise ValidationError('%s"%s" is not a %s' % (ident, arg, type.__name__))


def as_list(arg, name=None):
    """Validate that arg is a list and return it unchanged."""
    return as_instance(arg, list, name)


def as_dict(arg, name=None):
    """Validate that arg is a dict and return it unchanged."""
    return as_instance(arg, dict, name)
def convert_str_to_int(value, name=None):
    """
    Convert a string value to int or raise ValidationError.

    `name` is only used to make the error message more descriptive.
    """
    try:
        return int(value)
    except Exception:
        ident = ' of %s' % name if name else ''
        raise ValidationError('Value [%s]%s is not an integer' % (value, ident))
def validate_model(sender, **kwargs):
    """Signal handler running full model validation, skipped for raw fixture loads."""
    if "raw" not in kwargs:
        return
    if not kwargs["raw"]:
        kwargs["instance"].full_clean()
def srpm_name_to_component_names(srpm_name):
    """Map an SRPM name to a list of release component names.

    With the optional bindings app enabled, the mapping is looked up there;
    otherwise the srpm name itself is the only component name.
    """
    if settings.WITH_BINDINGS:
        # Imported lazily so the bindings app remains optional.
        from pdc.apps.bindings import models as binding_models
        return binding_models.ReleaseComponentSRPMNameMapping.get_component_names_by_srpm_name(srpm_name)
    else:
        return [srpm_name]
def parse_epoch_version(version):
    """
    Wrapper around `pkg_resources.parse_version` that can handle epochs
    delimited by colon as is customary for RPMs.
    """
    # parse_version understands PEP 440 "N!" epochs, so rewrite a leading
    # "N:" epoch into that form before parsing.
    if re.match(r'^\d+:', version):
        return parse_version(re.sub(r'^(\d+):', r'\1!', version))
    return parse_version(version)
| mit | 104439739f6d023c7ba568216d693bc1 | 27.862069 | 105 | 0.629331 | 3.795918 | false | false | false | false |
product-definition-center/product-definition-center | pdc/settings_common.py | 2 | 12352 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
Move the settings generated by 'django-admin startproject' using Django 1.8.1
to this common file, this common file doesn't include settings_local's settings.
The benefit is other file like settings_test.py can import the original common settings,
not the customized settings.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this checked-in key is a development default; production is
# presumably expected to override it (e.g. via settings_local) -- confirm.
SECRET_KEY = '3hm)=^*sowhxr%m)%_u3mk+!ncy=c)147xbevej%l_lcdogu#+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Default configuration for debug toolbar.
DEBUG_TOOLBAR = False
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
]
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    # Show the toolbar on every request whenever DEBUG_TOOLBAR is enabled.
    'SHOW_TOOLBAR_CALLBACK': lambda request: True,
}
ALLOWED_HOSTS = []
# The default number of seconds to cache a page for the cache middleware
CACHE_MIDDLEWARE_SECONDS = 30
# Number of items on one page of a paginated web UI listing.
ITEMS_PER_PAGE = 50
# ======== resource permissions configuration =========
# allow read permission for all users
ALLOW_ALL_USER_READ = True
# enable all resource permissions
DISABLE_RESOURCE_PERMISSION_CHECK = False
# send email to admin if one changeset's change is equal or greater than CHANGESET_SIZE_ANNOUNCE
CHANGESET_SIZE_ANNOUNCE = 1000
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_filters',
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
    'pdc.apps.auth',
    'pdc.apps.common',
    'pdc.apps.compose',
    'pdc.apps.module',
    'pdc.apps.package',
    'pdc.apps.release',
    'pdc.apps.repository',
    'pdc.apps.contact',
    'pdc.apps.component',
    'pdc.apps.changeset',
    'pdc.apps.utils',
    'pdc.apps.bindings',
    'pdc.apps.usage',
    'pdc.apps.osbs',
    'pdc.apps.componentbranch',
    'pdc.apps.unreleasedvariant',
    'pdc.apps.releaseschedule',
    'pdc.apps.messaging',
    'mptt',
)
# Custom user model supporting Kerberos-style usernames.
AUTH_USER_MODEL = 'kerb_auth.User'
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'pdc.apps.auth.authentication.TokenAuthenticationWithChangeSet',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissions'
    ],
    'DEFAULT_METADATA_CLASS': 'contrib.bulk_operations.metadata.BulkMetadata',
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',
                                'pdc.apps.utils.utils.RelatedNestedOrderingFilter'),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'pdc.apps.common.renderers.ReadOnlyBrowsableAPIRenderer',
    ),
    'EXCEPTION_HANDLER': 'pdc.apps.common.handlers.exception_handler',
    'DEFAULT_PAGINATION_CLASS': 'pdc.apps.common.pagination.AutoDetectedPageNumberPagination',
    'NON_FIELD_ERRORS_KEY': 'detail',
}
# Middleware order matters: caching wraps everything, and the PDC-specific
# usage/changeset/messaging middlewares run after authentication.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'pdc.apps.auth.middleware.RemoteUserMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'pdc.apps.menu.middleware.MenuMiddleware',
    'pdc.apps.usage.middleware.UsageMiddleware',
    'pdc.apps.changeset.middleware.ChangesetMiddleware',
    'pdc.apps.messaging.middleware.MessagingMiddleware',
    'pdc.apps.utils.middleware.RestrictAdminMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
]
# During test runs, drop the admin restriction and disable page caching.
if 'test' in sys.argv:
    MIDDLEWARE.remove('pdc.apps.utils.middleware.RestrictAdminMiddleware')
    CACHE_MIDDLEWARE_SECONDS = 0
AUTHENTICATION_BACKENDS = (
    'pdc.apps.auth.backends.KerberosUserBackend',
    #'pdc.apps.auth.backends.AuthMellonUserBackend',
    #'pdc.apps.auth.backends.AuthOIDCUserBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/auth/krb5login'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'pdc.urls'
# kobo is imported here so its package directory can be used as a template dir.
import kobo
ROOT_MENUCONF = "pdc.menu"
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, "pdc/templates"),
            os.path.join(os.path.dirname(kobo.__file__), "hub", "templates"),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'kobo.django.menu.context_processors.menu_context_processor',
            ],
        },
    },
]
WSGI_APPLICATION = 'pdc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# SQLite is the development default; production deployments are expected to
# override this in settings_local.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = '/usr/share/pdc/static'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "pdc/static"),
    "/usr/share/patternfly1/resources",
)
# REST API mount point, version and pagination defaults. Note that
# CORS_URLS_REGEX below is derived from REST_API_URL, so keep them in sync.
REST_API_URL = 'rest_api/'
REST_API_VERSION = 'v1'
REST_API_PAGE_SIZE = 20
REST_API_PAGE_SIZE_QUERY_PARAM = 'page_size'
REST_API_MAX_PAGE_SIZE = 100
API_HELP_TEMPLATE = "api/help.html"
# Format string used for URLs in global and release components pointing to
# Dist-Git server. One value will be substituted into this format string: the
# name of the package.
DIST_GIT_REPO_FORMAT = "http://pkgs.example.com/cgit/rpms/%s"
# URL fragment used to point to a particular branch of a package on Dist-Git.
# This will be appended to the URL created with DIST_GIT_REPO_FORMAT.
# The default works for CGit.
DIST_GIT_BRANCH_FORMAT = "?h=%s"
# ldap settings
LDAP_URI = "ldap://ldap.example.com:389"
LDAP_USERS_DN = "ou=users,dc=example,dc=com"
LDAP_GROUPS_DN = "ou=groups,dc=example,dc=com"
LDAP_CACHE_HOURS = 24
#
# CORS settings
#
# The requests can come from any origin (hostname). If this is undesirable, use
# settings_local.py module, set this to False and either set
# CORS_ORIGIN_WHITELIST to a tuple of hostnames that are allowed to contact the
# API, or set CORS_ORIGIN_REGEX_WHITELIST, which again is a tuple of regular
# expressions.
CORS_ORIGIN_ALLOW_ALL = True
# Only the REST API can be accessed. If settings local override REST_API_URL,
# make sure to update this setting as well.
CORS_URLS_REGEX = '^/%s.*$' % REST_API_URL
# We want HTML/JS clients to be able to use Kerberos authentication.
CORS_ALLOW_CREDENTIALS = True
# Allow default headers from django-cors-headers package as well as
# PDC-Change-Comment custom header.
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken',
    'pdc-change-comment',
)
# mock kerberos login for debugging
DEBUG_USER = None
# Values substituted into the browsable API documentation templates.
BROWSABLE_DOCUMENT_MACROS = {
    # need to be rewrite with the real host name when deploy.
    'HOST_NAME': 'http://localhost:8000',
    # make consistent with rest api root.
    'API_PATH': '%s%s' % (REST_API_URL, REST_API_VERSION),
}
# Canned response bodies used by the common error handlers.
EMPTY_PATCH_ERROR_RESPONSE = {
    'detail': 'Partial update with no changes does not make much sense.',
}
INTERNAL_SERVER_ERROR_RESPONSE = {
    'detail': 'The server encountered an internal error or misconfiguration and was unable to complete your request.'
}
# Messaging Bus Config
# An empty 'MLP' disables messaging entirely; the commented examples below
# show the configuration shape for each supported transport.
MESSAGE_BUS = {
    # MLP: Messaging Library Package
    # e.g. `fedmsg` for fedmsg or `kombu` for AMQP and other transports that `kombu` supports.
    # `stomp` for STOMP supports.
    'MLP': '',
    # # `fedmsg` config example:
    # # fedmsg's config is managed by `fedmsg` package, so normally here just need to set the
    # # 'MLP' to 'fedmsg'
    # 'MLP': 'fedmsg',
    #
    # # `kombu` config example:
    # 'MLP': 'kombu',
    # 'URL': 'amqp://guest:guest@example.com:5672//',
    # 'EXCHANGE': {
    #     'name': 'pdc',
    #     'type': 'topic',
    #     'durable': False
    # },
    # 'OPTIONS': {
    #     # Set these two items to config `kombu` to use ssl.
    #     'login_method': 'EXTERNAL',
    #     'ssl': {
    #         'ca_certs': '',
    #         'keyfile': '',
    #         'certfile': '',
    #         'cert_reqs': ssl.CERT_REQUIRED,
    #     }
    # }
    #
    # # `stomp` config items:
    # 'MLP': 'stomp',
    # 'HOST_AND_PORTS': [
    #     ('stomp.example1.com', 61613),
    #     ('stomp.example2.com', 61613),
    #     ('stomp.example3.com', 61613),
    # ],
    # 'TOPIC': 'pdc',
    # 'CERT_FILE': '',
    # 'KEY_FILE': '',
}
# ======== Email configuration =========
# Email addresses who would like to receive email
ADMINS = (('PDC Dev', 'pdc@example.com'),)
# Email SMTP HOST configuration
EMAIL_HOST = 'smtp.example.com'
# Email sender's address
SERVER_EMAIL = 'noreply@example.com'
EMAIL_SUBJECT_PREFIX = '[PDC]'
# Logging: everything goes through the 'verbose' formatter; by default the
# 'pdc' and 'django.request' loggers write to stderr. The 'watchedfile' and
# 'mail_admins' handlers are defined but not attached here -- deployments can
# wire them up in settings_local.py.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(process)d [%(filename)s -- %(module)s.%(funcName)s:%(lineno)d] [%(levelname)s]- %(message)s'
        },
    },
    'handlers': {
        'stderr': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
            'stream': sys.stderr
        },
        'stdout': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
            'stream': sys.stdout
        },
        'watchedfile': {
            'level': 'INFO',
            'class': 'logging.handlers.WatchedFileHandler',
            'formatter': 'verbose',
            'filename': '/var/log/pdc/server.log',
            # delay=True postpones opening the log file until first write, so
            # importing settings does not fail if the path is missing.
            'delay': True,
        },
        # Send a warning email if we want it.
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,
        }
    },
    'loggers': {
        'pdc': {
            'handlers': ['stderr'],
            'level': 'INFO',
        },
        'django.request': {
            'handlers': ['stderr'],
            'level': 'ERROR',
            'propagate': False,
        }
    }
}
# ======== ComponentBranch App Configuration =========
COMPONENT_BRANCH_NAME_BLACKLIST_REGEX = r''
if 'pdc.apps.bindings' in INSTALLED_APPS:
WITH_BINDINGS = True
else:
WITH_BINDINGS = False
| mit | 8b6888fb5db419d0a47139108e8f675e | 28.692308 | 129 | 0.653416 | 3.418766 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/compose/views.py | 2 | 82602 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from itertools import groupby
import json
import os.path
from productmd.rpms import Rpms
from productmd.images import Images, Image
from django.conf import settings
from kobo.django.views.generic import DetailView, SearchView
from django.views.generic import View
from django.forms.formsets import formset_factory
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status, serializers
from django.db.models import Q
from django.http import Http404
from contrib.bulk_operations import bulk_operations
from pdc.apps.package.serializers import RPMSerializer
from pdc.apps.common.models import Arch, SigKey
from pdc.apps.common.hacks import bool_from_native, convert_str_to_bool, as_dict
from pdc.apps.common.viewsets import (ChangeSetCreateModelMixin,
StrictQueryParamMixin,
NoEmptyPatchMixin,
ChangeSetDestroyModelMixin,
ChangeSetModelMixin,
MultiLookupFieldMixin,
NotificationMixin,
ChangeSetUpdateModelMixin)
from pdc.apps.release.models import Release
from pdc.apps.utils.utils import generate_warning_header_dict
from pdc.apps.auth.permissions import APIPermission
from .models import (Compose, VariantArch, Variant, ComposeRPM, OverrideRPM,
ComposeImage, ComposeRPMMapping, ComposeAcceptanceTestingState,
ComposeTree)
from .forms import (ComposeSearchForm, ComposeRPMSearchForm, ComposeImageSearchForm,
ComposeRPMDisableForm, OverrideRPMForm, VariantArchForm, OverrideRPMActionForm)
from .serializers import (ComposeSerializer, OverrideRPMSerializer, ComposeTreeSerializer,
ComposeImageRTTTestSerializer, ComposeTreeRTTTestSerializer)
from .filters import (ComposeFilter, OverrideRPMFilter, ComposeTreeFilter, ComposeImageRTTTestFilter,
ComposeTreeRTTTestFilter)
from . import lib
class ComposeListView(SearchView):
    """Paginated, searchable listing of all composes, newest first."""

    template_name = "compose_list.html"
    context_object_name = "compose_list"
    paginate_by = settings.ITEMS_PER_PAGE
    allow_empty = True
    form_class = ComposeSearchForm
    # Pull related release/type rows eagerly to avoid per-row queries.
    queryset = (
        Compose.objects.all()
        .select_related('release', 'compose_type')
        .prefetch_related('linked_releases')
        .order_by('-id')
    )
class ComposeDetailView(DetailView):
    """Detail page for a single compose, looked up via the ``id`` URL kwarg."""

    template_name = "compose_detail.html"
    pk_url_kwarg = "id"
    # Eagerly fetch everything the template walks through: linked releases
    # plus the whole variant/arch tree.
    queryset = Compose.objects.select_related(
        'release', 'compose_type'
    ).prefetch_related(
        'linked_releases',
        'variant_set__variantarch_set',
        'variant_set__variantarch_set__arch',
        'variant_set__variant_type',
    )
class ComposeRPMListView(SearchView):
    """
    Searchable listing of RPMs in one variant (and optionally one arch) of a
    compose. URL kwargs: ``id`` (compose pk), ``variant`` and optional ``arch``.
    """
    form_class = ComposeRPMSearchForm
    allow_empty = True
    template_name = "compose_rpm_list.html"
    context_object_name = "compose_rpm_list"
    paginate_by = settings.ITEMS_PER_PAGE

    def get_queryset(self):
        urlargs = self.request.resolver_match.kwargs
        variants = Variant.objects.filter(compose=urlargs["id"])
        variant = variants.filter(variant_uid=urlargs["variant"])
        variant_arch = VariantArch.objects.filter(variant=variant)
        # Narrow down to a single arch only when it is part of the URL. The
        # original code tested `"arch" in urlargs` twice with the Arch lookup
        # separated from its use; this consolidates both into one branch,
        # matching ComposeImageListView.get_queryset.
        if "arch" in urlargs:
            arch = Arch.objects.get(name=urlargs["arch"])
            variant_arch = variant_arch.get(arch=arch)
        packages = ComposeRPM.objects.filter(variant_arch=variant_arch)
        query = self.get_form(self.form_class).get_query(self.request)
        packages = packages.filter(query)
        # Order by the full NEVRA so the listing is stable and readable.
        packages = packages.extra(order_by=["rpm__name", "rpm__version", "rpm__release",
                                            "rpm__epoch", "rpm__arch"])
        return packages

    def get_context_data(self, *args, **kwargs):
        """Expose compose/variant/arch labels for the page header."""
        context = super(ComposeRPMListView, self).get_context_data(*args, **kwargs)
        urlargs = self.request.resolver_match.kwargs
        compose = str(Compose.objects.get(pk=urlargs["id"]))
        context["compose"] = compose
        context["variant"] = urlargs["variant"]
        if "arch" in urlargs:
            context["arch"] = urlargs["arch"]
        return context
class ComposeImageListView(SearchView):
    """
    Searchable listing of images in one variant (and optionally one arch) of a
    compose. URL kwargs: ``id`` (compose pk), ``variant`` and optional ``arch``.
    """
    form_class = ComposeImageSearchForm
    allow_empty = True
    template_name = "compose_image_list.html"
    context_object_name = "compose_image_list"
    paginate_by = settings.ITEMS_PER_PAGE

    def get_queryset(self):
        url_kwargs = self.request.resolver_match.kwargs
        # Chained filters on direct fields are equivalent to one combined filter.
        variant = Variant.objects.filter(compose=url_kwargs["id"],
                                         variant_uid=url_kwargs["variant"])
        tree = VariantArch.objects.filter(variant=variant)
        if "arch" in url_kwargs:
            tree = tree.get(arch=Arch.objects.get(name=url_kwargs["arch"]))
        form_query = self.get_form(self.form_class).get_query(self.request)
        return ComposeImage.objects.filter(variant_arch=tree).filter(form_query)

    def get_context_data(self, *args, **kwargs):
        """Expose compose/variant/arch labels for the page header."""
        context = super(ComposeImageListView, self).get_context_data(*args, **kwargs)
        url_kwargs = self.request.resolver_match.kwargs
        context["compose"] = str(Compose.objects.get(pk=url_kwargs["id"]))
        context["variant"] = url_kwargs["variant"]
        if "arch" in url_kwargs:
            context["arch"] = url_kwargs["arch"]
        return context
class RPMOverrideFormView(View):
    """
    This view supports GET and POST methods. On GET, it displays the overrides
    form, on POST it shows a preview of submitted data.
    """
    def __init__(self, *args, **kwargs):
        # One formset factory per logical section of the page; extra=0 means
        # only initial-data forms are rendered (except variant_form, which
        # offers one blank form for adding a new Variant.Arch).
        super(RPMOverrideFormView, self).__init__(*args, **kwargs)
        self.checkbox_form_factory = formset_factory(ComposeRPMDisableForm, extra=0)
        self.override_form_factory = formset_factory(OverrideRPMForm, extra=0)
        self.variant_form_factory = formset_factory(VariantArchForm, extra=1)
        self.override_v_form_factory = formset_factory(OverrideRPMForm, extra=0)
    def _create_initial_form_data(self, release_id):
        """
        Obtain data from database and mangle it to appropriate form. This
        method populates two instance attributes, `initial_checkboxes` and
        `initial_overrides`. Both contain a list of dicts. Note that the lists
        are sorted first by value of `variant` key, then by `arch` and lastly
        by `rpm_name` and `rpm_arch`.
        """
        release = Release.objects.get(release_id=release_id)
        self.compose = release.get_latest_compose()
        if self.compose:
            mapping, useless = self.compose.get_rpm_mapping(self.request.GET['package'], release=release)
        else:
            # No compose for this release yet: build the mapping purely from
            # existing overrides.
            mapping = ComposeRPMMapping()
            mapping, useless = mapping.get_rpm_mapping_only_with_overrides(self.request.GET['package'], False,
                                                                           release=release)
        checkboxes = []
        overs = set()
        for variant, arch, rpm_name, rpm_data in mapping:
            checkboxes.append({"variant": variant,
                               "arch": arch,
                               "rpm_name": rpm_name,
                               "rpm_arch": rpm_data['rpm_arch'],
                               "included": rpm_data['included'],
                               "override": rpm_data['override']})
            overs.add((variant, arch))
        self.initial_checkboxes = checkboxes
        # NOTE(review): sorting a generator of dicts relies on Python 2
        # ordering semantics; on Python 3 this raises TypeError.
        self.initial_overrides = sorted({"variant": x[0], "arch": x[1]} for x in overs)
        self.useless_overrides = useless
    def _populate_context(self):
        """
        Use precomputed forms to populate the context used for rendering
        response. The forms MUST have initial values filled in even on POST
        request, as the data is used to sort the fields.
        The main item in context is under the `forms` key. It is a list of
        tuples (Variant, Arch, Checkbox-Form-List, New-Override-Form-List).
        Some of the keys in context are not used in templates, but are accessed
        when testing. These are `override_forms`, `override_v_forms` and
        `variant_forms`.
        """
        # groupby requires the input to be sorted by the grouping key; the
        # initial data was sorted by (variant, arch) in
        # _create_initial_form_data, so this holds.
        checkbox_forms = [(variant, arch, list(forms)) for (variant, arch), forms
                          in groupby(self.checkbox_form, lambda x: (x.initial['variant'], x.initial['arch']))]
        # forms :: Map (Variant, Arch) ([ComposerRPMDisableForm], [OverrideRPMForm])
        forms = {}
        for (variant, arch, checks) in checkbox_forms:
            forms[(variant, arch)] = (checks, [])
        for new_form in self.override_form:
            # On POST, initial may be empty; fall back to the submitted value.
            variant = new_form.initial.get('variant') or new_form['variant'].value()
            arch = new_form.initial.get('arch') or new_form['arch'].value()
            forms.get((variant, arch), ([], []))[1].append(new_form)
        # var_forms maps the numeric formset prefix of each new Variant.Arch
        # form to (that form, [override forms attached to it]).
        var_forms = {}
        for new_variant in self.variant_form:
            num_id = new_variant.prefix.split('-')[1]
            var_forms[num_id] = (new_variant, [])
        for new_form in self.override_v_form:
            var_forms[new_form['new_variant'].value()][1].append(new_form)
        self.context = {
            'package': self.request.GET['package'],
            'override_forms': self.override_form,
            'override_v_forms': self.override_v_form,
            'variant_forms': self.variant_form,
            "forms": [(a, b, c, d) for ((a, b), (c, d)) in sorted(forms.items())],
            "vararch_forms": var_forms.values(),
            "management_forms": [x.management_form for x in self.formsets],
            "has_errors": any(x.errors for x in self.formsets),
            "useless_overrides": [i for i in self.useless_overrides if i.do_not_delete],
            'compose': self.compose,
        }
    def _create_formsets(self):
        """
        Create formsets as instance attributes. The we are processing a POST
        request, the forms will use data supplied by the request, otherwise it
        will fall back to initial data.
        There are four formsets:
        checkbox_form
        : forms used to disable existing packages
        override_form
        : forms used to create new overrides for existing Variant.Arch
        variant_form
        : forms used to create new Variant.Arch
        override_v_form
        : forms used to create new overrides for new Variant.Arch
        """
        # Bind the formsets to request data only on POST; an unbound formset
        # renders from its initial data.
        args = [self.request.POST] if self.request.method == 'POST' else []
        self.checkbox_form = self.checkbox_form_factory(*args,
                                                        initial=self.initial_checkboxes,
                                                        prefix="checks")
        self.override_form = self.override_form_factory(*args,
                                                        initial=self.initial_overrides,
                                                        prefix="news")
        self.variant_form = self.variant_form_factory(*args, prefix="vararch")
        self.override_v_form = self.override_v_form_factory(*args,
                                                            initial=[{"new_variant": "0"}],
                                                            prefix="for_new_vararch")
        self.formsets = [self.checkbox_form, self.override_form, self.variant_form, self.override_v_form]
    @csrf_exempt
    def get(self, request, release_id):
        # Render the (unbound) overrides form for the package given in the
        # `package` query parameter.
        self._create_initial_form_data(release_id)
        self._create_formsets()
        self._populate_context()
        self.context.update({"release_id": release_id})
        return render(request, 'overrides_form.html', self.context)
    def _prepare_preview(self, release_id):
        # Validate all formsets and translate their cleaned data into a list
        # of staged override actions shown on the preview page. Returns False
        # when any formset fails validation.
        package = self.request.GET['package']
        if not all([x.is_valid() for x in self.formsets]):
            return False
        initial_data = []
        # The default argument binds the enclosing list so `stage` can append
        # to it; `type` here shadows the builtin and names the action kind.
        def stage(type, args, include, initial_data=initial_data):
            data = {'release_id': release_id,
                    'srpm_name': package,
                    'action': type,
                    'variant': args['variant'],
                    'arch': args['arch'],
                    'rpm_name': args['rpm_name'],
                    'rpm_arch': args['rpm_arch'],
                    'include': include,
                    }
            initial_data.append(data)
        for form in self.checkbox_form:
            data = form.cleaned_data
            if form.initial['included'] == data['included']:
                # No change in this form
                continue
            # ComposeRPM disabled by override should be included again.
            if form.initial['override'] == 'delete' and data['included']:
                stage('delete', data, False)
            # Override creating a new package should be disabled.
            if form.initial['override'] == 'create' and not data['included']:
                stage('delete', data, True)
            # ComposeRPM should be disabled.
            if form.initial['override'] == 'orig' and not data['included']:
                stage('create', data, False)
        for data in self.override_form.cleaned_data:
            if data['rpm_name']:
                stage('create', data, True)
        for data in self.override_v_form.cleaned_data:
            if data['rpm_name']:
                # Resolve the variant/arch from the referenced new-Variant form.
                vararch_idx = data['new_variant']
                data['variant'] = self.variant_form.cleaned_data[vararch_idx]['variant']
                data['arch'] = self.variant_form.cleaned_data[vararch_idx]['arch']
                stage('create', data, True)
        # Enrich staged actions with state of any pre-existing override so the
        # preview can show comments and do_not_delete flags and warn about
        # conflicting inclusion.
        for record in initial_data:
            try:
                orpm = OverrideRPM.objects.get(release__release_id=release_id,
                                               variant=record['variant'],
                                               arch=record['arch'],
                                               rpm_name=record['rpm_name'],
                                               rpm_arch=record['rpm_arch'])
                record['do_not_delete'] = orpm.do_not_delete
                record['comment'] = orpm.comment
                if orpm.do_not_delete and orpm.include != record['include']:
                    record['warning'] = ('This override already exists with different inclusion. ' +
                                         'Will modify override with do_not_delete set.')
            except OverrideRPM.DoesNotExist:
                pass
        form_factory = formset_factory(OverrideRPMActionForm, extra=0)
        forms = form_factory(initial=initial_data)
        self.context = {
            'actions': json.dumps(initial_data, indent=2),
            'compressed': json.dumps(initial_data),
            'forms': forms,
            'num_forms': len(forms.forms),
            'package': package,
            'release_id': release_id,
        }
        return True
    def post(self, request, release_id):
        # Two POST flows: `preview_submit` confirms and applies the staged
        # actions carried in the hidden `initial_data` field; otherwise the
        # submitted overrides form is validated and a preview is rendered.
        package = request.GET['package']
        args = {"release_id": release_id, "package": package}
        release = Release.objects.get(release_id=release_id)
        self._create_initial_form_data(release_id)
        self._create_formsets()
        if request.POST.get('preview_submit', False):
            factory = formset_factory(OverrideRPMActionForm, extra=0)
            initial_data = json.loads(request.POST['initial_data'])
            form = factory(request.POST, initial=initial_data)
            if form.is_valid():
                merge_data(initial_data, form.cleaned_data)
                _apply_changes(request, release, initial_data)
                messages.success(request, 'Data was successfully saved.')
                return redirect(request.path + '?package=' + package)
            # Invalid confirmation form: re-render the preview with errors.
            self.context = {
                'actions': json.dumps(initial_data, indent=2),
                'compressed': request.POST['initial_data'],
                'forms': form,
                'has_errors': True,
                'num_forms': len(form.forms),
                'package': package,
                'release_id': release_id,
            }
            return render(request, 'overrides_preview.html', self.context)
        else:
            if self._prepare_preview(release_id):
                return render(request, 'overrides_preview.html', self.context)
            # Validation failed: re-render the form with error messages.
            self._populate_context()
            self.context.update(args)
            return render(request, 'overrides_form.html', self.context)
def merge_data(actions, forms):
    """
    Copy the user-editable fields (``do_not_delete``, ``comment``) from each
    matching preview form into the corresponding staged action. Actions and
    forms are matched on variant, arch, rpm name and rpm arch.
    """
    match_keys = ['variant', 'arch', 'rpm_name', 'rpm_arch']
    for action in actions:
        for form_data in forms:
            if dict_equal_on(action, form_data, match_keys):
                action['do_not_delete'] = form_data['do_not_delete']
                action['comment'] = form_data['comment']
def dict_equal_on(d1, d2, keys):
    """
    Return True iff both dicts have all the requested keys with the same
    values.

    >>> dict_equal_on({1: 'a', 2: 'b'}, {1: 'a', 2: 'c'}, [1, 2])
    False
    >>> dict_equal_on({'a': 1}, {'b': 2}, [])
    True
    """
    # Iterative form of the original recursive comparison: same
    # short-circuiting, no recursion depth limit, and `keys` may now be any
    # iterable (the recursion required it to support slicing).
    return all(d1.get(key) == d2.get(key) for key in keys)
def _apply_changes(request, release, changes):
    """
    Apply each change to update an override. The `changes` argument should be a
    list of values suitable for `OverrideRPM.update_object` method. Each
    performed change is recorded in the request's changeset.
    """
    for entry in changes:
        pk, old_val, new_val = OverrideRPM.update_object(entry['action'], release, entry)
        request.changeset.add('OverrideRPM', pk, old_val, new_val)
class ComposeViewSet(StrictQueryParamMixin,
                     mixins.RetrieveModelMixin,
                     mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    """
    API endpoint that allows querying composes. `GET` request to this URL will
    return the list of composes, each with link to the actual compose.
    Each compose was built for a specific release. This relation is captured by
    the `release` property, which contains an identifier of the release. A
    compose can also be linked to arbitrary number of other releases. These
    links are accessible via the `linked_releases` property.
    The compose data contain a key `rpm_mapping_template` which can be
    transformed into a URL for obtaining and modifying RPM mapping. The
    template contains a string `{{package}}` which should be replaced with the
    package name you are interested in.
    There is no create API for compose. Composes get created as side-effect
    of using several other APIs:
     $LINK:composefullimport-list$
     $LINK:composerpm-list$
     $LINK:composeimage-list$
    """
    queryset = Compose.objects.all().order_by('id')
    serializer_class = ComposeSerializer
    filter_class = ComposeFilter
    filter_fields = ('srpm_name', 'rpm_name', 'rpm_arch', 'rpm_version', 'rpm_release')
    permission_classes = (APIPermission,)
    lookup_field = 'compose_id'
    lookup_value_regex = '[^/]+'
    # Default serializer-context extras; replaced (not mutated) per request by
    # _fill_in_cache, so the class-level dict is never written to.
    context = {}
    def get_serializer_context(self):
        # Merge the cached sigkey data (see _fill_in_cache) into the standard
        # DRF serializer context.
        context = super(ComposeViewSet, self).get_serializer_context()
        context.update(self.context)
        return context
    def filter_queryset(self, qs):
        """
        If the viewset instance has attribute `order_queryset` set to True,
        this method returns a list of composes ordered according to *productmd*
        library. Otherwise it will return an unsorted queryset. (It is not
        possible to sort unconditionally as get_object() will at some point
        call this method and fail unless it receives a QuerySet instance.)
        """
        qs = super(ComposeViewSet, self).filter_queryset(self._filter_nvras(qs))
        if getattr(self, 'order_queryset', False):
            return sorted(qs)
        return qs
    def _filter_nvras(self, qs):
        # Translate the rpm_*/srpm_name query parameters into one combined
        # case-insensitive filter over the compose's RPMs (AND semantics).
        q = Q()
        query_params = self.request.query_params
        query_param_rpm_key_mapping = [('rpm_name', 'name'),
                                       ('srpm_name', 'srpm_name'),
                                       ('rpm_version', 'version'),
                                       ('rpm_release', 'release'),
                                       ('rpm_arch', 'arch')]
        for query_param, rpm_key in query_param_rpm_key_mapping:
            rpm_value = query_params.get(query_param, None)
            s = 'variant__variantarch__composerpm__rpm__' + rpm_key
            if rpm_value:
                q &= Q(**{s + '__iexact': rpm_value})
        return qs.filter(q).distinct()
    def _fill_in_cache(self, result_queryset):
        """
        Cache some information and put them in context to prevent from getting them one by one
        for each model object in other places.
        Currently, it caches compose id to it's corresponding Sigkeys' key id mapping.
        """
        # Walk compose -> variant -> variant_arch -> sigkey in three bulk
        # queries instead of per-compose lookups.
        variant_id_to_compose_id_dict = {}
        variant_id_list = []
        for compose_id, variant_id in Variant.objects.filter(
                compose__in=result_queryset).values_list("compose__id", "id"):
            variant_id_to_compose_id_dict[variant_id] = compose_id
            variant_id_list.append(variant_id)
        compose_id_to_va_id_set = {}
        variant_arch_id_list = []
        for variant_id, variant_arch_id in VariantArch.objects.filter(
                variant__id__in=variant_id_list).values_list('variant__id', 'id'):
            compose_id_to_va_id_set.setdefault(variant_id_to_compose_id_dict[variant_id], set([])).add(variant_arch_id)
            variant_arch_id_list.append(variant_arch_id)
        va_id_to_key_id_set = {}
        for key_id, va_id in SigKey.objects.filter(
                composerpm__variant_arch__id__in=variant_arch_id_list).values_list(
                'key_id', 'composerpm__variant_arch__id').distinct():
            va_id_to_key_id_set.setdefault(va_id, set([])).add(key_id)
        compose_id_to_key_id_cache = {}
        # NOTE(review): iteritems() is Python 2 only.
        for compose_id, va_id_set in compose_id_to_va_id_set.iteritems():
            for va_id in va_id_set:
                if va_id in va_id_to_key_id_set:
                    key_id_set = compose_id_to_key_id_cache.setdefault(compose_id, set([]))
                    key_id_set |= va_id_to_key_id_set[va_id]
        self.context = {'compose_id_to_key_id_cache': compose_id_to_key_id_cache}
    def _add_messaging_info(self, request, info):
        # Queue a message on the '.compose' topic; the messaging middleware
        # sends everything accumulated in request._messagings.
        if hasattr(request._request, '_messagings'):
            request._request._messagings.append(('.compose', info))
    # Template for the generated API documentation of the retrieve endpoint.
    doc_retrieve = """
    __Method__: GET

    __URL__: $LINK:compose-detail:compose_id$

    __Response__:

    %(SERIALIZER)s
    """
    def list(self, *args, **kwargs):
        """
        Get a list of all composes. The composes are ordered first by the
        release for which they were build (by their short and version).
        Composes in the same release are ordered by date, type and respin.
        __Method__: GET
        __URL__: $LINK:compose-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        Note: Query params 'rpm_name', 'srpm_name', 'rpm_version', 'rpm_release', 'rpm_arch'
        can be used together which perform AND search. When input multi values for one of these
        query params, the last one will take effect.
        """
        # productmd ordering only applies when the client did not request an
        # explicit ordering of its own.
        self.order_queryset = True
        if 'ordering' in self.request.query_params.keys():
            self.order_queryset = False
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        result_queryset = queryset
        if page is not None:
            result_queryset = page
        self._fill_in_cache(result_queryset)
        serializer = self.get_serializer(result_queryset, many=True)
        if page is not None:
            result = self.get_paginated_response(serializer.data)
        else:
            result = Response(serializer.data)
        return result
    def update_arch_testing_status(self, data):
        # Apply the {variant: {arch: status}} mapping from a PATCH request,
        # validating that each tree and status actually exist, and log every
        # change into the changeset.
        compose_id = self.kwargs[self.lookup_field]
        for variant_uid in data:
            variant_data = as_dict(data[variant_uid], name=variant_uid)
            # NOTE(review): iteritems() is Python 2 only.
            for arch_name, status_name in variant_data.iteritems():
                try:
                    var_arch = VariantArch.objects.get(arch__name=arch_name,
                                                       variant__variant_uid=variant_uid,
                                                       variant__compose__compose_id=compose_id)
                    state = ComposeAcceptanceTestingState.objects.get(name=status_name)
                except VariantArch.DoesNotExist:
                    raise serializers.ValidationError(
                        {'rtt_tested_architectures':
                         '%s.%s not in compose %s.' % (variant_uid, arch_name, compose_id)}
                    )
                except ComposeAcceptanceTestingState.DoesNotExist:
                    raise serializers.ValidationError(
                        {'rtt_tested_architectures': '"%s" is not a known testing status for %s.%s.'
                         % (status_name, variant_uid, arch_name)}
                    )
                self.request.changeset.add('ComposeVariantArch', var_arch.pk,
                                           json.dumps({"rtt_testing_status": var_arch.rtt_testing_status.name}),
                                           json.dumps({"rtt_testing_status": status_name}))
                var_arch.rtt_testing_status = state
                var_arch.save()
    def update(self, request, *args, **kwargs):
        # This method is used by bulk update and partial update, but should not
        # be called directly.
        if not kwargs.get('partial', False):
            return self.http_method_not_allowed(request, *args, **kwargs)
        if not request.data:
            return NoEmptyPatchMixin.make_response()
        if not isinstance(request.data, dict):
            return Response(data={"detail": ("The parameters' format for updating is wrong. "
                                             "Please read API documentation")}, status=status.HTTP_400_BAD_REQUEST)
        updatable_keys = set(['acceptance_testing', 'linked_releases', 'rtt_tested_architectures'])
        if set(request.data.keys()) - updatable_keys:
            return Response(status=status.HTTP_400_BAD_REQUEST,
                            data={'detail': 'Only these properties can be updated: %s'
                                  % ', '.join(updatable_keys)})
        # Omit changing request data if immutable.
        try:
            rtt_tested_architectures = request.data.pop('rtt_tested_architectures', {})
        except AttributeError:
            rtt_tested_architectures = request.data.get('rtt_tested_architectures', {})
        arch_testing_status = as_dict(rtt_tested_architectures,
                                      name='rtt_tested_architectures')
        self.update_arch_testing_status(arch_testing_status)
        # Snapshot the old state first so the changeset can record a diff.
        old_data = ComposeSerializer(instance=self.get_object(), context={'request': request}).data
        response = super(ComposeViewSet, self).update(request, *args, **kwargs)
        if response.status_code == status.HTTP_200_OK:
            request.changeset.add('Compose', self.object.pk,
                                  json.dumps({'acceptance_testing': old_data['acceptance_testing']}),
                                  json.dumps({'acceptance_testing': response.data['acceptance_testing']}))
            request.changeset.add('Compose', self.object.pk,
                                  json.dumps({'linked_releases': old_data['linked_releases']}),
                                  json.dumps({'linked_releases': response.data['linked_releases']}))
            # Add message
            self._add_messaging_info(request, {'action': 'update',
                                               'compose_id': self.object.compose_id,
                                               'from': old_data,
                                               'to': response.data})
        return response
    def perform_update(self, serializer):
        # To log changes, we need updated instance saved somewhere.
        self.object = serializer.save()
    def bulk_update(self, *args, **kwargs):
        """
        It is possible to perform bulk partial update on composes with `PATCH`
        method. The input must be a JSON object with compose identifiers as
        keys. Values for these keys should be in the same format as when
        updating a single compose.
        """
        return bulk_operations.bulk_update_impl(self, *args, **kwargs)
    def partial_update(self, request, *args, **kwargs):
        """
        Only some compose fields can be modified by this call. These are
        `acceptance_testing`, `linked_releases` and `rtt_tested_architectures`.
        Trying to change anything else will result in 400 BAD REQUEST response.
        __Method__: PATCH
        __URL__: $LINK:compose-detail:compose_id$
        __Data__:
            {
                "acceptance_testing": string,
                "linked_releases": [string],
                "rtt_tested_architectures": object
            }
        If the same release is specified in `linked_release` multiple times, it
        will be saved only once.
        __Note__: if you want to just update the `rtt_tested_architectures`,
        it's easy to update with $LINK:composetreertttests-list$ API.
        In this API , the `rtt_tested_architectures` should be a mapping in the form of
        `{variant: {arch: status}}`. Whatever is specified will be saved in
        database, trees not mentioned will not be modified. Specifying variant
        or architecture that does not exist will result in error.
        __Response__:
        same as for retrieve
        """
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
    def destroy(self, request, *args, **kwargs):
        """
        It will mark the compose as 'deleted'.
        __Method__:
        DELETE
        __URL__: $LINK:compose-detail:compose_id$
        __Response__:
            STATUS: 204 NO CONTENT
        __Example__:
            curl -X DELETE -H "Content-Type: application/json" $URL:compose-detail:1$
        """
        # Soft delete: the row is kept, only flagged as deleted. Repeating the
        # call is a no-op that responds with a warning header.
        instance = self.get_object()
        if instance.deleted:
            return Response(status=status.HTTP_204_NO_CONTENT,
                            headers=generate_warning_header_dict(
                                "No change. This compose was marked as deleted already."))
        else:
            instance.deleted = True
            instance.save()
            request.changeset.add('Compose', instance.pk,
                                  json.dumps({'deleted': False}),
                                  json.dumps({'deleted': True}))
            self._add_messaging_info(request, {'action': 'delete',
                                               'compose_id': instance.compose_id})
            return Response(status=status.HTTP_204_NO_CONTENT)
class CheckParametersMixin(object):
    """Mixin providing validation of request parameter names."""

    def _check_parameters(self, expected_param_list, real_param_list, error_dict,
                          optional_param_list=()):
        """
        Populate `error_dict` with an error message for every parameter in
        `real_param_list` that is neither expected nor optional ("illegal"),
        and for every expected parameter that is missing ("required").

        The default for `optional_param_list` is an immutable tuple (the
        original used a mutable `[]` default, a classic Python pitfall; the
        value was never mutated, so behavior is unchanged).
        """
        for key in set(real_param_list) - set(expected_param_list) - set(optional_param_list):
            error_dict[key] = ["This field is illegal"]
        for key in set(expected_param_list) - set(real_param_list):
            error_dict[key] = ["This field is required"]
class ComposeRPMView(StrictQueryParamMixin, CheckParametersMixin, viewsets.GenericViewSet):
    # Endpoint for importing an RPM manifest into PDC (create) and dumping it
    # back out in productmd format (retrieve).
    permission_classes = (APIPermission,)
    lookup_field = 'compose_id'
    lookup_value_regex = '[^/]+'
    queryset = ComposeRPM.objects.none()  # Required for permissions
    def create(self, request):
        """
        Import RPMs.
        __Method__: POST
        __URL__: $LINK:composerpm-list$
        __Data__:
            {
                "release_id": string,
                "composeinfo": composeinfo,
                "rpm_manifest": rpm_manifest
            }
        __Response__:
            {
                "compose_id": string,
                "imported rpms": int
            }
        The `composeinfo` and `rpm_manifest` values should be actual JSON
        representation of composeinfo and rpm manifest, as stored in
        `composeinfo.json` and `rpm-manifest.json` files.
        You can use <a href="https://pagure.io/pungi">Pungi</a> to produce
        `composeinfo.json` and `rpm-manifest.json`.
        __Example__:
            $ curl -H 'Content-Type: application/json' -X POST \\
                -d "{\\"composeinfo\\": $(cat /path/to/composeinfo.json), \\
                     \\"rpm_manifest\\": $(cat /path/to/rpm-manifest.json), \\
                     \\"release_id\\": \\"release-1.0\\" }" \\
                $URL:composerpm-list$
        Note that RPM manifests tend to be too large to supply the data via
        command line argument and using a temporary file becomes necessary.
            $ { echo -n '{"composeinfo": '; cat /path/to/composeinfo.json
            > echo -n ', "rpm_manifest": '; cat /path/to/rpm-manifest.json
            > echo -n ', "release_id": "release-1.0" }' ; } >post_data.json
            $ curl -H 'Content-Type: application/json' -X POST -d @post_data.json \\
                $URL:composerpm-list$
        You could skip the file and send the data directly to `curl`. In such a
        case use `-d @-`.
        """
        # Reject requests with missing or unexpected keys before delegating
        # the heavy lifting to lib.compose__import_rpms.
        data = request.data
        errors = {}
        fields = ['release_id', 'composeinfo', 'rpm_manifest']
        self._check_parameters(fields, data.keys(), errors)
        if errors:
            return Response(status=status.HTTP_400_BAD_REQUEST, data=errors)
        compose_id, imported_rpms = lib.compose__import_rpms(request, data['release_id'], data['composeinfo'], data['rpm_manifest'])
        return Response(data={'compose': compose_id, 'imported rpms': imported_rpms}, status=status.HTTP_201_CREATED)
    def retrieve(self, request, **kwargs):
        """
        __Method__: `GET`
        __URL__: $LINK:composerpm-detail:compose_id$
        This API end-point allows retrieving RPM manifest for a given compose.
        It will return the exact same data as was imported.
        """
        compose = get_object_or_404(Compose, compose_id=kwargs['compose_id'])
        # Prefetch everything the manifest needs so the loop below does not
        # issue one query per ComposeRPM.
        crpms = ComposeRPM.objects.filter(variant_arch__variant__compose=compose) \
            .select_related('variant_arch__variant', 'variant_arch__arch', 'rpm', 'path') \
            .prefetch_related('sigkey', 'content_category')
        # Rebuild a productmd Rpms manifest from the database records.
        manifest = Rpms()
        manifest.compose.date = compose.compose_date.strftime('%Y%m%d')
        manifest.compose.id = compose.compose_id
        manifest.compose.respin = compose.compose_respin
        manifest.compose.type = compose.compose_type.name
        for crpm in crpms:
            arch = crpm.variant_arch.arch.name
            # Path is only available when both directory and filename are known.
            path = (os.path.join(crpm.path.path, crpm.rpm.filename)
                    if crpm.path and crpm.rpm.filename
                    else None)
            # Source RPMs have no srpm_nevra of their own.
            if crpm.rpm.arch == 'src':
                srpm_nevra = None
            else:
                srpm_nevra = crpm.rpm.srpm_nevra
            manifest.add(
                arch=arch,
                variant=crpm.variant_arch.variant.variant_uid,
                nevra=crpm.rpm.nevra,
                path=path,
                sigkey=crpm.sigkey.key_id if crpm.sigkey else None,
                category=crpm.content_category.name,
                srpm_nevra=srpm_nevra,
            )
        return Response(manifest.serialize({}))
class ComposeFullImportViewSet(StrictQueryParamMixin, CheckParametersMixin, viewsets.GenericViewSet):
    # One-shot import: RPMs + images + compose tree location in a single POST.
    permission_classes = (APIPermission,)
    queryset = Compose.objects.none()  # Required for permissions.

    def create(self, request):
        """
        Import RPMs, images and set compose tree location.

        __Method__: POST

        __URL__: $LINK:composefullimport-list$

        __Data__:

            {
                "release_id": string,
                "composeinfo": composeinfo,
                "rpm_manifest": rpm_manifest,
                "image_manifest": image_manifest,
                "location": string,
                "url": string,
                "scheme": string
            }

        __Response__:

            {
                "compose_id": string,
                "imported rpms": int,
                "imported images": int,
                "set_locations": int
            }

        The `composeinfo`, `rpm_manifest` and `image_manifest`values should be actual JSON
        representation of composeinfo, rpm manifest and image manifest, as stored in
        `composeinfo.json`, `rpm-manifest.json` and `image-manifest.json` files.

        `location`, `url`, `scheme` are used to set compose tree location.

        __Example__:

            $ curl -H 'Content-Type: application/json' -X POST \\
                -d "{\\"composeinfo\\": $(cat /path/to/composeinfo.json), \\
                     \\"rpm_manifest\\": $(cat /path/to/rpm-manifest.json), \\
                     \\"image_manifest\\": $(cat /path/to/image_manifest.json), \\
                     \\"release_id\\": \\"release-1.0\\", \\"location\\": \\"BOS\\", \\
                     \\"scheme\\": \\"http\\", \\"url\\": \\"abc.com\\" }" \\
                $URL:composefullimport-list$

        Note that RPM manifests tend to be too large to supply the data via
        command line argument and using a temporary file becomes necessary.

            $ { echo -n '{"composeinfo": '; cat /path/to/composeinfo.json
            > echo -n ', "rpm_manifest": '; cat /path/to/rpm-manifest.json
            > echo -n ', "image_manifest": '; cat /path/to/image_manifest.json
            > echo -n ', "release_id": "release-1.0", \"location\": \"BOS\", \"scheme\": \"http\", \"url\": \"abc.com\" }' ; } >post_data.json
            $ curl -H 'Content-Type: application/json' -X POST -d @post_data.json \\
                $URL:composefullimport-list$

        You could skip the file and send the data directly to `curl`. In such a
        case use `-d @-`.
        """
        data = request.data
        errors = {}
        # Every key is required; missing/extra ones end up in `errors`.
        fields = ['release_id', 'composeinfo', 'rpm_manifest', 'image_manifest', 'location', 'url', 'scheme']
        self._check_parameters(fields, data.keys(), errors)
        if errors:
            return Response(status=status.HTTP_400_BAD_REQUEST, data=errors)
        compose_id, imported_rpms, imported_images, set_locations = lib.compose__full_import(request,
                                                                                            data['release_id'],
                                                                                            data['composeinfo'],
                                                                                            data['rpm_manifest'],
                                                                                            data['image_manifest'],
                                                                                            data['location'],
                                                                                            data['url'],
                                                                                            data['scheme'])
        # NOTE(review): response key is 'compose' while the docstring documents
        # 'compose_id' -- same mismatch as ComposeRPMView.create; confirm.
        return Response(data={'compose': compose_id, 'imported rpms': imported_rpms,
                              'imported images': imported_images, 'set_locations': set_locations},
                        status=status.HTTP_201_CREATED)
class ComposeRPMMappingView(StrictQueryParamMixin,
                            viewsets.GenericViewSet):
    """
    This API endpoint allows viewing and modification of RPM mapping. The
    overrides applied in this view (if not suppressed) come from the release
    the compose was built for.
    """
    permission_classes = (APIPermission,)
    lookup_field = 'package'
    queryset = ComposeRPM.objects.none()  # Required for permissions
    extra_query_params = ('disable_overrides', 'perform')

    def retrieve(self, request, **kwargs):
        """
        __URL__: $LINK:composerpmmapping-detail:compose_id:package$

        __Response__:

            {
                Variants:{
                    archs:{
                        rpm_names:[
                            rpm_arch,
                        ]
                    }
                }
            }

        Returns a JSON representing the RPM mapping. There is an optional query
        parameter `?disable_overrides=1` which returns the raw mapping not
        affected by any overrides.
        """
        compose = get_object_or_404(Compose, compose_id=kwargs['compose_id'])
        # Second argument disables override application when truthy.
        mapping, _ = compose.get_rpm_mapping(kwargs['package'],
                                             bool(request.query_params.get('disable_overrides', False)))
        return Response(mapping.get_pure_dict())

    def partial_update(self, request, **kwargs):
        """
        __URL__: $LINK:composerpmmapping-detail:compose_id:package$

        Unlike other API end-points, patching RPM mapping requires you to specify all the fields. The request data
        should be a list of objects where each object has exactly the keys listed in documentation below. Only `include`
        field can be left out if `action` is not `create`.

            [
                {
                    "action":           <str>,         # value should be 'create' or 'delete'
                    "variant":          <str>,
                    "arch":             <str>,
                    "srpm_name":        <str>,
                    "rpm_name":         <str>,
                    "rpm_arch":         <str>,
                    "include":          <bool>,        # create only
                    "comment":          <str>,
                    "do_not_delete":    <bool>
                }
            ]
        """
        # Keys required for every change; `include` is validated separately
        # because it is mandatory for 'create' and forbidden for 'delete'.
        subset = set(["action", "variant", "arch", "srpm_name", "rpm_name", "rpm_arch",
                      "comment", "do_not_delete"])
        field_all = set(["action", "variant", "arch", "srpm_name", "rpm_name", "rpm_arch",
                         "comment", "do_not_delete", "include"])
        if not isinstance(request.data, list):
            return Response(data={"detail": ("Wrong input format")}, status=status.HTTP_400_BAD_REQUEST)
        for i in request.data:
            # Reject unknown keys first, then missing keys, then the
            # action-specific `include` rules.
            s = set(i) - field_all
            if s:
                return Response(data={"detail": ("Fields %s are not valid inputs" % list(s))},
                                status=status.HTTP_400_BAD_REQUEST)
            if not subset.issubset(set(i)):
                return Response(data={"detail": "Not all fields specified"}, status=status.HTTP_400_BAD_REQUEST)
            if i['action'].lower().strip() == "create" and "include" not in i:
                return Response(data={"detail": "No field 'include' when 'action' is create"},
                                status=status.HTTP_400_BAD_REQUEST)
            if i['action'].lower().strip() == "delete" and "include" in i:
                return Response(data={"detail": "Field 'include' is only for 'action' being 'create'"},
                                status=status.HTTP_400_BAD_REQUEST)
        compose = get_object_or_404(Compose, compose_id=kwargs['compose_id'])
        _apply_changes(request, compose.release, request.data)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def _update_parameters_acceptable(self, in_data, layer):
        """
        Validate the nesting of `in_data`: the innermost layer (layer == 1)
        must be a list, every layer above it must be a dict whose values are
        acceptable one layer down. Returns True/False.
        """
        if layer == 1:
            return isinstance(in_data, list)
        if layer > 1:
            if not isinstance(in_data, dict):
                return False
            # An empty dict is acceptable (all() of nothing is True), matching
            # the previous implementation. Previously this indexed
            # in_data.values()[i], which only works on Python 2; iterating the
            # values directly is equivalent there and also valid on Python 3.
            return all(self._update_parameters_acceptable(value, layer - 1)
                       for value in in_data.values())
        return False

    def update(self, request, **kwargs):
        """
        __URL__: $LINK:composerpmmapping-detail:compose_id:package$

        __Data__:

            {
                Variants:{
                    archs:{
                        rpm_names:[
                            rpm_arch,
                        ]
                    }
                }
            }

        Allows updating the RPM mapping by using a `PUT` request with data
        containing new mapping. PDC will compute changes between current
        mapping and the requested one. The response contains a list of changes
        suitable for partial update via `PATCH` method.

        __Response__:

            [
                {
                    'release_id': <str>,
                    'srpm_name': <str>,
                    'action': <str>,
                    'variant': <str>,
                    'arch': <str>,
                    'rpm_name': <str>,
                    'rpm_arch': <str>,
                    'include': <bool>,
                }
            ]

        By default, no changes are performed on the server. If you add
        `?perform=1` query string parameter, the changes will actually be saved
        in database as well as returned.
        """
        compose = get_object_or_404(Compose, compose_id=kwargs['compose_id'])
        # Expected shape: {variant: {arch: {rpm_name: [rpm_arch]}}} -> 4 layers.
        if not self._update_parameters_acceptable(request.data, 4):
            return Response(
                data={"detail": "The parameters' format for updating is wrong. Please read API documentation"},
                status=status.HTTP_400_BAD_REQUEST)
        mapping, _ = compose.get_rpm_mapping(kwargs['package'])
        new_mapping = ComposeRPMMapping(data=request.data)
        changes = mapping.compute_changes(new_mapping)
        # Changes are only persisted when explicitly requested.
        if bool(request.query_params.get('perform', False)):
            _apply_changes(request, compose.release, changes)
        return Response(changes)

    def bulk_update(self, *args, **kwargs):
        """
        It is possible to perform bulk update on compose rpm mapping with `PUT` or `PATCH`
        method. The input must be a JSON object with `package`as
        keys. Values for these keys should be in the same format as `update`.
        """
        return bulk_operations.bulk_update_impl(self, *args, **kwargs)
class ComposeImageView(StrictQueryParamMixin, CheckParametersMixin,
                       viewsets.GenericViewSet):
    # Import (POST) and retrieval (GET) of per-compose image manifests.
    permission_classes = (APIPermission,)
    queryset = ComposeImage.objects.none()  # Required for permissions
    lookup_field = 'compose_id'
    lookup_value_regex = '[^/]+'

    def create(self, request):
        """
        Import images.

        __Method__: POST

        __URL__: $LINK:composeimage-list$

        __Data__:

            {
                "release_id": string,
                "composeinfo": composeinfo,
                "image_manifest": image_manifest
            }

        __Response__:

            {
                "compose_id": string,
                "imported images": int
            }

        The `composeinfo` and `image_manifest` values should be actual JSON
        representation of composeinfo and image manifest, as stored in
        `composeinfo.json` and `image-manifest.json` files.

        You can use <a href="https://pagure.io/pungi">Pungi</a> to produce
        `composeinfo.json` and `image-manifest.json`.

        __Example__:

            $ curl -H 'Content-Type: application/json' -X POST \\
                -d "{\\"composeinfo\\": $(cat /path/to/composeinfo.json), \\
                     \\"image_manifest\\": $(cat /path/to/image-manifest.json), \\
                     \\"release_id\\": \\"release-1.0\\" }" \\
                $URL:composeimage-list$
        """
        data = request.data
        errors = {}
        fields = ['release_id', 'composeinfo', 'image_manifest']
        self._check_parameters(fields, data.keys(), errors)
        if errors:
            # Use the named constant for consistency with the rest of the
            # module (previously a bare `status=400`).
            return Response(status=status.HTTP_400_BAD_REQUEST, data=errors)
        compose_id, imported_images = lib.compose__import_images(request, data['release_id'], data['composeinfo'], data['image_manifest'])
        return Response(data={'compose': compose_id, 'imported images': imported_images}, status=status.HTTP_201_CREATED)

    def retrieve(self, request, **kwargs):
        """
        __Method__: `GET`

        __URL__: $LINK:composeimage-detail:compose_id$

        This API end-point allows retrieving image manifest for a given compose.
        It will return the exact same data as was imported.
        """
        compose = get_object_or_404(Compose, compose_id=kwargs['compose_id'])
        cimages = ComposeImage.objects.filter(variant_arch__variant__compose=compose)

        # Rebuild a productmd Images manifest from the stored database rows.
        manifest = Images()
        manifest.compose.date = compose.compose_date.strftime('%Y%m%d')
        manifest.compose.id = compose.compose_id
        manifest.compose.respin = compose.compose_respin
        manifest.compose.type = compose.compose_type.name

        for cimage in cimages:
            im = Image(None)
            im.path = os.path.join(cimage.path.path, cimage.image.file_name)
            im.arch = cimage.image.arch
            im.bootable = cimage.image.bootable
            im.mtime = cimage.image.mtime
            im.size = cimage.image.size
            im.volume_id = cimage.image.volume_id
            im.type = cimage.image.image_type.name
            im.format = cimage.image.image_format.name
            im.arch = cimage.image.arch
            im.disc_number = cimage.image.disc_number
            im.disc_count = cimage.image.disc_count
            # sha256 is always present; md5/sha1 only when stored.
            im.checksums = {'sha256': cimage.image.sha256}
            im.subvariant = cimage.image.subvariant
            if cimage.image.md5:
                im.checksums['md5'] = cimage.image.md5
            if cimage.image.sha1:
                im.checksums['sha1'] = cimage.image.sha1
            im.implant_md5 = cimage.image.implant_md5
            manifest.add(cimage.variant_arch.variant.variant_uid, cimage.variant_arch.arch.name, im)

        return Response(manifest.serialize({}))
class ReleaseOverridesRPMViewSet(StrictQueryParamMixin,
                                 mixins.ListModelMixin,
                                 ChangeSetCreateModelMixin,
                                 ChangeSetDestroyModelMixin,
                                 viewsets.GenericViewSet):
    """
    Create, search or delete RPM overrides for specific release. The release is
    referenced by its `release_id`.
    """
    serializer_class = OverrideRPMSerializer
    queryset = OverrideRPM.objects.all().order_by('id')
    filter_class = OverrideRPMFilter
    permission_classes = (APIPermission,)

    # doc_create / doc_list / doc_destroy are templates rendered into the
    # browsable API documentation; %(...)s placeholders are filled in by the
    # documentation machinery.
    doc_create = """
        __Method__: POST

        __URL__: $LINK:overridesrpm-list$

        __Data__:

        %(WRITABLE_SERIALIZER)s

        __Response__:

        %(SERIALIZER)s

        __Example__:

            $ curl -H 'Content-Type: application/json' "$URL:overridesrpm-list$" \\
                -X POST -d '{"variant": "Client", "arch": "x86_64", "srpm_name": "bash", \\
                             "rpm_name": "bash-doc", "rpm_arch": "src", "include": true, \\
                             "release": "release-1.0"}'
            {
                "id": 1,
                "release": "release-1.0",
                "variant": "Client",
                "arch": "x86_64",
                "srpm_name": "bash",
                "rpm_name": "bash-doc",
                "rpm_arch": "src",
                "include": true,
                "comment": "",
                "do_not_delete": false
            }
    """

    doc_list = """
        __Method__: GET

        __URL__: $LINK:overridesrpm-list$

        __Query params__:

        %(FILTERS)s

        __Response__: a paged list of following objects

        %(SERIALIZER)s

        __Example__:

            $ curl -H 'Content-Type: application/json' "$URL:overridesrpm-list$"
            {
                "previous": null,
                "next": null,
                "count": 1,
                "results": [{
                    "id": 1,
                    "do_not_delete": false,
                    "release": "release-1.0",
                    "variant": "Client",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "rpm_name": "bash-doc",
                    "rpm_arch": "src",
                    "include": true,
                    "comment": ""
                }]
            }
    """

    doc_destroy = """
        Delete a particular override.

        __Method__: DELETE

        __URL__: $LINK:overridesrpm-detail:id$
    """

    def bulk_destroy(self, *args, **kwargs):
        """
        There are two ways to invoke this call. Both require a request body. In
        one case you can only delete a list of specific overrides, in the other
        you clear all overrides on a given release.

        __Method__: DELETE

        __URL__: $LINK:overridesrpm-list$

        __Data__:

            [id]

        or

            {
                "release":  string,
                "force":    bool        # optional, default false
            }

        __Response__:

        For deleting a list of specific objects there is no output.

        When clearing all overrides, a list of deleted objects is returned.

        __Example__:

            $ curl -H 'Content-Type: application/json' "$URL:overridesrpm-list$" \\
                -X DELETE -d '[1, 15, 29]'

        Clearing all overrides.

            $ curl -H 'Content-Type: application/json' "$URL:overridesrpm-list$" \\
                -X DELETE -d '{ "release": "release-1.0", "force": false }'
            [
                {
                    "id": 1,
                    "do_not_delete": false,
                    "release": "release-1.0",
                    "variant": "Client",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "rpm_name": "bash-magic",
                    "rpm_arch": "src",
                    "include": true,
                    "comment": ""
                }
            ]
        """
        data = self.request.data
        # Dict payload => clear all overrides for one release.
        if isinstance(data, dict):
            keys = set(data.keys())
            # Only 'release' (required) and 'force' (optional) are accepted.
            if keys - {'force'} != {'release'}:
                return Response(status=status.HTTP_400_BAD_REQUEST,
                                data=["Allowed keys are release and force (optional, default false)."])
            release_obj = get_object_or_404(Release, release_id=data["release"])
            data = self._clear(release_obj, data)
            if data:
                return Response(status=status.HTTP_200_OK, data=data)
            else:
                # Nothing matched: no deletable overrides for this release.
                return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': 'Not found.'})

        # List payload => delete the listed override ids.
        if isinstance(data, list):
            return bulk_operations.bulk_destroy_impl(self, *args, **kwargs)

        return Response(status=status.HTTP_400_BAD_REQUEST,
                        data=['Bulk delete expects either a list or object.'])

    def _clear(self, release_obj, args):
        """Delete overrides of `release_obj`; honor do_not_delete unless force.

        Returns the serialized data of every deleted override, recording each
        deletion in the request changeset.
        """
        query = Q(release=release_obj)
        # Without force=true, rows flagged do_not_delete are preserved.
        if not bool_from_native(args.get("force", "False")):
            query &= Q(do_not_delete=False)
        queryset = OverrideRPM.objects.filter(query)
        result = []
        for override in queryset:
            serializer = self.serializer_class(override)
            # Record the pre-deletion state; 'null' marks removal.
            self.request.changeset.add('OverrideRPM', override.pk, serializer.data, 'null')
            result.append(serializer.data)
        queryset.delete()
        return result
class OverridesRPMCloneViewSet(StrictQueryParamMixin, viewsets.GenericViewSet):
    # Clone RPM overrides from one release to another.
    permission_classes = (APIPermission,)
    queryset = OverrideRPM.objects.none()

    def create(self, request):
        """
        Clone overrides-rpm from source-release to target-release, both them have to be existed.
        With optional arguments, each optional argument specifies which type of overrides get copied.
        And if overrides-rpm have exited in target-release, they don't get copied.

        __Method__: POST

        __URL__: $LINK:overridesrpmclone-list$

        __DATA__:

            {
                "source_release_id":    string
                "target_release_id":    string
                "rpm_name":             string      #optional
                "srpm_name":            string      #optional
                "variant":              string      #optional
                "arch":                 string      #optional
            }

        __Response__:

            [
                {
                    "arch": "string",
                    "comment (optional, default=\"\")": "string",
                    "do_not_delete (optional, default=false)": "boolean",
                    "id (read-only)": "int",
                    "include": "boolean",
                    "release": "Release.release_id",
                    "rpm_arch": "string",
                    "rpm_name": "string",
                    "srpm_name": "string",
                    "variant": "string"
                }
            ]
        """
        data = request.data
        keys = set(['source_release_id', 'target_release_id'])
        # Optional filters narrowing which overrides get cloned.
        # NOTE(review): 'rpm_arch' is accepted here but is not listed in the
        # __DATA__ docstring above -- confirm and document.
        arg_filter_map = ['variant', 'arch', 'srpm_name', 'rpm_name', 'rpm_arch']
        allowed_keys = list(keys) + arg_filter_map
        extra_keys = set(data.keys()) - set(allowed_keys)
        if extra_keys:
            return Response({'detail': '%s keys are not allowed' % list(extra_keys)},
                            status=status.HTTP_400_BAD_REQUEST)
        for key in keys:
            if key not in data:
                return Response({'detail': 'Missing %s' % key},
                                status=status.HTTP_400_BAD_REQUEST)
        # Resolve both release ids to Release objects; 404 if either is absent.
        tmp_release = {}
        for key in keys:
            try:
                tmp_release[key] = Release.objects.get(release_id=data.pop(key))
            except Release.DoesNotExist:
                return Response({'detail': '%s does not exist' % key},
                                status=status.HTTP_404_NOT_FOUND)
        kwargs = {'release__release_id': tmp_release['source_release_id'].release_id}
        for arg in arg_filter_map:
            arg_data = request.data.get(arg)
            if arg_data:
                kwargs[arg] = arg_data
        overrides_rpm = OverrideRPM.objects.filter(**kwargs)
        if not overrides_rpm:
            return Response({'detail': 'there is no overrides-rpm in source release'},
                            status=status.HTTP_400_BAD_REQUEST)
        results = []
        for rpm in overrides_rpm:
            # NOTE(review): 'include' and 'do_not_delete' are not copied from
            # the source override, so clones fall back to model defaults --
            # verify this is intentional.
            orpm, created = OverrideRPM.objects.get_or_create(release=tmp_release['target_release_id'],
                                                              rpm_name=rpm.rpm_name,
                                                              rpm_arch=rpm.rpm_arch,
                                                              variant=rpm.variant,
                                                              arch=rpm.arch,
                                                              srpm_name=rpm.srpm_name,
                                                              comment=rpm.comment
                                                              )
            if created:
                results.append(orpm.export())
                # 'null' old value marks a creation in the changeset.
                request.changeset.add('OverridesRPM', orpm.pk,
                                      'null', json.dumps(orpm.export()))
        if results:
            return Response(status=status.HTTP_201_CREATED, data=results)
        else:
            # Every matching override already existed in the target release.
            return Response({'detail': 'overridesRPMs have existed in target release'},
                            status=status.HTTP_200_OK)
class FilterBugzillaProductsAndComponents(StrictQueryParamMixin,
                                          viewsets.ReadOnlyModelViewSet):
    """
    This API endpoint allows listing bugzilla products and components with RPM's nvr.
    """
    queryset = ComposeRPM.objects.none()  # Required for permissions
    extra_query_params = ('nvr', )
    permission_classes = (APIPermission,)

    def list(self, request):
        """
        __Method__: GET

        __URL__: $LINK:bugzilla-list$

        __Query params__:

        The `nvr` is always required.

        %(FILTERS)s

        __Response__:

            [
                {
                    "bugzilla_component": [
                        string,
                        ...
                    ],
                    "bugzilla_product": string
                },
                ...
            ]

        __Example__:

            $ curl -H 'Content-Type: application/json' "$URL:bugzilla-list$?nvr=GConf2-3.2.6-8.el7"
            [
                {
                    "bugzilla_component": [
                        "GConf1", "GConf2"
                    ],
                    "bugzilla_product": "Fedora"
                }
            ]
        """
        nvr = request.query_params.get('nvr', None)
        if nvr is None:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={'detail': 'The nvr is required.'})

        try:
            result = lib.find_bugzilla_products_and_components_with_rpm_nvr(nvr)
        # `as` syntax (PEP 3110) works on Python 2.6+ and 3.x; the previous
        # comma form is Python-2-only. A ValueError signals an unparsable nvr.
        except ValueError as ex:
            return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': str(ex)})
        else:
            return Response(status=status.HTTP_200_OK, data=result)
class FindComposeMixin(object):
    # Shared helpers for the Find*Compose* viewsets below. Callers are expected
    # to set the following attributes before invoking the _get_* helpers:
    # included_compose_type, excluded_compose_type, latest, to_dict, rpm_name,
    # and one of release_id / product_version / compose_id.

    def _filter_by_compose_type(self, qs):
        # Narrow a Compose queryset/list by the optional type filters.
        if self.included_compose_type:
            qs = qs.filter(compose_type__name=self.included_compose_type)
        if self.excluded_compose_type:
            qs = qs.exclude(compose_type__name=self.excluded_compose_type)
        return qs

    def _get_composes_for_release(self):
        # All non-deleted composes of a single release, ordered oldest first.
        result = []
        composes = Compose.objects.filter(release__release_id=self.release_id, deleted=False)
        composes = self._filter_by_compose_type(composes)
        result = self._get_result(composes, result)
        return result

    def _get_composes_for_product_version(self):
        # Union of composes across every release of the product version.
        result = []
        all_composes = []
        releases = Release.objects.filter(product_version__product_version_id=self.product_version)
        for release in releases:
            composes = Compose.objects.filter(release=release, deleted=False)
            composes = self._filter_by_compose_type(composes)
            all_composes.extend(composes)
        result = self._get_result(all_composes, result)
        return result

    def _get_result(self, composes, result):
        # Compose ordering (date, then type, then respin) comes from the
        # productmd comparison implemented on the Compose model.
        if self.latest:
            compose = max(composes) if composes else None
            if compose:
                self._construct_result(compose, result)
        else:
            for compose in sorted(composes):
                self._construct_result(compose, result)
        return result

    def _construct_result(self, compose, result):
        # Appends one {'compose': ..., 'packages': [...]} entry to `result`.
        rpms = compose.get_rpms(self.rpm_name)
        result.append({'compose': compose.compose_id,
                       'packages': self._packages_output(rpms)})
        return result

    def _packages_output(self, rpms):
        """
        Output packages with unicode or dict
        """
        # NOTE: `unicode` is Python-2-only; this module predates Python 3.
        packages = [unicode(rpm) for rpm in rpms]
        return (packages
                if not self.to_dict
                else [RPMSerializer(rpm, exclude_fields=['dependencies']).data for rpm in rpms])

    def _get_query_param_or_false(self, request, query_str):
        # Store the boolean query parameter as an attribute; absent => False.
        value = request.query_params.get(query_str)
        if value:
            value = convert_str_to_bool(value)
        else:
            value = False
        setattr(self, query_str, value)

    def _get_older_compose(self):
        # Find the newest compose older than self.compose_id that carries a
        # different version of self.rpm_name; 404 when none exists.
        compose = get_object_or_404(Compose, compose_id=self.compose_id)
        current_rpms = set(r.sort_key for r in compose.get_rpms(self.rpm_name))
        # Find older composes for same release (not including this one)
        composes = (Compose.objects
                    .exclude(deleted=True)
                    # Get only older composes
                    # NOTE(review): same-date composes (e.g. a newer respin on
                    # the same day) are not excluded here -- confirm intended.
                    .exclude(compose_date__gt=compose.compose_date)
                    # Only composes in the same product
                    .filter(release__short=compose.release.short)
                    # Which have the requested rpm in some version
                    .filter(variant__variantarch__composerpm__rpm__name=self.rpm_name)
                    # Keep only composes from the release that requested
                    # compose belongs to, or GA releases. This way, after R-1.1
                    # it goes to R-1.0, but not R-1.0-updates.
                    .filter(Q(release__release_type__short='ga') | Q(release=compose.release))
                    .exclude(id=compose.id)
                    .distinct())
        composes = self._filter_by_compose_type(composes)
        latest = None
        # Walk newest-first; stop at the first compose whose rpm versions
        # differ from the current compose's.
        for compose in sorted(composes, reverse=True):
            rpms = compose.get_rpms(self.rpm_name)
            # Does compose have a version not in current compose?
            if set(r.sort_key for r in rpms) - current_rpms:
                latest = compose
                break
        if not latest:
            raise Http404('No older compose with earlier version of RPM')
        # `rpms` still holds the packages of the compose found above (the loop
        # exits via break on that iteration).
        return {
            'compose': latest.compose_id,
            'packages': self._packages_output(rpms)
        }
class FindComposeByReleaseRPMViewSet(StrictQueryParamMixin, FindComposeMixin, viewsets.GenericViewSet):
    """
    This API endpoint allows finding all composes that contain the package
    (and include its version) for a given release and srpm_name
    """
    queryset = ComposeRPM.objects.none()  # Required for permissions
    extra_query_params = ('included_compose_type', 'excluded_compose_type', 'latest', 'to_dict')
    permission_classes = (APIPermission,)

    def list(self, request, **kwargs):
        """
        This method allows listing all (compose, package) pairs for a given
        release and RPM name.

        The ordering of composes is performed by the *productmd* library. It
        first compares compose date, then compose type
        (`test` < `nightly` < `production`) and lastly respin.

        `latest` is optional parameter. If it is provided, and the value is True, it will
        return a single pair with the latest compose and its version of the packages.

        `to_dict` is optional parameter, accepted values (True, 'true', 't', 'True', '1'),
        or (False, 'false', 'f', 'False', '0'). If it is provided, and the value is True,
        packages' format will be as a dict.

        __Method__: GET

        __URL__: $LINK:findcomposebyrr-list:release_id:rpm_name$

        __Query params__:

        %(FILTERS)s

        __Response__:

            [
                {
                    "compose": string,
                    "packages": [string]
                },
                ...
            ]

        The list is sorted by compose: oldest first.
        """
        # Stash filter parameters as attributes consumed by FindComposeMixin.
        query = request.query_params
        for param in ('included_compose_type', 'excluded_compose_type'):
            setattr(self, param, query.get(param))
        for flag in ('latest', 'to_dict'):
            self._get_query_param_or_false(request, flag)
        self.release_id = kwargs.get('release_id')
        self.rpm_name = kwargs.get('rpm_name')
        return Response(self._get_composes_for_release())
class FindOlderComposeByComposeRPMViewSet(StrictQueryParamMixin, FindComposeMixin, viewsets.GenericViewSet):
    """
    This API endpoint allows finding the latest compose older than specified compose
    which contains a different version of the specified package.
    """
    queryset = ComposeRPM.objects.none()  # Required for permissions
    extra_query_params = ('included_compose_type', 'excluded_compose_type', 'to_dict')
    permission_classes = (APIPermission,)

    def list(self, request, **kwargs):
        """
        This method is to find the latest compose older than specified compose
        which contains a different version of the specified package when given a
        compose and a package.

        The ordering of composes is performed by the *productmd* library. It
        first compares compose date, then compose type
        (`test` < `nightly` < `production`) and lastly respin.
        This method will find the latest one according to above sequence.

        `to_dict` is optional parameter, accepted values (True, 'true', 't', 'True', '1'),
        or (False, 'false', 'f', 'False', '0'). If it is provided, and the value is True,
        packages' format will be as a dict.

        __Method__: GET

        __URL__: $LINK:findoldercomposebycr-list:compose_id:rpm_name$

        __Query params__:

        %(FILTERS)s

        __Response__:

            [
                {
                    "compose": string,
                    "packages": [string]
                },
                ...
            ]

        The list is sorted by compose: oldest first.
        """
        # Stash filter parameters as attributes consumed by FindComposeMixin.
        query = request.query_params
        for param in ('included_compose_type', 'excluded_compose_type'):
            setattr(self, param, query.get(param))
        self._get_query_param_or_false(request, 'to_dict')
        self.compose_id = kwargs.get('compose_id')
        self.rpm_name = kwargs.get('rpm_name')
        return Response(self._get_older_compose())
class FindComposeByProductVersionRPMViewSet(StrictQueryParamMixin, FindComposeMixin, viewsets.GenericViewSet):
    """
    This API endpoint allows finding all composes that contain the package
    (and include its version) for a given product_version and srpm_name
    """
    queryset = ComposeRPM.objects.none()  # Required for permissions
    extra_query_params = ('included_compose_type', 'excluded_compose_type', 'latest', 'to_dict')
    permission_classes = (APIPermission,)

    def list(self, request, **kwargs):
        """
        This method allows listing all (compose, package) pairs for a given
        product_version and RPM name.

        The ordering of composes is performed by the *productmd* library. It
        first compares compose date, then compose type
        (`test` < `nightly` < `production`) and lastly respin.

        `latest` is optional parameter. If it is provided, and the value is True, it will
        return a single pair with the latest compose and its version of the packages.

        `to_dict` is optional parameter, accepted values (True, 'true', 't', 'True', '1'),
        or (False, 'false', 'f', 'False', '0'). If it is provided, and the value is True,
        packages' format will be as a dict.

        __Method__: GET

        __URL__: $LINK:findcomposesbypvr-list:product_version_id:rpm_name$

        __Query params__:

        %(FILTERS)s

        __Response__:

            [
                {
                    "compose": string,
                    "packages": [string]
                },
                ...
            ]

        The list is sorted by compose: oldest first.
        """
        # Stash filter parameters as attributes consumed by FindComposeMixin.
        query = request.query_params
        for param in ('included_compose_type', 'excluded_compose_type'):
            setattr(self, param, query.get(param))
        for flag in ('latest', 'to_dict'):
            self._get_query_param_or_false(request, flag)
        self.product_version = kwargs.get('product_version')
        self.rpm_name = kwargs.get('rpm_name')
        return Response(self._get_composes_for_product_version())
class ComposeImageRTTTestViewSet(NotificationMixin,
                                 ChangeSetUpdateModelMixin,
                                 mixins.ListModelMixin,
                                 mixins.RetrieveModelMixin,
                                 StrictQueryParamMixin,
                                 MultiLookupFieldMixin,
                                 viewsets.GenericViewSet):
    """
    API endpoint that allows querying compose-image RTT Test results.

    ##Test tools##

    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PATCH),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    queryset = ComposeImage.objects.select_related('variant_arch', 'image').all()
    serializer_class = ComposeImageRTTTestSerializer
    filter_class = ComposeImageRTTTestFilter
    permission_classes = (APIPermission,)
    # Composite lookup: compose/variant/arch/file_name identify one record.
    lookup_fields = (
        ('variant_arch__variant__compose__compose_id', r'[^/]+'),
        ('variant_arch__variant__variant_uid', r'[^/]+'),
        ('variant_arch__arch__name', r'[^/]+'),
        ('image__file_name', r'[^/]+'),
    )

    doc_list = """
        __Method__: GET

        __URL__: $LINK:composeimagertttests-list$

        __Query params__:

        %(FILTERS)s

        __Response__: a paged list of following objects

        %(SERIALIZER)s
    """

    doc_retrieve = """
        __Method__: GET

        __URL__: $LINK:composeimagertttests-detail:compose_id}/{variant_uid}/{arch}/{file_name$

        __Response__:

        %(SERIALIZER)s
    """

    def update(self, request, *args, **kwargs):
        # This method is used by bulk update and partial update, but should not
        # be called directly.
        # Full PUT is intentionally rejected; only PATCH (partial) is allowed.
        if not kwargs.get('partial', False):
            return self.http_method_not_allowed(request, *args, **kwargs)

        if not request.data:
            return NoEmptyPatchMixin.make_response()

        # Only the test result may change; anything else is a client error.
        updatable_keys = set(['test_result'])
        if set(request.data.keys()) - updatable_keys:
            return Response(status=status.HTTP_400_BAD_REQUEST,
                            data={'detail': 'Only these properties can be updated: %s'
                                            % ', '.join(updatable_keys)})

        return super(ComposeImageRTTTestViewSet, self).update(request, *args, **kwargs)

    def bulk_update(self, *args, **kwargs):
        """
        It is possible to perform bulk partial update on composeimagertttest with `PATCH`
        method. The input must be a JSON object with composeimagertttest identifiers as
        keys. Values for these keys should be in the same format as when
        updating a single composeimagertttest.
        """
        return bulk_operations.bulk_update_impl(self, *args, **kwargs)

    def partial_update(self, request, *args, **kwargs):
        """
        Only `test_result` fields can be modified by this call.
        Trying to change anything else will result in 400 BAD REQUEST response.

        __Method__: PATCH

        __URL__: $LINK:composeimagertttests-detail:compose_id}/{variant_uid}/{arch}/{file_name$

        __Data__:

            {
                "test_result": string
            }

        __Response__:

        same as for retrieve
        """
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
class ComposeTreeViewSet(NotificationMixin,
ChangeSetModelMixin,
StrictQueryParamMixin,
MultiLookupFieldMixin,
viewsets.GenericViewSet):
"""
API endpoint that allows querying compose-variant-arch relevant to location.
##Test tools##
You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|DELETE),
-d _data_ (a json string). or GUI plugins for
browsers, such as ``RESTClient``, ``RESTConsole``.
"""
queryset = ComposeTree.objects.select_related('compose', 'variant', 'arch').all()
serializer_class = ComposeTreeSerializer
filter_class = ComposeTreeFilter
permission_classes = (APIPermission,)
lookup_fields = (
('compose__compose_id', r'[^/]+'),
('variant__variant_uid', r'[^/]+'),
('arch__name', r'[^/]+'),
('location__short', r'[^/]+'),
('scheme__name', r'[^/]+'),
)
doc_list = """
__Method__: GET
__URL__: $LINK:composetreelocations-list$
__Query params__:
%(FILTERS)s
__Response__: a paged list of following objects
%(SERIALIZER)s
"""
def create(self, request, *args, **kwargs):
"""
__Method__: POST
__URL__: $LINK:composetreelocations-list$
__Data__:
%(WRITABLE_SERIALIZER)s
* *synced_content*: $LINK:contentdeliverycontentcategory-list$
All fields are required. The required architectures must already be
present in PDC. compose/variant/arch combo must exist already for CREATE.
__Response__: Same as input data.
__NOTE__:
If synced_content is omitted, all content types are filled in.
"""
data = request.data
if 'compose' not in data:
return Response({'detail': 'Missing compose'},
status=status.HTTP_400_BAD_REQUEST)
try:
get_object_or_404(Compose, compose_id=data['compose'])
except Http404:
return Response({'detail': 'Compose %s does not existed' % data['compose']},
status=status.HTTP_404_NOT_FOUND)
if 'variant' not in data:
return Response({'detail': 'Missing variant'},
status=status.HTTP_400_BAD_REQUEST)
if not request.data.get("synced_content"):
request.data["synced_content"] = ['binary', 'debug', 'source']
return super(ComposeTreeViewSet, self).create(request, *args, **kwargs)
doc_retrieve = """
__Method__: GET
__URL__: $LINK:composetreelocations-detail:compose_id}/{variant_uid}/{arch}/{location}/{scheme$
__Response__:
%(SERIALIZER)s
"""
    def update(self, request, *args, **kwargs):
        # This method is used by bulk update and partial update, but should not
        # be called directly.
        # Full PUT replacement is not supported; only PATCH (partial=True)
        # passes this guard.
        if not kwargs.get('partial', False):
            return self.http_method_not_allowed(request, *args, **kwargs)
        # Empty PATCH bodies get a canned 400 response.
        if not request.data:
            return NoEmptyPatchMixin.make_response()
        # Whitelist of mutable fields; any other key in the payload is an error.
        updatable_keys = set(['scheme', 'url', 'synced_content'])
        if set(request.data.keys()) - updatable_keys:
            return Response(status=status.HTTP_400_BAD_REQUEST,
                            data={'detail': 'Only these properties can be updated: %s'
                                  % ', '.join(updatable_keys)})
        return super(ComposeTreeViewSet, self).update(request, *args, **kwargs)
    def bulk_update(self, *args, **kwargs):
        """
        It is possible to perform bulk partial update on composetreelocation with `PATCH`
        method. The input must be a JSON object with composetreelocation identifiers as
        keys. Values for these keys should be in the same format as when
        updating a single composetreelocation.
        """
        # Thin wrapper: the shared helper fans the payload out to update().
        return bulk_operations.bulk_update_impl(self, *args, **kwargs)
    def partial_update(self, request, *args, **kwargs):
        """
        Only some composetreelocation fields can be modified by this call. They are
        `scheme`, `synced_content` and `url`. Trying to change anything else
        will result in 400 BAD REQUEST response.
        __Method__: PATCH
        __URL__: $LINK:composetreelocations-detail:compose_id}/{variant_uid}/{arch}/{location}/{scheme$
        __Data__:
        {
            "scheme": string,
            "synced_content": [string],
            "url": string
        }
        If the same content category is specified in `synced_content` multiple times, it
        will be saved only once.
        __Response__:
        same as for retrieve
        """
        # Delegate to update() with partial=True; update() enforces the
        # editable-field whitelist and rejects empty payloads.
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
doc_destroy = """
__Method__:
DELETE
__URL__: $LINK:composetreelocations-detail:compose_id}/{variant_uid}/{arch}/{location}/{scheme$
__Response__:
STATUS: 204 NO CONTENT
"""
class ComposeTreeRTTTestViewSet(NotificationMixin,
                                ChangeSetUpdateModelMixin,
                                mixins.ListModelMixin,
                                mixins.RetrieveModelMixin,
                                StrictQueryParamMixin,
                                MultiLookupFieldMixin,
                                viewsets.GenericViewSet):
    """
    This API is prepared for updating the `rtt_tested_architectures` key
    in $LINK:compose-list$ API.
    """
    # Read + update only: no create/destroy mixins are included above.
    queryset = VariantArch.objects.all()
    serializer_class = ComposeTreeRTTTestSerializer
    filter_class = ComposeTreeRTTTestFilter
    permission_classes = (APIPermission,)
    # Composite URL lookup: compose id / variant uid / arch name.
    lookup_fields = (
        ('variant__compose__compose_id', r'[^/]+'),
        ('variant__variant_uid', r'[^/]+'),
        ('arch__name', r'[^/]+'),
    )
    # Browsable-API documentation templates (macros expanded by the renderer).
    doc_list = """
    __Method__: GET
    __URL__: $LINK:composetreertttests-list$
    __Query params__:
    %(FILTERS)s
    __Response__: a paged list of following objects
    %(SERIALIZER)s
    """
    doc_retrieve = """
    __Method__: GET
    __URL__: $LINK:composetreertttests-detail:compose_id}/{variant_uid}/{arch$
    __Response__:
    %(SERIALIZER)s
    """
    doc_update = """
    __Method__: PUT, PATCH
    __URL__: $LINK:composetreertttests-detail:compose_id}/{variant_uid}/{arch$
    __Data__:
    %(WRITABLE_SERIALIZER)s
    __Response__:
    %(SERIALIZER)s
    """
| mit | 8aedf6b4606200ebb1c4b034874889ac | 38.750722 | 142 | 0.552081 | 4.291905 | false | false | false | false |
product-definition-center/product-definition-center | pdc/scripts/create_release_components.py | 3 | 3266 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import argparse
from beanbag import BeanBagException
from pdc_client import PDCClient
RELEASES_RESOURCE = 'releases'
RPMS_RESOURCE = 'rpms'
GLOBAL_COMPONENTS_RESOURCE = 'global-components'
RELEASE_COMPONENTS_RESOURCE = 'release-components'
BATCH_NUM = 200
def _find_latest_compose_id_for_release(release):
    """Return the newest compose id of *release*, or None.

    None is returned both when the release does not exist (the server answers
    404) and, implicitly, when the release has an empty compose set.  Any
    other server error is re-raised.
    """
    try:
        composes = client[RELEASES_RESOURCE][release]._()['compose_set']
    except BeanBagException as error:
        if error.response.status_code == 404:
            return None
        raise
    if composes:
        return composes[-1]
def _find_src_rpm_names_for_compose(compose_id):
    """Return the set of source-RPM names contained in the given compose."""
    # page_size=-1 asks the PDC API for all results in one unpaged response.
    condition = {'arch': 'src', 'compose': compose_id, 'page_size': -1}
    # Set comprehension instead of set([...]): no throwaway intermediate list.
    return {item['name'] for item in client[RPMS_RESOURCE]._(**condition)}
def _bulk_insert_resource(resource_name, data_list):
    """POST *data_list* to the given PDC resource in batches of BATCH_NUM."""
    # Chunking keeps any single request to a manageable size.
    for i in xrange(0, len(data_list), BATCH_NUM):
        batch = data_list[i:(i + BATCH_NUM)]
        client[resource_name]._(batch)
def _generate_global_components(name_set):
condition = {'page_size': -1}
existing_gc_name_set = set([item['name'] for item in client[GLOBAL_COMPONENTS_RESOURCE]._(**condition)])
non_existing_set = name_set - existing_gc_name_set
if non_existing_set:
print "About to insert %d global components." % len(non_existing_set)
_bulk_insert_resource(GLOBAL_COMPONENTS_RESOURCE, [{'name': item} for item in non_existing_set])
print "Inserted %d global components." % len(non_existing_set)
def _generate_release_components(release, name_set):
condition = {'page_size': -1, 'release': release}
existing_rc_name_set = set([item['name'] for item in client[RELEASE_COMPONENTS_RESOURCE]._(**condition)])
non_existing_set = name_set - existing_rc_name_set
if non_existing_set:
print "About to insert %d release components." % len(non_existing_set)
_bulk_insert_resource(RELEASE_COMPONENTS_RESOURCE,
[{'name': item, 'release': release, 'global_component': item}
for item in non_existing_set])
print "Inserted %d release components." % len(non_existing_set)
def main(release):
    """Populate global and release components from the release's latest compose."""
    compose_id = _find_latest_compose_id_for_release(release)
    if not compose_id:
        print "The release %s doesn't exist or no compose in it." % release
        exit(1)
    # Source RPM names from the compose become the component names.
    srpm_name_result = _find_src_rpm_names_for_compose(compose_id)
    if srpm_name_result:
        _generate_global_components(srpm_name_result)
        _generate_release_components(release, srpm_name_result)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create release components according release id')
    parser.add_argument('-s', '--server', help='PDC instance url or shortcut.', required=True)
    parser.add_argument("-r", "--release", help="release id for a release.", required=True)
    options = parser.parse_args()
    try:
        # client is a module-level global used by all the helpers above.
        client = PDCClient(options.server)
        main(options.release)
    except BeanBagException as e:
        # Surface server-side errors as "status body" on stdout.
        print "%d %s" % (e.response.status_code, e.response.content)
    except Exception as e:
        print str(e)
| mit | 63bca48bfd775faf021f45f4f8ebc05e | 36.54023 | 109 | 0.659522 | 3.561614 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/common/renderers.py | 2 | 10795 | #
# Copyright (c) 2018 Red Hat
# Licensed under The MIT License (MIT)
# https://opensource.org/licenses/MIT
#
from collections import OrderedDict
import logging
import re
import sys
import inspect
from django.conf import settings
from django.utils.encoding import smart_text
from contrib import drf_introspection
from django.urls import NoReverseMatch
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.utils import formatting
from rest_framework.reverse import reverse
from pdc.apps.utils.utils import urldecode
from .renderers_filters import get_filters
from .renderers_serializers import get_serializer, get_writable_serializer
"""
## Writing documentation in docstrings
Docstrings of each method will be available in browsable API as documentation.
These features are available to simplify writing the comments:
* the content is formatted as Markdown
* %(HOST_NAME)s and %(API_ROOT)s macros will be replaced by host name and URL
fragment for API, respectively
* %(FILTERS)s will be replaced a by a list of available query string filters
* %(SERIALIZER)s will be replaced by a code block with details about
serializer
* %(WRITABLE_SERIALIZER)s will do the same, but without read-only fields
* $URL:route-name:arg1:arg2...$ will be replaced by absolute URL
* $LINK:route-name:arg1:...$ will be replaced by a clickable link with
relative URL pointing to the specified place; arguments for LINK will be
wrapped in braces automatically
When the URL specification can not be resolve, "BAD URL" will be displayed on
the page and details about the error will be logged to the error log.
"""
# Matches $URL:...$ and $LINK:...$ placeholders in docstrings; 'type' captures
# URL or LINK, 'details' the colon-separated route name plus arguments.
URL_SPEC_RE = re.compile(r'\$(?P<type>URL|LINK):(?P<details>[^$]+)\$')
ORDERING_STRING = """
* `ordering` (string) Comma separated list of fields for ordering results.
- To sort by a field in descending order, prefix its name with minus (e.g. `-name`).
- Use double underscores for nested field names (e.g. `parent__child` for `{"parent": {"child": ...}}`).
"""
FIELDS_STRING = """
Following filters can be used to show only specific fields. This can make
response time faster. Format is list or single value
(JSON: `{"fields": ["a","b"]}` or `{"fields": "a"}`, in URL: `?fields=a&fields=b`).
* `fields` (list | string) Fields to display (other fields will be hidden).
* `exclude_fields`: (list | string) Fields *NOT* to display (overrules `fields`).
"""
DEFAULT_DESCRIPTION = {
"list": """
__Method__: `GET`
__URL__: %(URL)s
__Query params__:
%(FILTERS)s
__Response__:
Paged list of following objects.
%(SERIALIZER)s
""",
"retrieve": """
__Method__: `GET`
__URL__: %(DETAIL_URL)s
__Response__:
%(SERIALIZER)s
""",
"create": """
__Method__: `POST`
__URL__: %(URL)s
__Data__:
%(WRITABLE_SERIALIZER)s
__Response__:
%(SERIALIZER)s
""",
"bulk_create": """
__Method__: `POST`
__URL__: %(URL)s
__Data__: <code>[ <b>{Item Data}</b>, … ]</code>
__Item Data__: %(WRITABLE_SERIALIZER)s
__Response__:
List of following objects.
%(SERIALIZER)s
""",
"destroy": """
__Method__: `DELETE`
__URL__: %(DETAIL_URL)s
__Response__:
On success, HTTP status code is `204 NO CONTENT`.
""",
"bulk_destroy": """
__Method__: `DELETE`
__URL__: %(URL)s
__Data__: <code>[ %(ID)s, … ]</code>
__Response__:
On success, HTTP status code is `204 NO CONTENT`.
""",
"update": """
__Method__: `PUT`
__URL__: %(DETAIL_URL)s
__Data__:
%(WRITABLE_SERIALIZER)s
__Response__:
%(SERIALIZER)s
""",
"bulk_update": """
__Method__: `PUT`, `PATCH`
__URL__: %(URL)s
__Data__: <code>{ "%(ID)s": <b>{Item Data}</b>, … }</code>
__Item Data__:
%(WRITABLE_SERIALIZER)s
All fields are required for `PUT` and optional for `PATCH`.
__Response__:
List of following objects.
%(SERIALIZER)s
""",
"partial_update": """
__Method__: `PATCH`
__URL__: %(DETAIL_URL)s
__Data__:
%(WRITABLE_SERIALIZER)s
All fields are optional.
__Response__:
List of following objects.
%(SERIALIZER)s
""",
}
def cached_by_argument_class(method):
    """
    Decorator which caches result of method call by class of the first
    argument.

    Subsequent calls with same class of the first argument just return the
    cached result.
    """
    from functools import wraps

    cache = {}

    @wraps(method)  # preserve the wrapped method's name/docstring
    def wrapper(self, arg, *args, **kwargs):
        # Key on the argument's class: all instances of one view class share
        # the same (expensive to build) documentation.
        cache_key = arg.__class__
        if cache_key not in cache:
            cache[cache_key] = method(self, arg, *args, **kwargs)
        return cache[cache_key]

    return wrapper
class ReadOnlyBrowsableAPIRenderer(BrowsableAPIRenderer):
    # Browsable API renderer that strips all edit forms and injects the
    # generated per-method documentation into the template context.
    template = "browsable_api/api.html"
    # ViewSet method names whose docstrings are rendered as documentation.
    methods_mapping = (
        'list',
        'retrieve',
        'create',
        'bulk_create',
        'update',
        'destroy',
        'bulk_destroy',
        'partial_update',
        'bulk_update',
        # Token Auth methods
        'obtain',
        'refresh',
    )

    def get_raw_data_form(self, data, view, method, request):
        # Read-only renderer: never offer a raw data form.
        return None

    def get_rendered_html_form(self, data, view, method, request):
        # Read-only renderer: never offer an HTML form.
        return None

    def get_context(self, data, accepted_media_type, renderer_context):
        # Stash the request for URL reversing in substitute_urls().
        self.request = renderer_context['request']
        super_class = super(ReadOnlyBrowsableAPIRenderer, self)
        super_retval = super_class.get_context(data, accepted_media_type,
                                               renderer_context)
        if super_retval is not None:
            # Remove every edit-form entry the base renderer prepared.
            del super_retval['put_form']
            del super_retval['post_form']
            del super_retval['delete_form']
            del super_retval['options_form']
            del super_retval['raw_data_put_form']
            del super_retval['raw_data_post_form']
            del super_retval['raw_data_patch_form']
            del super_retval['raw_data_put_or_patch_form']
            super_retval['display_edit_forms'] = False
            super_retval['version'] = "1.0"
            view = renderer_context['view']
            super_retval['overview'] = self.get_overview(view)
        return super_retval

    @cached_by_argument_class
    def get_overview(self, view):
        # The view class docstring becomes the endpoint overview.
        overview = view.__doc__ or ''
        return self.format_description(view, None, overview)

    @cached_by_argument_class
    def get_description(self, view, *args):
        # Build an ordered mapping {method name: rendered documentation}.
        description = OrderedDict()
        for method in self.methods_mapping:
            func = getattr(view, method, None)
            if func:
                docstring = inspect.cleandoc(func.__doc__ or '')
                # A 'doc_<method>' class attribute is appended to the docstring.
                doc_attribute = getattr(view, 'doc_' + method, None)
                if doc_attribute:
                    docstring += '\n\n' + inspect.cleandoc(doc_attribute)
                # Fall back to the generic template when the docs do not
                # already describe the method/URL themselves.
                if method in DEFAULT_DESCRIPTION \
                        and '__URL__' not in docstring \
                        and '__Method__' not in docstring:
                    docstring += '\n\n' + inspect.cleandoc(DEFAULT_DESCRIPTION[method])
                description[method] = self.format_description(view, method, docstring)
        return description

    def format_description(self, view, method, description):
        # Expand the %(...)s macros, resolve $URL$/$LINK$ specs and render
        # the result from Markdown to HTML.
        macros = settings.BROWSABLE_DOCUMENT_MACROS
        if '%(FILTERS)s' in description:
            macros['FILTERS'] = get_filters(view)
            # If the API has the LIST method, show ordering field info.
            if 'list' == method and getattr(view, 'serializer_class', None) is not None:
                macros['FILTERS'] += ORDERING_STRING
                # Show fields info if applicable.
                if issubclass(view.serializer_class, drf_introspection.serializers.DynamicFieldsSerializerMixin):
                    macros['FILTERS'] += FIELDS_STRING
        if '%(SERIALIZER)s' in description:
            macros['SERIALIZER'] = get_serializer(view, include_read_only=True)
        if '%(WRITABLE_SERIALIZER)s' in description:
            macros['WRITABLE_SERIALIZER'] = get_writable_serializer(view, method)
        if '%(URL)s' in description:
            macros['URL'] = get_url(view, 'list')
        if '%(DETAIL_URL)s' in description:
            macros['DETAIL_URL'] = get_url(view, 'detail')
        if '%(ID)s' in description:
            macros['ID'] = '{%s}' % get_id_template(view)
        # Views may contribute extra macros of their own.
        if hasattr(view, 'docstring_macros'):
            macros.update(view.docstring_macros)
        doc = formatting.dedent(description)
        doc = doc % macros
        doc = self.substitute_urls(view, method, doc)
        doc = smart_text(doc)
        doc = formatting.markup_description(doc)
        return doc

    def substitute_urls(self, view, method, text):
        def replace_url(match):
            type = match.groupdict()['type']
            parts = match.groupdict()['details'].split(':')
            url_name = parts[0]
            args = parts[1:]
            if type == 'LINK':
                # LINK arguments are rendered as {placeholder} fragments.
                args = ['{%s}' % arg for arg in args]
            try:
                if type == 'LINK':
                    # Relative clickable link with URL-decoded label.
                    url = reverse(url_name, args=args)
                    return '[`%s`](%s)' % (urldecode(url), url)
                # URL spec: absolute URL built from the current request.
                return reverse(url_name, args=args, request=self.request)
            except NoReverseMatch:
                # Bad specifier: show a marker in the docs and log the details.
                logger = logging.getLogger(__name__)
                logger.error('Bad URL specifier <%s> in %s.%s'
                             % (match.group(0), view.__class__.__name__, method),
                             exc_info=sys.exc_info())
                return 'BAD URL'
        return URL_SPEC_RE.sub(replace_url, text)
def get_id_template(view):
    """Return a URL placeholder template for the view's identifier.

    Multi-field lookups yield e.g. 'a}/{b' (to be wrapped in braces by the
    caller); single-field lookups yield the field name; '' when the view has
    no lookup configuration at all.
    """
    fields = getattr(view, 'lookup_fields', None)
    if fields is not None:
        return '}/{'.join(field for field, _ in fields)
    return getattr(view, 'lookup_field', '')
def get_url(view, detail_or_list):
    """Return an HTML link for the view's list or detail route with its URL
    arguments rendered as {placeholder} fragments."""
    from django.urls import get_resolver
    resolver = get_resolver(None)
    # DRF routers name routes "<basename>-list" / "<basename>-detail".
    viewname = '%s-%s' % (view.basename, detail_or_list)
    # Take the %-style URL template and its argument names from the resolver.
    url_template, args = resolver.reverse_dict.getlist(viewname)[1][0][0]
    if len(args) == 1 and args[0] == 'composite_field':
        # Composite lookups collapse into a single 'composite_field' argument;
        # expand it back into the per-field placeholders.
        url = url_template % {'composite_field': '{%s}' % get_id_template(view)}
    else:
        url = url_template % {arg: '{%s}' % arg for arg in args}
    return '<a href="/%s">/%s</a>' % (url, url)
| mit | 515cb6bab733961cab6352c41592a441 | 28.097035 | 113 | 0.575174 | 3.960015 | false | false | false | false |
product-definition-center/product-definition-center | pdc/apps/osbs/tests.py | 2 | 7347 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from . import models
from pdc.apps.component import models as component_models
class OSBSRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """REST tests for OSBS records: creation/deletion driven by release
    component types, filtering, updates and release cloning."""

    # Fixtures provide releases, components and two pre-existing OSBS records.
    fixtures = [
        "pdc/apps/common/fixtures/test/sigkey.json",
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/component/fixtures/tests/upstream.json',
        'pdc/apps/component/fixtures/tests/global_component.json',
        'pdc/apps/osbs/fixtures/tests/records.json',
    ]

    @classmethod
    def setUpTestData(cls):
        # Mark the 'container' component type as OSBS-enabled for all tests.
        type = component_models.ReleaseComponentType.objects.get(name='container')
        type.has_osbs = True
        type.save()

    # -- record creation/deletion driven by component type -----------------

    def test_create_component_creates_osbs(self):
        response = self.client.post(reverse('releasecomponent-list'),
                                    {'name': 'test', 'release': 'release-1.0',
                                     'global_component': 'python', 'type': 'container'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Two already existed in fixtures.
        self.assertEqual(3, models.OSBSRecord.objects.count())

    def test_create_component_with_bad_type_does_not_create_osbs(self):
        # 'rpm' has has_osbs=False, so no record should be added.
        response = self.client.post(reverse('releasecomponent-list'),
                                    {'name': 'test', 'release': 'release-1.0',
                                     'global_component': 'python', 'type': 'rpm'},
                                    format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(2, models.OSBSRecord.objects.count())

    def test_update_component_to_different_type_deletes_osbs(self):
        response = self.client.patch(reverse('releasecomponent-detail', args=[1]),
                                     {'type': 'rpm'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.OSBSRecord.objects.count())

    def test_change_type_creates_osbs(self):
        # Enabling has_osbs on a type backfills records for its components.
        type = component_models.ReleaseComponentType.objects.get(name='rpm')
        type.has_osbs = True
        type.save()
        self.assertEqual(3, models.OSBSRecord.objects.count())

    def test_change_type_deletes_osbs(self):
        # Disabling has_osbs removes the records of that type's components.
        type = component_models.ReleaseComponentType.objects.get(name='container')
        type.has_osbs = False
        type.save()
        self.assertEqual(0, models.OSBSRecord.objects.count())

    # -- listing, filtering and retrieval ----------------------------------

    def test_list_osbs(self):
        response = self.client.get(reverse('osbs-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)

    def test_filter_by_release(self):
        response = self.client.get(reverse('osbs-list'), {'release': 'release-2.0'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_filter_by_component_name(self):
        response = self.client.get(reverse('osbs-list'), {'component_name': 'bash'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_filter_by_autorebuild(self):
        response = self.client.get(reverse('osbs-list'), {'autorebuild': True})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_retrieve_osbs(self):
        response = self.client.get(reverse('osbs-detail', args=['release-1.0/python27']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data,
                         {'component': {'id': 1,
                                        'name': 'python27',
                                        'release': 'release-1.0'},
                          'autorebuild': True})

    # -- updates -----------------------------------------------------------

    def test_deleting_osbs_fails(self):
        # Records only disappear with their component; direct DELETE is 405.
        response = self.client.delete(reverse('osbs-detail', args=['release-1.0/python27']))
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
        self.assertEqual(2, models.OSBSRecord.objects.count())

    def test_update(self):
        response = self.client.put(reverse('osbs-detail', args=['release-1.0/python27']),
                                   {'autorebuild': False},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertFalse(response.data['autorebuild'])
        self.assertNumChanges([1])
        r = models.OSBSRecord.objects.get(component_id=1)
        self.assertFalse(r.autorebuild)

    def test_partial_update(self):
        response = self.client.patch(reverse('osbs-detail', args=['release-1.0/python27']),
                                     {'autorebuild': False},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertFalse(response.data['autorebuild'])
        self.assertNumChanges([1])
        r = models.OSBSRecord.objects.get(component_id=1)
        self.assertFalse(r.autorebuild)

    def test_can_unset_autorebuild(self):
        # autorebuild is nullable: PATCHing None clears the flag.
        response = self.client.patch(reverse('osbs-detail', args=['release-1.0/python27']),
                                     {'autorebuild': None},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsNone(response.data['autorebuild'])
        self.assertNumChanges([1])
        r = models.OSBSRecord.objects.get(component_id=1)
        self.assertIsNone(r.autorebuild)

    def test_cloning_release_clones_osbs(self):
        self.client.post(reverse('releaseclone-list'),
                         {'old_release_id': 'release-1.0', 'version': '1.1'},
                         format='json')
        records = models.OSBSRecord.objects.filter(component__release__release_id='release-1.1')
        self.assertEqual(2, len(records))
        # Autorebuild flags are preserved per component on clone.
        self.assertTrue(records.get(component__name='python27').autorebuild)
        self.assertFalse(records.get(component__name='MySQL-python').autorebuild)
        self.assertNumChanges([6]) # 1 release, 3 components, 2 osbs records

    def test_update_with_wrong_key(self):
        response = self.client.put(reverse('osbs-detail', args=['release-1.0/python27']),
                                   {'autorebuild': False, 'wrongkey': True},
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {"detail": 'Unknown fields: "wrongkey".'})

    def test_partial_update_with_wrong_key(self):
        response = self.client.patch(reverse('osbs-detail', args=['release-1.0/python27']),
                                     {'wrongkey': False},
                                     format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {"detail": 'Unknown fields: "wrongkey".'})
| mit | aa86e996d96890ce14f6ab21e704d2b9 | 47.655629 | 96 | 0.608412 | 3.967063 | false | true | false | false |
product-definition-center/product-definition-center | pdc/apps/auth/signals.py | 2 | 1939 | #
# Copyright (c) 2018 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
def update_resources(sender, **kwargs):
    """Updates list of resources for which permissions can be created"""
    # Imports are local — presumably to defer Django app/model loading until
    # the signal actually fires (TODO confirm against the signal wiring).
    import inspect
    from django.conf import settings
    from django.utils.module_loading import autodiscover_modules
    from pdc.apps.auth.models import ResourcePermission, ActionPermission, Resource
    from pdc.apps.utils.SortedRouter import router
    from pdc.apps.utils.utils import convert_method_to_action

    if getattr(settings, 'SKIP_RESOURCE_CREATION', False):
        # We are running tests, don't create anything
        return

    # Endpoints that are deliberately exempt from permission control.
    API_WITH_NO_PERMISSION_CONTROL = set(['auth/token', 'auth/current-user'])
    # Import all routers to have list of all end-points.
    autodiscover_modules('routers')
    # Map action name -> ActionPermission instance for quick lookup below.
    action_to_obj_dict = {}
    for action in ('update', 'create', 'delete', 'read'):
        action_to_obj_dict[action] = ActionPermission.objects.get(name=action)
    for prefix, view_set, basename in router.registry:
        if prefix in API_WITH_NO_PERMISSION_CONTROL:
            continue
        view_name = str(view_set)
        resource_obj, created = Resource.objects.get_or_create(name=prefix,
                                                               defaults={'view': view_name})
        if not created and resource_obj.view != view_name:
            # Update the name of the View class
            resource_obj.view = view_name
            resource_obj.save()
        # Create a ResourcePermission per (resource, action) implied by the
        # view set's methods (GET -> read, POST -> create, ...).
        for name, method in inspect.getmembers(view_set, predicate=inspect.ismethod):
            action_name = convert_method_to_action(name.lower())
            if action_name:
                action_permission = action_to_obj_dict[action_name]
                ResourcePermission.objects.get_or_create(resource=resource_obj,
                                                         permission=action_permission)
| mit | 6f297e1518b1930ee74afbfb0fc98067 | 41.152174 | 92 | 0.636411 | 4.252193 | false | false | false | false |
webcomics/dosage | scripts/mklanguages.py | 1 | 1323 | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
'''update languages.py from pycountry'''
import os
import codecs
from dosagelib.scraper import scrapers
def main():
    """Update language information in dosagelib/languages.py."""
    # languages.py lives in the dosagelib package one level above scripts/.
    basepath = os.path.dirname(os.path.dirname(__file__))
    fn = os.path.join(basepath, 'dosagelib', 'languages.py')
    with codecs.open(fn, 'w', 'utf-8') as f:
        f.write('# SPDX-License-Identifier: MIT\n')
        f.write('# ISO 693-1 language codes from pycountry\n')
        f.write('# This file is automatically generated, DO NOT EDIT!\n')
        lang = get_used_languages()
        write_languages(f, lang)
def get_used_languages():
    """Return a mapping of language code -> language name covering every
    registered scraper (first scraper seen for a code wins)."""
    languages = {}
    for scraperobj in scrapers.get():
        code = scraperobj.lang
        if code not in languages:
            languages[code] = scraperobj.language()
    return languages
def write_languages(f, langs):
    """Serialize *langs* to *f* as a Python dict literal named Languages."""
    lines = ["Languages = {%s" % os.linesep]
    lines.extend(" %r: %r,%s" % (code, langs[code], os.linesep)
                 for code in sorted(langs))
    lines.append("}%s" % os.linesep)
    f.writelines(lines)
if __name__ == '__main__':
    # Script entry point: regenerate dosagelib/languages.py.
    main()
| mit | b2aea50bb5087e1a30e000ddec619814 | 29.767442 | 73 | 0.643235 | 3.349367 | false | false | false | false |
kivy/pyjnius | setup.py | 1 | 2955 | '''
Setup.py for creating a binary distribution.
'''
from __future__ import print_function
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
from os import environ
from os.path import dirname, join
import sys
from setup_sdist import SETUP_KWARGS
# XXX hack to be able to import jnius.env withough having build
# jnius.jnius yet, better solution welcome
syspath = sys.path[:]
sys.path.insert(0, 'jnius')
from env import get_java_setup
sys.path = syspath
def getenv(key):
    '''Return the environment variable *key* as text (decoding byte strings),
    or None when it is unset.'''
    value = environ.get(key)
    if value is None:
        return None
    try:
        return value.decode()
    except AttributeError:
        # Already a text string (Python 3): return as-is.
        return value
# Cython sources making up the jnius extension module.  On Android the
# .pyx entries are swapped for their pre-generated .c files (see below).
FILES = [
    'jni.pxi',
    'jnius_compat.pxi',
    'jnius_conversion.pxi',
    'jnius_export_class.pxi',
    'jnius_export_func.pxi',
    'jnius_jvm_android.pxi',
    'jnius_jvm_desktop.pxi',
    'jnius_jvm_dlopen.pxi',
    'jnius_localref.pxi',
    'jnius_nativetypes3.pxi',
    'jnius_proxy.pxi',
    'jnius.pyx',
    'jnius_utils.pxi',
]
EXTRA_LINK_ARGS = []

# detect Python for android
# NDKPLATFORM and LIBLINK are set by the python-for-android build environment
# — presumably only there; confirm before relying on it elsewhere.
PLATFORM = sys.platform
NDKPLATFORM = getenv('NDKPLATFORM')
if NDKPLATFORM is not None and getenv('LIBLINK'):
    PLATFORM = 'android'

# detect platform
# On Android Cython is not run; use the pre-generated C sources instead.
if PLATFORM == 'android':
    FILES = [fn[:-3] + 'c' for fn in FILES if fn.endswith('pyx')]

JAVA=get_java_setup(PLATFORM)

# Building the extension needs JDK headers/tools, not just a JRE.
assert JAVA.is_jdk(), "You need a JDK, we only found a JRE. Try setting JAVA_HOME"
def compile_native_invocation_handler(java):
    '''Find javac and compile NativeInvocationHandler.java.'''
    javac = java.get_javac()
    source_level = '1.7'
    # Shared compiler arguments (previously duplicated in both call sites).
    compile_args = [
        '-target', source_level, '-source', source_level,
        join('jnius', 'src', 'org', 'jnius', 'NativeInvocationHandler.java'),
    ]
    try:
        subprocess.check_call([javac] + compile_args)
    except FileNotFoundError:
        # Some setups report javac as a quoted path; retry with quotes stripped.
        subprocess.check_call([javac.replace('"', '')] + compile_args)
# Compile the Java helper class before building the extension.
compile_native_invocation_handler(JAVA)

# generate the config.pxi
# config.pxi embeds the target platform as a Cython compile-time constant.
with open(join(dirname(__file__), 'jnius', 'config.pxi'), 'w') as fd:
    fd.write('DEF JNIUS_PLATFORM = {0!r}\n\n'.format(PLATFORM))

# pop setup.py from included files in the installed package
SETUP_KWARGS['py_modules'].remove('setup')

ext_modules = [
    Extension(
        'jnius', [join('jnius', x) for x in FILES],
        libraries=JAVA.get_libraries(),
        library_dirs=JAVA.get_library_dirs(),
        include_dirs=JAVA.get_include_dirs(),
        extra_link_args=EXTRA_LINK_ARGS,
    )
]

# Compile the Cython sources with Python 3 semantics.
for ext_mod in ext_modules:
    ext_mod.cython_directives = {'language_level': 3}

# create the extension
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=ext_modules,
    **SETUP_KWARGS
)
| mit | 91eb571a7f35e47748283e22c7b23e5c | 25.383929 | 85 | 0.653807 | 3.170601 | false | false | false | false |
webcomics/dosage | dosagelib/plugins/q.py | 1 | 1082 | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import ParserScraper
class QuantumVibe(ParserScraper):
    # Strips are addressed by page number in the 'page' query parameter.
    url = 'https://www.quantumvibe.com/'
    stripUrl = url + 'strip?page=%s'
    firstStripUrl = stripUrl % '1'
    imageSearch = '//img[contains(@src, "disppageV3?story=qv")]'
    prevSearch = '//a[./img[@alt="Previous Strip"]]'
class QuestionableContent(ParserScraper):
    # Strips are addressed by an unpadded numeric index ('comic' parameter).
    url = 'http://www.questionablecontent.net/'
    stripUrl = url + 'view.php?comic=%s'
    firstStripUrl = stripUrl % '1'
    imageSearch = '//img[contains(@src, "comics/")]'
    prevSearch = '//a[text()="Previous"]'
    help = 'Index format: n (unpadded)'
class Qwantz(ParserScraper):
    # Dinosaur Comics; strips are addressed by a numeric 'comic' parameter.
    url = 'http://www.qwantz.com/index.php'
    stripUrl = url + '?comic=%s'
    firstStripUrl = stripUrl % '1'
    imageSearch = '//img[d:class("comic")]'
    prevSearch = '//a[@rel="prev"]'
    help = 'Index format: n'
| mit | 042ad2d95f42a8d467c37ff161ab6ddf | 32.8125 | 64 | 0.660813 | 2.956284 | false | false | false | false |
webcomics/dosage | scripts/webcomicfactory.py | 1 | 1470 | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
"""
Script to get WebComicFactory comics and save the info in a JSON file for
further processing.
"""
from scriptutil import ComicListUpdater
class WebComicFactoryUpdater(ComicListUpdater):
    # Scrapes thewebcomicfactory.com for its hosted comics and emits a
    # scraper-class entry for each one.

    def find_first(self, url):
        # Follow the "first strip" navigation link; when it is absent the
        # given page is assumed to already be the first one.
        data = self.get_url(url)
        firstlinks = data.cssselect('a.comic-nav-first')
        if not firstlinks:
            print("INFO:", "No first link on »%s«, already first page?" %
                  (url))
            return url
        return firstlinks[0].attrib['href']

    def collect_results(self):
        """Parse start page for supported comics."""
        url = 'http://www.thewebcomicfactory.com/'
        data = self.get_url(url)
        for comicdiv in data.cssselect('div.ceo_thumbnail_widget'):
            comicname = comicdiv.cssselect('h2')[0]
            comiclink = comicdiv.cssselect('a')[0]
            comicurl = comiclink.attrib['href']
            name = comicname.text
            # Skip the color-key legend entry; it is not a comic.
            if 'comic-color-key' in comicurl:
                continue
            comicurl = self.find_first(comicurl)
            self.add_comic(name, comicurl)

    def get_entry(self, name, url):
        # Render one generated scraper-class constructor line.
        return (u"cls('%s',\n '%s')," % (name, url))
if __name__ == '__main__':
    # Script entry point: run the updater against this script's location.
    WebComicFactoryUpdater(__file__).run()
| mit | 6b5617396b0a22949b66017128198d3a | 31.622222 | 73 | 0.613079 | 3.359268 | false | false | false | false |
sendgrid/sendgrid-python | sendgrid/helpers/mail/html_content.py | 2 | 1434 | from .content import Content
from .validators import ValidateApiKey
class HtmlContent(Content):
    """Represents the HTML body of an email message."""

    def __init__(self, content):
        """Initialize with optional HTML *content*.

        :param content: The HTML content.
        :type content: string
        """
        self._content = None
        self._validator = ValidateApiKey()
        if content is not None:
            self.content = content

    @property
    def mime_type(self):
        """The MIME type used for HTML bodies.

        :rtype: string
        """
        return "text/html"

    @property
    def content(self):
        """The HTML payload itself.

        :rtype: string
        """
        return self._content

    @content.setter
    def content(self, value):
        """Validate and store the HTML payload.

        :param value: The actual HTML content.
        :type value: string
        """
        self._validator.validate_message_dict(value)
        self._content = value

    def get(self):
        """Return a JSON-serializable dict describing this content.

        :rtype: dict
        """
        payload = {}
        mime = self.mime_type
        if mime is not None:
            payload["type"] = mime
        body = self.content
        if body is not None:
            payload["value"] = body
        return payload
| mit | ef93b3aca655656c62f67549a8df18f1 | 23.305085 | 74 | 0.572524 | 4.625806 | false | false | false | false |
mrkipling/maraschino | modules/couchpotato.py | 3 | 14369 | from flask import render_template, request, jsonify, json, send_file
from jinja2.filters import FILTERS
from maraschino.tools import get_setting_value, requires_auth
from maraschino import logger, app, WEBROOT
import urllib2
import StringIO
import base64
import re
def couchpotato_http():
    """Return the URL scheme for CouchPotato based on the HTTPS setting."""
    secure = get_setting_value('couchpotato_https') == '1'
    return 'https://' if secure else 'http://'
def couchpotato_url():
    """Return the full CouchPotato API base URL: scheme, host, optional
    port/webroot, plus the /api/<api key> suffix."""
    # Reuse couchpotato_url_no_api() instead of duplicating the
    # scheme/host/port/webroot assembly here.
    return '%s/api/%s' % (couchpotato_url_no_api(),
                          get_setting_value('couchpotato_api'))
def couchpotato_url_no_api():
    # Build "scheme://host[:port][/webroot]" from the stored settings,
    # without the /api/<key> suffix.
    port = get_setting_value('couchpotato_port')
    url_base = get_setting_value('couchpotato_ip')
    webroot = get_setting_value('couchpotato_webroot')
    if port:
        url_base = '%s:%s' % (url_base, port)
    if webroot:
        url_base = '%s/%s' % (url_base, webroot)
    return couchpotato_http() + url_base
def couchpotato_api(method, params=None, use_json=True, dev=False):
username = get_setting_value('couchpotato_user')
password = get_setting_value('couchpotato_password')
if params:
params = '/?%s' % params
else:
params = '/'
params = (params).replace(' ', '%20')
url = '%s/%s%s' % (couchpotato_url(), method, params)
req = urllib2.Request(url)
if username and password:
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
data = urllib2.urlopen(req).read()
if dev:
print url
print data
if use_json:
data = json.JSONDecoder().decode(data)
return data
def log_exception(e):
    """Record a caught CouchPotato exception in the debug log."""
    logger.log('CouchPotato :: EXCEPTION -- %s' % e, 'DEBUG')
def couchpotato_image(path):
    """Map a CouchPotato cache path to the local image proxy URL."""
    # Keep only the final path component (the cached file name).
    filename = path.split('/')[-1]
    return '%s/xhr/couchpotato/image/%s' % (WEBROOT, filename)
FILTERS['cp_img'] = couchpotato_image
@app.route('/xhr/couchpotato/image/<path:url>')
def couchpotato_proxy(url):
    """Stream a cached CouchPotato image through Maraschino (adds HTTP auth)."""
    username = get_setting_value('couchpotato_user')
    password = get_setting_value('couchpotato_password')
    target = '%s/file.cache/%s' % (couchpotato_url(), url)
    req = urllib2.Request(target)
    if username and password:
        token = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        req.add_header("Authorization", "Basic %s" % token)
    img = StringIO.StringIO(urllib2.urlopen(req).read())
    logger.log('CouchPotato :: Fetching image from %s' % (target), 'DEBUG')
    return send_file(img, mimetype='image/jpeg')
@app.route('/xhr/couchpotato/')
@app.route('/xhr/couchpotato/<status>/')
def xhr_couchpotato(status='active'):
    """Render the CouchPotato movie list for *status* ('active', 'wanted', ...).

    Attaches each movie's quality-profile label before rendering.
    """
    profiles = {}
    status_string = 'status=%s' % status
    template = 'couchpotato.html'
    # BUG FIX: was `status is not 'active'`, an identity comparison.
    # Flask passes the URL segment as a unicode object, so the identity
    # test was True even for /active/; compare by value instead.
    if status != 'active':
        template = 'couchpotato/all.html'
    try:
        logger.log('CouchPotato :: Fetching "%s movies" list' % status, 'INFO')
        couchpotato = couchpotato_api('movie.list', params=status_string)
    except Exception as e:
        log_exception(e)
        couchpotato = None
    logger.log('CouchPotato :: Fetching "%s movies" list (DONE)' % status, 'INFO')
    # isinstance is the idiomatic form of `type(x) is list`.
    if status == 'wanted' and not isinstance(couchpotato, list):
        logger.log('CouchPotato :: Wanted movies list is empty', 'INFO')
        return cp_search('There are no movies in your wanted list.')
    profiles = couchpotato_api('profile.list')
    # Attach the human-readable profile label to each movie.
    for movie in couchpotato['movies']:
        for profile in profiles['list']:
            if profile['_id'] == movie['profile_id']:
                movie['profile_label'] = profile['label']
    return render_template(template,
        url=couchpotato_url(),
        app_link=couchpotato_url_no_api(),
        couchpotato=couchpotato,
        profiles=profiles,
        compact_view=get_setting_value('couchpotato_compact') == '1',
    )
@app.route('/xhr/couchpotato/history/')
def xhr_couchpotato_history():
    """Render CouchPotato notifications plus the count of unread ones."""
    unread = 0
    try:
        notifications = couchpotato_api('notification.list')['notifications']
        for item in notifications:
            if not item['read']:
                unread += 1
    except Exception as e:
        logger.log('CouchPotato :: Could not retrieve Couchpotato - %s' % (e), 'WARNING')
        notifications = "empty"
    return render_template('couchpotato/history.html',
        couchpotato=notifications,
        unread=unread,
    )
@app.route('/xhr/couchpotato/search/')
def cp_search(message=None):
    """Search CouchPotato for movies matching the ?name= query argument.

    Renders the search template with results, with *message*, or with an
    error string when nothing matched.
    """
    couchpotato = {}
    params = False
    profiles = {}
    query = None
    try:
        query = request.args['name']
        params = 'q=' + query
    # BUG FIX: this was a bare `except:` that swallowed everything,
    # including KeyboardInterrupt; only the missing-argument case
    # (KeyError / werkzeug's BadRequestKeyError subclass) is expected.
    except KeyError:
        pass
    if params:
        try:
            logger.log('CouchPotato :: Searching for movie: %s' % (query), 'INFO')
            couchpotato = couchpotato_api('movie.search', params=params)
            amount = len(couchpotato['movies'])
            logger.log('CouchPotato :: found %i movies for %s' % (amount, query), 'INFO')
            if couchpotato['success'] and amount != 0:
                couchpotato = couchpotato['movies']
                try:
                    profiles = couchpotato_api('profile.list')
                except Exception as e:
                    log_exception(e)
            else:
                return render_template('couchpotato/search.html', error='No movies with "%s" were found' % (query), couchpotato='results')
        except Exception as e:
            log_exception(e)
            couchpotato = None
    else:
        logger.log('CouchPotato :: Loading search template', 'DEBUG')
        couchpotato = None
    return render_template('couchpotato/search.html',
        data=couchpotato,
        couchpotato='results',
        profiles=profiles,
        error=message
    )
@app.route('/xhr/couchpotato/add_movie/<imdbid>/<title>/')
@app.route('/xhr/couchpotato/add_movie/<imdbid>/<title>/<profile>/')
def add_movie(imdbid, title, profile=False):
    """Add a movie to the CouchPotato wanted list, optionally with a profile."""
    params = 'identifier=%s&title=%s' % (imdbid, title)
    if profile:
        params += '&profile_id=%s' % profile
    try:
        logger.log('CouchPotato :: Adding %s (%s) to wanted list' % (title, imdbid), 'INFO')
        return jsonify(couchpotato_api('movie.add', params))
    except Exception as e:
        log_exception(e)
        return jsonify({'success': False})
@app.route('/xhr/couchpotato/restart/')
@requires_auth
def cp_restart():
    """Ask the CouchPotato server to restart itself."""
    try:
        logger.log('CouchPotato :: Restarting', 'INFO')
        if 'restarting' in couchpotato_api('app.restart', use_json=False):
            return jsonify({'success': True})
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/available/')
@requires_auth
def cp_available():
    """Report whether the CouchPotato server is reachable."""
    try:
        logger.log('CouchPotato :: Checking if CouchPotato is available', 'INFO')
        return jsonify(couchpotato_api('app.available'))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/shutdown/')
@requires_auth
def cp_shutdown():
    """Ask the CouchPotato server to shut down."""
    try:
        logger.log('CouchPotato :: Shutting down', 'INFO')
        if 'shutdown' in couchpotato_api('app.shutdown', use_json=False):
            return jsonify({'success': True})
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/version/')
@requires_auth
def cp_version():
    """Report the CouchPotato version as JSON."""
    try:
        return jsonify(couchpotato_api('app.version'))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/profiles/')
@requires_auth
def cp_profiles():
    """Return the CouchPotato quality-profile list as JSON."""
    try:
        logger.log('CouchPotato :: Getting profiles', 'INFO')
        return jsonify(couchpotato_api('profile.list'))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/quality/')
@requires_auth
def cp_quality():
    """Return the CouchPotato quality list as JSON."""
    try:
        logger.log('CouchPotato :: Getting quality', 'INFO')
        return jsonify(couchpotato_api('quality.list'))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/update/check/')
@requires_auth
def cp_update_check():
    """Ask CouchPotato whether an update is available."""
    try:
        logger.log('CouchPotato :: Getting update', 'INFO')
        return jsonify(couchpotato_api('updater.check'))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/delete_movie/<id>/')
@requires_auth
def movie_delete(id):
    """Delete one or more movies from CouchPotato.

    :param id: comma-separated movie id(s) to delete
    """
    try:
        logger.log('CouchPotato :: Deleting movie %s' % id, 'INFO')
        return jsonify(couchpotato_api('movie.delete', 'id=%s' % id))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/refresh_movie/<id>/')
def movie_refresh(id):
    """Trigger a refresh of one or more movies.

    :param id: comma-separated movie id(s) to refresh
    """
    try:
        logger.log('CouchPotato :: Refreshing movie %s' % id, 'INFO')
        return jsonify(couchpotato_api('movie.refresh', 'id=%s' % id))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/settings/')
def cp_settings():
    """Fetch CouchPotato's settings and render the settings template."""
    try:
        logger.log('CouchPotato :: Retrieving settings', 'INFO')
        settings = couchpotato_api('settings')
        logger.log('CouchPotato :: Retrieving settings (DONE)', 'INFO')
        return render_template('couchpotato/settings.html',
            couchpotato=settings,
        )
    except Exception as e:
        log_exception(e)
        return jsonify({'success': False})
@app.route('/xhr/couchpotato/get_movie/<id>/')
def cp_get_movie(id):
    """Render the detail template for movie *id*.

    :param id: comma-separated movie id(s)
    """
    # BUG FIX: *profiles* was only bound inside the inner try, so a failed
    # profile.list call raised NameError at render time (silently turning a
    # partial success into {'success': False}); default it to an empty dict.
    profiles = {}
    try:
        logger.log('CouchPotato :: Retrieving movie info', 'INFO')
        result = couchpotato_api('media.get', 'id=%s' % id)
        try:
            logger.log('CouchPotato :: Getting quality profiles', 'INFO')
            profiles = couchpotato_api('profile.list')
        except Exception as e:
            log_exception(e)
        logger.log('CouchPotato :: Retrieving movie info (DONE)', 'INFO')
        return render_template('couchpotato/info.html',
            couchpotato=result,
            profiles=profiles,
        )
    except Exception as e:
        log_exception(e)
        return jsonify({'success': False})
@app.route('/xhr/couchpotato/edit_movie/<movieid>/<profileid>/')
def cp_edit_movie(movieid, profileid):
    """Move a movie onto a different quality profile.

    :param movieid: comma-separated movie id(s)
    :param profileid: id of the profile to assign
    """
    try:
        logger.log('CouchPotato :: Retrieving movie info', 'INFO')
        outcome = couchpotato_api(
            'movie.edit', 'id=%s&profile_id=%s' % (movieid, profileid))
        if outcome['success']:
            logger.log('CouchPotato :: Retrieving movie info (DONE)', 'INFO')
            return jsonify({'success': True})
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/log/')
@app.route('/xhr/couchpotato/log/<type>/<lines>/')
def cp_log(type='all', lines=30):
    """Render the last *lines* entries of the CouchPotato log.

    :param type: 'all', 'error', 'info' or 'debug'
    :param lines: number of lines, most recent first
    """
    try:
        logger.log('CouchPotato :: Retrieving "%s" log' % type, 'INFO')
        data = couchpotato_api(
            'logging.partial', 'type=%s&lines=%s' % (type, lines))
        if data['success']:
            logger.log('CouchPotato :: Retrieving "%s" log (DONE)' % type, 'INFO')
            return render_template('couchpotato/log.html',
                couchpotato=data,
                level=type,
            )
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
@app.route('/xhr/couchpotato/notification/read/')
@app.route('/xhr/couchpotato/notification/read/<int:id>/')
def cp_notification_read(id=False):
    """Mark one notification (or, without *id*, all of them) as read.

    BUG FIX: the log line formatted *id* with %i before checking it, so
    the mark-all route always logged notification "0"; log per branch
    instead.
    """
    try:
        if id:
            logger.log('CouchPotato :: Marking notification "%i" as read' % id, 'INFO')
            couchpotato_api('notification.markread', 'ids=%i' % id)
        else:
            logger.log('CouchPotato :: Marking all notifications as read', 'INFO')
            couchpotato_api('notification.markread')
        return jsonify({'success': True})
    except Exception as e:
        log_exception(e)
        return jsonify({'success': False})
@app.route('/xhr/couchpotato/release/<action>/<id>/')
@requires_auth
def release_action(action, id):
    """Run a CouchPotato release action (e.g. 'deletes') on release *id*."""
    if id.isdigit():
        id = int(id)
    try:
        # Turn the plural action into a gerund for the log message,
        # e.g. 'deletes' -> 'Deleting'.
        logger.log('CouchPotato :: %sing release %s' % (action.title()[:-1], id), 'INFO')
        return jsonify(couchpotato_api('release.%s' % action, 'id=%s' % id))
    except Exception as e:
        log_exception(e)
    return jsonify({'success': False})
| mit | 85851ec8ee4b2a867d1a3ac01d6c72b0 | 29.314346 | 138 | 0.596492 | 3.660892 | false | false | false | false |
mrkipling/maraschino | lib/apscheduler/job.py | 29 | 4675 | """
Jobs represent scheduled tasks.
"""
from threading import Lock
from datetime import timedelta
from apscheduler.util import to_unicode, ref_to_obj, get_callable_name,\
obj_to_ref
class MaxInstancesReachedError(Exception):
    """Raised when a job fires while its maximum number of concurrently
    running instances is already executing."""
    pass
class Job(object):
    """
    Encapsulates the actual Job along with its metadata. Job instances
    are created by the scheduler when adding jobs, and it should not be
    directly instantiated.

    :param trigger: trigger that determines the execution times
    :param func: callable to call when the trigger is triggered
    :param args: list of positional arguments to call func with
    :param kwargs: dict of keyword arguments to call func with
    :param name: name of the job (optional)
    :param misfire_grace_time: seconds after the designated run time that
        the job is still allowed to be run
    :param coalesce: run once instead of many times if the scheduler determines
        that the job should be run more than once in succession
    :param max_runs: maximum number of times this job is allowed to be
        triggered
    :param max_instances: maximum number of concurrently running
        instances allowed for this job
    """
    # Maintained externally (job store assigns id; scheduler updates
    # next_run_time via compute_next_run_time).
    id = None
    next_run_time = None

    def __init__(self, trigger, func, args, kwargs, misfire_grace_time,
                 coalesce, name=None, max_runs=None, max_instances=1):
        # Validate eagerly so a misconfigured job fails when it is added,
        # not when it first fires.
        if not trigger:
            raise ValueError('The trigger must not be None')
        if not hasattr(func, '__call__'):
            raise TypeError('func must be callable')
        if not hasattr(args, '__getitem__'):
            raise TypeError('args must be a list-like object')
        if not hasattr(kwargs, '__getitem__'):
            raise TypeError('kwargs must be a dict-like object')
        if misfire_grace_time <= 0:
            raise ValueError('misfire_grace_time must be a positive value')
        if max_runs is not None and max_runs <= 0:
            raise ValueError('max_runs must be a positive value')
        if max_instances <= 0:
            raise ValueError('max_instances must be a positive value')
        # Protects the concurrent-instance counter (see add/remove_instance).
        self._lock = Lock()
        self.trigger = trigger
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.name = to_unicode(name or get_callable_name(func))
        self.misfire_grace_time = misfire_grace_time
        self.coalesce = coalesce
        self.max_runs = max_runs
        self.max_instances = max_instances
        self.runs = 0        # times the job has been triggered so far
        self.instances = 0   # currently running instances

    def compute_next_run_time(self, now):
        # Once max_runs is exhausted the job never fires again.
        if self.runs == self.max_runs:
            self.next_run_time = None
        else:
            self.next_run_time = self.trigger.get_next_fire_time(now)

        return self.next_run_time

    def get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and ``now``.
        """
        run_times = []
        run_time = self.next_run_time
        # Nudge past the previous fire time so the trigger does not return
        # the same instant again.
        increment = timedelta(microseconds=1)
        while ((not self.max_runs or self.runs < self.max_runs) and
               run_time and run_time <= now):
            run_times.append(run_time)
            run_time = self.trigger.get_next_fire_time(run_time + increment)

        return run_times

    def add_instance(self):
        # Atomically bump the running-instance count, enforcing the cap.
        self._lock.acquire()
        try:
            if self.instances == self.max_instances:
                raise MaxInstancesReachedError
            self.instances += 1
        finally:
            self._lock.release()

    def remove_instance(self):
        self._lock.acquire()
        try:
            assert self.instances > 0, 'Already at 0 instances'
            self.instances -= 1
        finally:
            self._lock.release()

    def __getstate__(self):
        # Prevents the unwanted pickling of transient or unpicklable variables
        state = self.__dict__.copy()
        state.pop('instances', None)
        state.pop('func', None)
        state.pop('_lock', None)
        # Store the function as a module:name reference instead.
        state['func_ref'] = obj_to_ref(self.func)
        return state

    def __setstate__(self, state):
        # Recreate the transient members dropped in __getstate__.
        state['instances'] = 0
        state['func'] = ref_to_obj(state.pop('func_ref'))
        state['_lock'] = Lock()
        self.__dict__ = state

    def __eq__(self, other):
        # Persisted jobs compare by id; unsaved jobs only equal themselves.
        if isinstance(other, Job):
            return self.id is not None and other.id == self.id or self is other
        return NotImplemented

    def __repr__(self):
        return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (self.name,
            str(self.trigger), str(self.next_run_time))
| mit | e9079fd63e82f3e6b9bc6a0d569f153a | 33.88806 | 79 | 0.606203 | 4.108084 | false | false | false | false |
mrkipling/maraschino | modules/weather.py | 8 | 3625 | from flask import render_template
from maraschino import app, WEBROOT
from maraschino.tools import requires_auth, get_setting_value
from weatherfeed.weatherfeed import Weather
from jinja2.filters import FILTERS
import datetime
def meridian():
    """True when the user prefers 12-hour (AM/PM) time display."""
    return get_setting_value('weather_time') == '0'
def get_time():
    """Current local time, 12- or 24-hour according to the meridian setting."""
    fmt = '%I:%M' if meridian() else '%H:%M'
    return datetime.datetime.now().strftime(fmt)
def get_date():
    """Current local date, e.g. 'Monday 01 January'."""
    return datetime.datetime.now().strftime('%A %d %B')
def weather_temp(temp):
    """Jinja filter: format a Fahrenheit temperature string per user units.

    Non-numeric input (e.g. 'N/A' -- and note negative values, since
    str.isdigit() rejects '-') is passed through unchanged.
    """
    if not temp.isdigit():
        return temp
    value = int(temp)
    degrees = unichr(176)
    if get_setting_value('weather_use_celcius') == '1':
        # Integer Fahrenheit -> Celsius conversion (truncating, as before).
        value = ((value - 32) * 5) / 9
        return str(int(value)) + degrees + 'C'
    return str(int(value)) + degrees + 'F'
FILTERS['weather_temp'] = weather_temp
def weather_speed(speed):
    """Jinja filter: format a mph wind speed per user units.

    Non-numeric input is passed through unchanged.
    """
    if not speed.isdigit():
        return speed
    value = int(speed)
    if get_setting_value('weather_use_kilometers') == '1':
        return str(int(value * 1.609)) + 'kph'
    return str(int(value)) + 'mph'
FILTERS['weather_speed'] = weather_speed
@app.route('/xhr/weather/')
@requires_auth
def xhr_weather():
    """Render the weather module: current conditions plus the forecast.

    Fetches data via weatherfeed (imperial units; the template filters
    convert), picks a compass image for the wind bearing and a condition
    image per day, then hands everything to the weather template.
    """
    location = get_setting_value('weather_location')
    # NOTE(review): unused here -- unit conversion happens in the
    # weather_temp/weather_speed template filters.
    use_kilometers = get_setting_value('weather_use_kilometers') == '1'
    compact_view = get_setting_value('weather_compact') == '1'
    w = Weather(location, metric=False)
    weather = {
        'current': w.currentConditions,
        'forecast': w.forecast
    }
    # Map the wind bearing (degrees) onto one of eight compass images.
    # Boundaries are half-open: e.g. 22 falls in NE, not N.
    wind = int(weather['current']['wind']['degrees'])
    if wind in range(0, 22) or wind in range(338, 360):
        img = 'N'
    elif wind in range(68, 112):
        img = 'E'
    elif wind in range(158, 202):
        img = 'S'
    elif wind in range(248, 292):
        img = 'W'
    elif wind in range(22, 68):
        img = 'NE'
    elif wind in range(112, 158):
        img = 'SE'
    elif wind in range(202, 248):
        img = 'SW'
    elif wind in range(292, 338):
        img = 'NW'
    wind_image = '%s/static/images/weather/%s.png' % (WEBROOT, img)
    # Keyword -> image lookup for the textual condition descriptions.
    conditions = [
        {
            'image': 'Rain',
            'conditions': ['rain', 'shower', 'drizzle']
        },
        {
            'image': 'Thunderstorm',
            'conditions': ['thunder']
        },
        {
            'image': 'Sunny',
            'conditions': ['sunny', 'clear']
        },
        {
            'image': 'Overcast',
            'conditions': ['overcast', 'cloudy']
        },
        {
            'image': 'Snow',
            'conditions': ['snow']
        },
        {
            'image': 'Storm',
            'conditions': ['storm', 'hail']
        },
        {
            'image': 'Fog',
            'conditions': ['mist', 'fog', 'smoke', 'haze']
        }
    ]
    # Attach a matching condition image to the current conditions and to
    # each forecast day (forecast entries may be empty/falsy).
    for a in conditions:
        for cond in a['conditions']:
            if cond in weather['current']['type'].lower():
                weather['current']['image'] = '%s/static/images/weather/%s.png' % (WEBROOT, a['image'])
            for day in weather['forecast']:
                if day:
                    if cond in day['day']['type'].lower():
                        day['image'] = '%s/static/images/weather/%s.png' % (WEBROOT, a['image'])
    return render_template('weather.html',
        compact_view=compact_view,
        weather=weather,
        wind_image=wind_image,
        time = get_time(),
        date = get_date(),
        meridian = meridian()
    )
| mit | d3b15c476c07ab6166560cb415c29eae | 24.173611 | 103 | 0.525517 | 3.59623 | false | false | false | false |
california-civic-data-coalition/django-calaccess-campaign-browser | calaccess_campaign_browser/models/filers.py | 3 | 8245 | from .filings import Filing
from django.db import models
from django.template.defaultfilters import slugify
from calaccess_campaign_browser.utils.models import AllCapsNameMixin
import time
class Filer(AllCapsNameMixin):
    """
    An entity that files campaign finance disclosure documents.

    That includes candidates for public office that have committees raising
    money on their behalf (i.e. Jerry Brown) as well as Political Action
    Committees (PACs) that contribute money to numerous candidates for office.
    """
    name = models.CharField(max_length=255, null=True)
    # Numeric CAL-ACCESS filer identifier.
    filer_id_raw = models.IntegerField(db_index=True)
    xref_filer_id = models.CharField(
        max_length=32,
        null=True,
        db_index=True
    )
    FILER_TYPE_CHOICES = (
        ('pac', 'PAC'),
        ('cand', 'Candidate'),
    )
    filer_type = models.CharField(
        max_length=10,
        choices=FILER_TYPE_CHOICES
    )
    # Party codes as used by CAL-ACCESS.
    PARTY_CHOICES = (
        ('16013', 'Americans Elect'),
        ('16012', 'No party preference'),
        ('16011', 'Unknown'),
        ('16010', 'Natural law'),
        ('16009', 'Non-partisan'),
        ('16008', 'Libertarian'),
        ('16007', 'Independent'),
        ('16006', 'Peace and Freedom'),
        ('16005', 'American Independent'),
        ('16004', 'Reform'),
        ('16003', 'Green'),
        ('16002', 'Republican'),
        ('16001', 'Democratic'),
        ('0', 'N/A'),
    )
    party = models.CharField(
        max_length=255,
        choices=PARTY_CHOICES,
    )
    # Several raw spellings map to the same label in the source data.
    STATUS_CHOICES = (
        ('A', 'Active'),
        ('ACTIVE', 'Active'),
        ('INACTIVE', 'Inactive'),
        ('N', 'Inactive'),
        ('P', 'Pending'),
        ('R', 'Revoked'),
        ('S', 'Suspended'),
        ('TERMINATED', 'Terminated'),
        ('W', 'Withdrawn'),
        ('Y', 'Active'),
    )
    status = models.CharField(
        max_length=255,
        null=True,
        choices=STATUS_CHOICES
    )
    effective_date = models.DateField(null=True)

    class Meta:
        ordering = ("name",)
        app_label = 'calaccess_campaign_browser'

    @models.permalink
    def get_absolute_url(self):
        return ('filer_detail', [str(self.pk)])

    @property
    def slug(self):
        # URL-safe version of the filer name.
        return slugify(self.name)

    @property
    def real_filings(self):
        # Filings from this filer's committees, via the Filing.real
        # manager (presumably excludes amended/duplicate filings --
        # confirm against the Filing model).
        return Filing.real.filter(committee__filer=self)

    @property
    def total_contributions(self):
        # Sum contribution totals across filing summaries, skipping
        # filings without a summary or without a total.
        summaries = [f.summary for f in self.real_filings]
        summaries = [s for s in summaries if s]
        return sum([
            s.total_contributions for s in summaries if s.total_contributions
        ])
class Committee(AllCapsNameMixin):
    """
    If a Candidate controls the committee, the filer is associated with the
    Candidate Filer record, not the committee Filer record

    But the committee Filer record can still be accessed using filer_id_raw

    So candidate filers potentially link to multiple committes,
    and committee filers that are not candidate controlled
    link back to one, committee filer
    """
    filer = models.ForeignKey('Filer')
    # Numeric CAL-ACCESS filer identifier for the committee itself.
    filer_id_raw = models.IntegerField(db_index=True)
    xref_filer_id = models.CharField(
        max_length=32,
        null=True,
        db_index=True
    )
    name = models.CharField(max_length=255, null=True)
    CMTE_TYPE_OPTIONS = (
        ('cand', 'Candidate'),
        ('pac', 'PAC'),
        ('linked-pac', 'Non-Candidate Committee, linked to other committees'),
    )
    committee_type = models.CharField(
        max_length=50,
        choices=CMTE_TYPE_OPTIONS,
        db_index=True,
    )
    PARTY_CHOICES = (
        ('16013', 'Americans Elect'),
        ('16012', 'No party preference'),
        ('16011', 'Unknown'),
        ('16010', 'Natural law'),
        ('16009', 'Non-partisan'),
        ('16008', 'Libertarian'),
        ('16007', 'Independent'),
        ('16006', 'Peace and Freedom'),
        ('16005', 'American Independent'),
        ('16004', 'Reform'),
        ('16003', 'Green'),
        ('16002', 'Republican'),
        ('16001', 'Democratic'),
        ('0', 'N/A'),
    )
    party = models.CharField(
        max_length=255,
        choices=PARTY_CHOICES
    )
    COMMITTEE_STATUS_CHOICES = (
        ('', 'N/A'),
        ('N', 'Inactive'),
        ('P', 'Pending'),
        ('R', 'Revoked'),
        ('S', 'Suspended'),
        ('W', 'Withdrawn'),
        ('Y', 'Active'),
    )
    status = models.CharField(
        max_length=255,
        null=True,
        choices=COMMITTEE_STATUS_CHOICES
    )
    LEVEL_CHOICES = (
        ('40501', 'Local'),
        ('40502', 'State'),
        ('40503', 'County'),
        ('40504', 'Multi-county'),
        ('40505', 'City'),
        ('40506', 'Federal'),
        ('40507', 'Superior court judge'),
        ('0', 'N/A'),
    )
    level_of_government = models.CharField(
        max_length=255,
        null=True,
        choices=LEVEL_CHOICES
    )
    effective_date = models.DateField(null=True)

    class Meta:
        ordering = ("name",)
        app_label = 'calaccess_campaign_browser'

    @models.permalink
    def get_absolute_url(self):
        return ('committee_detail', [str(self.pk)])

    def get_calaccess_url(self):
        """Link to this committee's page on the official CAL-ACCESS site."""
        url = "http://cal-access.ss.ca.gov/Campaign/Committees/Detail.aspx?id="
        return url + str(self.filer_id_raw)

    @property
    def filer_short_name(self):
        return self.filer.short_name

    @property
    def real_filings(self):
        # Filings via the Filing.real manager, with the cycle prefetched.
        return Filing.real.by_committee(self).select_related("cycle")

    def _filing_total(self, attr):
        """Sum *attr* across real filings, skipping filings where it is falsy."""
        return sum(
            getattr(f, attr) for f in self.real_filings if getattr(f, attr)
        )

    def _filing_total_by(self, attr, key_func):
        """Sum *attr* across real filings grouped by ``key_func(filing)``.

        Returns (key, total) pairs sorted by key, descending -- the same
        shape the four ``total_*_by_*`` properties previously built with
        duplicated loops.
        """
        totals = {}
        for f in self.real_filings:
            amount = getattr(f, attr)
            if not amount:
                continue
            key = key_func(f)
            totals[key] = totals.get(key, 0) + amount
        return sorted(totals.items(), key=lambda x: x[0], reverse=True)

    @property
    def total_contributions(self):
        return self._filing_total('total_contributions')

    @property
    def total_contributions_by_year(self):
        return self._filing_total_by(
            'total_contributions', lambda f: f.period.start_date.year)

    @property
    def total_contributions_by_cycle(self):
        return self._filing_total_by(
            'total_contributions', lambda f: f.cycle.name)

    @property
    def total_expenditures(self):
        return self._filing_total('total_expenditures')

    @property
    def total_expenditures_by_cycle(self):
        return self._filing_total_by(
            'total_expenditures', lambda f: f.cycle.name)

    @property
    def total_expenditures_by_year(self):
        return self._filing_total_by(
            'total_expenditures', lambda f: f.period.start_date.year)

    @property
    def total_cashflow_balance(self):
        return self.total_contributions - self.total_expenditures

    @property
    def years_active(self):
        """Formatted '(start - end)' span of filing years, or None."""
        filings = self.real_filings.all()
        if not filings:
            return None
        start_filing = filings.order_by('start_date').first().start_date.year
        end_filing = filings.order_by('end_date').last().end_date.year
        # A span reaching the current year is shown as open-ended.
        if end_filing == int(time.strftime("%Y")):
            end_filing = "Present"
        if start_filing == end_filing:
            return "(%s)" % end_filing
        else:
            return "(%s - %s)" % (start_filing, end_filing)
| mit | a708c49d58cc241c5849fc25722762ce | 29.091241 | 79 | 0.561431 | 3.630559 | false | false | false | false |
mrkipling/maraschino | maraschino/models.py | 6 | 5319 | # -*- coding: utf-8 -*-
"""Class that represent the database structure"""
from sqlalchemy import Column, Integer, String, Boolean, PickleType
from maraschino.database import Base
class Module(Base):
    """Table for one Maraschino module"""
    __tablename__ = 'modules'

    id = Column(Integer, primary_key=True)
    name = Column(String(50), unique=True)
    column = Column(Integer)    # dashboard column the module lives in
    position = Column(Integer)  # vertical order within that column
    poll = Column(Integer)      # refresh interval (presumably seconds -- confirm)
    delay = Column(Integer)

    def __init__(self, name, column, position=None, poll=None, delay=None):
        self.name = name
        self.column = column
        self.position = position
        self.poll = poll
        self.delay = delay

    def __repr__(self):
        return '<Module %r>' % (self.name)
class Setting(Base):
    """Table for one setting value"""
    __tablename__ = 'settings'

    id = Column(Integer, primary_key=True)
    key = Column(String(100), unique=True)  # unique lookup key
    value = Column(String(500))

    def __init__(self, key, value=None):
        self.key = key
        self.value = value

    def __repr__(self):
        return '<Setting %r>' % (self.key)
class Application(Base):
    """Table for one application in the applications module"""
    __tablename__ = 'applications'

    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    url = Column(String(1000))
    description = Column(String(100))
    image = Column(String(100))
    position = Column(Integer)

    def __init__(self, name, url, description=None, image=None, position=None):
        """Create an application shortcut.

        When *position* is omitted the entry is appended after the
        current highest position.
        """
        self.name = name
        self.url = url
        self.description = description
        self.image = image
        # Idiom fix: compare to None with `is`, not `==`.
        if position is None:
            self.position = highest_position(Application)
        else:
            self.position = position

    def __repr__(self):
        return '<Application %r>' % (self.name)
class Disk(Base):
    """Old diskspace module table. No longer in use."""
    __tablename__ = 'disks'

    id = Column(Integer, primary_key=True)
    path = Column(String(500))
    position = Column(Integer)

    def __init__(self, path, position=None):
        self.path = path
        # Append after the current highest position when none is given.
        if position == None:
            self.position = highest_position(Disk)
        else:
            self.position = position

    def __repr__(self):
        return '<Disk %r>' % (self.path)
class HardDisk(Base):
    """Table for one disk in the diskspace module"""
    __tablename__ = 'disks2'

    id = Column(Integer, primary_key=True)
    data = Column(PickleType)
    position = Column(Integer)

    def __init__(self, data=None, position=None):
        """Create a disk entry.

        BUG FIX: *data* previously defaulted to a shared mutable dict
        ({}), so every instance created without data aliased the same
        object; use None and build a fresh dict per instance.
        """
        self.data = {} if data is None else data
        if position is None:
            self.position = highest_position(HardDisk)
        else:
            self.position = position

    def __repr__(self):
        return '<HardDisk %r>' % (self.position)
class XbmcServer(Base):
    """Table for the XBMC server config"""
    __tablename__ = 'xbmc_servers'

    id = Column(Integer, primary_key=True)
    label = Column(String(500))
    position = Column(Integer)
    hostname = Column(String(500))
    port = Column(String(10))
    username = Column(String(100))
    password = Column(String(100))
    mac_address = Column(String(100))

    def __init__(self, label, position, hostname, port='8080', username=None, password=None, mac_address=None):
        self.label = label
        if position is None:
            # BUG FIX: this previously called highest_position(Disk), a
            # copy/paste slip that positioned new servers relative to the
            # unrelated disks table instead of other XBMC servers.
            self.position = highest_position(XbmcServer)
        else:
            self.position = position
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.mac_address = mac_address

    def __repr__(self):
        return '<XbmcServer %r>' % (self.label)
class Script(Base):
    """Table for one entry in the scripts module."""
    __tablename__ = 'scripts'

    id = Column(Integer, primary_key=True)
    label = Column(String(500))
    script = Column(String(500))      # path/command of the script to run
    parameters = Column(String(500))
    updates = Column(Integer)
    status = Column(String(500))
    data = Column(PickleType)         # pickled blob -- shape set by the scripts module

    def __init__(self, label, script, parameters=None, updates=0, status=None, data=None):
        self.label = label
        self.script = script
        self.parameters = parameters
        self.updates = updates
        self.status = status
        self.data = data

    def __repr__(self):
        return '<Script %r>' % (self.label)
class RecentlyAdded(Base):
    """Table storing a named pickled payload for the recently-added module."""
    __tablename__ = 'recently_added'

    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    data = Column(PickleType)

    def __init__(self, name, data=None):
        """Create an entry.

        BUG FIX: *data* previously defaulted to a shared mutable list
        ([]); every instance created without data aliased the same list.
        Use None and build a fresh list per instance.
        """
        self.name = name
        self.data = [] if data is None else data

    def __repr__(self):
        return '<RecentlyAdded %r>' % (self.name)
class NewznabSite(Base):
    """Table for one Newznab indexer (name, URL and API key)."""
    __tablename__ = 'newznab'

    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    url = Column(String(100))
    apikey = Column(String(100))

    def __init__(self, name, url, apikey):
        self.name = name
        self.url = url
        self.apikey = apikey

    def __repr__(self):
        return '<NewznabSite %r>' % (self.name)
def highest_position(model):
    """Return the next free position for *model* (highest existing + 1)."""
    top = 0
    for entry in model.query.all():
        if entry.position > top:
            top = entry.position
    return top + 1
| mit | 8db32392fcf27414d7accba95d5bfbad | 25.595 | 111 | 0.599549 | 3.865552 | false | false | false | false |
mrkipling/maraschino | lib/werkzeug/exceptions.py | 84 | 16350 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException, e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
from werkzeug._internal import HTTP_STATUS_CODES, _get_environ
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions. This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    # Subclasses set these (e.g. a 404 subclass sets code = 404).
    code = None
    description = None

    def __init__(self, description=None):
        Exception.__init__(self, '%d %s' % (self.code, self.name))
        if description is not None:
            self.description = description

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, description=None):
                cls.__init__(self, description)
                exception.__init__(self, arg)
        # Make the generated class appear to come from the caller's module.
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        return HTTP_STATUS_CODES[self.code]

    def get_description(self, environ):
        """Get the description."""
        environ = _get_environ(environ)
        return self.description

    def get_body(self, environ):
        """Get the HTML body."""
        # NOTE(review): `escape` is not imported in this chunk; it is
        # presumably provided elsewhere in the module (late import to
        # avoid a circular dependency) -- confirm.
        return (
            '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            '<title>%(code)s %(name)s</title>\n'
            '<h1>%(name)s</h1>\n'
            '%(description)s\n'
        ) % {
            'code':         self.code,
            'name':         escape(self.name),
            'description':  self.get_description(environ)
        }

    def get_headers(self, environ):
        """Get a list of headers."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ):
        """Get a response object.

        :param environ: the environ for the request.
        :return: a :class:`BaseResponse` object or a subclass thereof.
        """
        # lazily imported for various reasons. For one, we can use the exceptions
        # with custom responses (testing exception instances against types) and
        # so we don't ever have to import the wrappers, but also because there
        # are circular dependencies when bootstrapping the module.
        environ = _get_environ(environ)
        from werkzeug.wrappers import BaseResponse
        headers = self.get_headers(environ)
        return BaseResponse(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    # Python 2 str/unicode pair: __str__ encodes the unicode form as UTF-8.
    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        if 'description' in self.__dict__:
            txt = self.description
        else:
            txt = self.name
        return '%d: %s' % (self.code, txt)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
class _ProxyException(HTTPException):
    """Internal wrapper that renders an arbitrary response object when the
    exception is used as an error response.
    """
    def __init__(self, response):
        Exception.__init__(self, 'proxy exception for %r' % response)
        self.response = response

    def get_response(self, environ):
        # the environ is ignored; the wrapped response is returned as-is
        return self.response
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application that the
    application or server cannot handle.
    """
    code = 400
    description = (
        '<p>The browser (or proxy) sent a request that this server could '
        'not understand.</p>'
    )


class ClientDisconnected(BadRequest):
    """Internal exception raised when Werkzeug detects a disconnected
    client.

    Since the client is already gone, trying to send the error message to
    it might fail and ultimately raise yet another exception in the
    server; deriving from 400 keeps it silenced by default as far as
    Werkzeug is concerned.  Disconnections cannot be reliably detected and
    are largely unspecified by WSGI, so this may or may not be raised when
    a client goes away.

    .. versionadded:: 0.8
    """
class Unauthorized(HTTPException):
    """*401* `Unauthorized`

    Raise if the user is not authorized; also used when HTTP basic auth
    is wanted.
    """
    code = 401
    description = (
        '<p>The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.</p><p>In case you are allowed to request '
        'the document, please check your user-id and password and try '
        'again.</p>'
    )


class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user was authenticated but doesn't have permission for
    the requested resource.
    """
    code = 403
    description = (
        '<p>You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.</p>'
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        '<p>The requested URL was not found on the server.</p>'
        '<p>If you entered the URL manually please check your spelling and '
        'try again.</p>'
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle — for
    example `POST` on a view-only resource.  Especially useful for REST.

    The first argument should be the list of allowed methods; strictly
    speaking the response is invalid without valid methods in the
    ``Allow`` header, which that list provides.
    """
    code = 405

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ):
        hdrs = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            # advertise the permitted methods in the Allow header
            hdrs.append(('Allow', ', '.join(self.valid_methods)))
        return hdrs

    def get_description(self, environ):
        method = escape(environ.get('REQUEST_METHOD', 'GET'))
        return '<p>The method %s is not allowed for the requested URL.</p>' % method
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    description = (
        '<p>The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.</p>'
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """
    code = 408
    description = (
        '<p>The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.</p>'
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it
    conflicts with the current state on the server.

    .. versionadded:: 0.7
    """
    code = 409
    description = (
        # fixed: the original description string was missing its closing
        # </p> tag, producing invalid HTML on the rendered error page
        '<p>A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.</p>'
    )
class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without a new
    location.
    """
    code = 410
    description = (
        # fixed: the original description string was missing its closing
        # </p> tag, producing invalid HTML on the rendered error page
        '<p>The requested URL is no longer available on this server and '
        'there is no forwarding address.</p><p>If you followed a link '
        'from a foreign page, please contact the author of this page.</p>'
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header,
    which is required for the kind of processing the server does.
    """
    code = 411
    description = (
        '<p>A request with this method requires a valid <code>Content-'
        'Length</code> header.</p>'
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``,
    or ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        '<p>The precondition on the request for the URL failed positive '
        'evaluation.</p>'
    )
class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code to return if the submitted data exceeded a given
    limit.
    """
    code = 413
    description = (
        '<p>The data value transmitted exceeds the capacity limit.</p>'
    )


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        '<p>The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.</p>'
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    Returned if the server is unable to handle the media type the client
    transmitted.
    """
    code = 415
    description = (
        '<p>The server does not support the media type transmitted in '
        'the request.</p>'
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for a part of the file that lies beyond the end of
    the file.

    .. versionadded:: 0.7
    """
    code = 416
    description = (
        # fixed: closing </p> was missing from the original string
        '<p>The server cannot provide the requested range.</p>'
    )


class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """
    code = 417
    description = (
        # fixed: closing </p> was missing from the original string
        '<p>The server could not meet the requirements of the Expect header</p>'
    )


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """
    code = 418
    description = (
        # fixed: closing </p> was missing from the original string
        '<p>This server is a teapot, not a coffee machine</p>'
    )
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred.  A good fallback when an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        '<p>The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.</p>'
    )


class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.

    .. note:: the class name shadows the ``NotImplemented`` builtin; it is
       kept unchanged because it is part of the public API.
    """
    code = 501
    description = (
        '<p>The server does not support the action requested by the '
        'browser.</p>'
    )


class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    Return this status code if your application does proxying and an
    upstream server it accessed while fulfilling the request returned an
    invalid response.
    """
    code = 502
    description = (
        '<p>The proxy server received an invalid response from an upstream '
        'server.</p>'
    )


class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`

    Status code to return when a service is temporarily unavailable.
    """
    code = 503
    description = (
        '<p>The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.</p>'
    )
#: mapping of status code -> exception class, filled in automatically below
default_exceptions = {}
__all__ = ['HTTPException']


def _find_exceptions():
    # register and export every exception class defined above that carries
    # a numeric status code
    for obj in globals().itervalues():
        try:
            if getattr(obj, 'code', None) is not None:
                default_exceptions[obj.code] = obj
                __all__.append(obj.__name__)
        except TypeError:  # pragma: no cover
            continue
_find_exceptions()
del _find_exceptions


#: raised by the request functions if they were unable to decode the
#: incoming data properly.
HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError')
class Aborter(object):
    """Callable that raises the HTTP exception registered for a status
    code.

    Given a dict of ``code -> exception`` items, calling the instance with
    an integer looks the code up in the mapping and raises the matching
    exception; remaining arguments are forwarded to the exception
    constructor.  Calling it with a WSGI application instead raises a
    proxy exception that renders that application.
    """
    def __init__(self, mapping=None, extra=None):
        # start from the module-wide defaults unless a mapping is given,
        # then layer any extra entries on top
        self.mapping = {}
        if mapping is None:
            mapping = default_exceptions
        self.mapping.update(mapping)
        if extra is not None:
            self.mapping.update(extra)

    def __call__(self, code, *args, **kwargs):
        if not args and not kwargs and not isinstance(code, (int, long)):
            raise _ProxyException(code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        raise self.mapping[code](*args, **kwargs)
#: ready-to-use :class:`Aborter` bound to the module's default exception
#: mapping; call as ``abort(404)`` or with a WSGI application object.
abort = Aborter()
#: an exception that is used internally to signal both a key error and a
#: bad request.  Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported at the bottom because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
| mit | d111c7da7827e59133e7c8d35dd79e84 | 29.503731 | 82 | 0.635841 | 4.440521 | false | false | false | false |
mrkipling/maraschino | lib/sqlalchemy/events.py | 14 | 15648 | # sqlalchemy/events.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from sqlalchemy import event, exc, util
# engine/pool are bound through util.importlater rather than imported
# directly; NOTE(review): presumably this defers module resolution to avoid
# circular imports at load time -- confirm against sqlalchemy.util.importlater.
engine = util.importlater('sqlalchemy', 'engine')
pool = util.importlater('sqlalchemy', 'pool')
class DDLEvents(event.Events):
    """Define event listeners for schema objects, that is,
    :class:`.SchemaItem` and :class:`.SchemaEvent` subclasses, including
    :class:`.MetaData`, :class:`.Table` and :class:`.Column`.

    :class:`.MetaData` and :class:`.Table` support events specifically
    regarding when CREATE and DROP DDL is emitted to the database.
    Attachment events allow customizing behavior whenever a child schema
    element is associated with a parent — a :class:`.Column` with its
    :class:`.Table`, a :class:`.ForeignKeyConstraint` with a
    :class:`.Table`, and so on.

    Example using the ``after_create`` event::

        from sqlalchemy import event
        from sqlalchemy import Table, Column, Metadata, Integer

        m = MetaData()
        some_table = Table('some_table', m, Column('data', Integer))

        def after_create(target, connection, **kw):
            connection.execute("ALTER TABLE %s SET name=foo_%s" %
                               (target.name, target.name))

        event.listen(some_table, "after_create", after_create)

    DDL events integrate closely with the :class:`.DDL` class and the
    :class:`.DDLElement` hierarchy of DDL clause constructs, which are
    themselves appropriate as listener callables::

        from sqlalchemy import DDL
        event.listen(
            some_table,
            "after_create",
            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
        )

    The methods here define the name of an event as well as the names of
    members that are passed to listener functions.

    See also :ref:`event_toplevel`, :class:`.DDLElement`, :class:`.DDL`,
    :ref:`schema_ddl_sequences`.
    """

    def before_create(self, target, connection, **kw):
        """Called before CREATE statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table` object
           which is the target of the event.
        :param connection: the :class:`.Connection` where the CREATE
           statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant to the event;
           the contents may vary across releases and include the list of
           tables being generated for a metadata-level event, the
           checkfirst flag, and other elements used by internal events.
        """

    def after_create(self, target, connection, **kw):
        """Called after CREATE statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table` object
           which is the target of the event.
        :param connection: the :class:`.Connection` where the CREATE
           statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant to the event;
           see :meth:`.before_create`.
        """

    def before_drop(self, target, connection, **kw):
        """Called before DROP statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table` object
           which is the target of the event.
        :param connection: the :class:`.Connection` where the DROP
           statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant to the event;
           see :meth:`.before_create`.
        """

    def after_drop(self, target, connection, **kw):
        """Called after DROP statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table` object
           which is the target of the event.
        :param connection: the :class:`.Connection` where the DROP
           statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant to the event;
           see :meth:`.before_create`.
        """

    def before_parent_attach(self, target, parent):
        """Called before a :class:`.SchemaItem` is associated with a
        parent :class:`.SchemaItem`.

        :param target: the target object
        :param parent: the parent to which the target is being attached.

        :func:`.event.listen` also accepts the ``propagate=False``
        modifier for this event: when True, the listener function is also
        established for any copies made of the target object, i.e. those
        generated when :meth:`.Table.tometadata` is used.
        """

    def after_parent_attach(self, target, parent):
        """Called after a :class:`.SchemaItem` is associated with a
        parent :class:`.SchemaItem`.

        :param target: the target object
        :param parent: the parent to which the target is being attached.

        Accepts the same ``propagate`` modifier as
        :meth:`.before_parent_attach`.
        """

    def column_reflect(self, table, column_info):
        """Called for each unit of 'column info' retrieved when a
        :class:`.Table` is being reflected.

        ``column_info`` is the dictionary of column information as
        returned by the dialect — the same dictionary returned in each
        element of the list from
        :meth:`.reflection.Inspector.get_columns`.  The event fires before
        any action is taken against the dictionary, and the contents can
        be modified; the :class:`.Column`-specific arguments ``info``,
        ``key`` and ``quote`` can also be added and will be passed to the
        :class:`.Column` constructor.

        This event is only meaningful if either associated with the
        :class:`.Table` class across the board, e.g.::

            from sqlalchemy.schema import Table
            from sqlalchemy import event

            def listen_for_reflect(table, column_info):
                "receive a column_reflect event"
                # ...

            event.listen(
                Table,
                'column_reflect',
                listen_for_reflect)

        ...or with a specific :class:`.Table` instance using the
        ``listeners`` argument::

            t = Table(
                'sometable',
                autoload=True,
                listeners=[
                    ('column_reflect', listen_for_reflect)
                ])

        This is because the reflection process initiated by
        ``autoload=True`` completes within the scope of the constructor
        for :class:`.Table`.
        """
class SchemaEventTarget(object):
    """Base class for elements that are the targets of
    :class:`.DDLEvents` events; this includes :class:`.SchemaItem` as
    well as :class:`.SchemaType`.
    """
    dispatch = event.dispatcher(DDLEvents)

    def _set_parent(self, parent):
        """Associate with this SchemaEvent's parent object."""
        raise NotImplementedError()

    def _set_parent_with_dispatch(self, parent):
        # wrap the concrete attachment hook with the before/after events
        self.dispatch.before_parent_attach(self, parent)
        self._set_parent(parent)
        self.dispatch.after_parent_attach(self, parent)
class PoolEvents(event.Events):
    """Available events for :class:`.Pool`.

    The methods here define the name of an event as well as the names of
    members that are passed to listener functions, e.g.::

        from sqlalchemy import event

        def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
            "handle an on checkout event"

        event.listen(Pool, 'checkout', my_on_checkout)

    Besides :class:`.Pool` classes and instances, :class:`.PoolEvents`
    also accepts :class:`.Engine` objects and the :class:`.Engine` class
    as targets, which are resolved to the ``.pool`` attribute of the
    given engine or to the :class:`.Pool` class::

        engine = create_engine("postgresql://scott:tiger@localhost/test")

        # will associate with engine.pool
        event.listen(engine, 'checkout', my_on_checkout)
    """

    @classmethod
    def _accept_with(cls, target):
        # translate Engine targets to the pool they carry; everything else
        # passes through unchanged
        if isinstance(target, type):
            if issubclass(target, engine.Engine):
                return pool.Pool
            if issubclass(target, pool.Pool):
                return target
            # a type related to neither falls through and yields None,
            # preserving the historical behavior
        elif isinstance(target, engine.Engine):
            return target.pool
        else:
            return target

    def connect(self, dbapi_connection, connection_record):
        """Called once for each new DB-API connection or Pool's
        ``creator()``.

        :param dbapi_connection: a newly connected raw DB-API connection
           (not a SQLAlchemy ``Connection`` wrapper).
        :param connection_record: the ``_ConnectionRecord`` that
           persistently manages the connection.
        """

    def first_connect(self, dbapi_connection, connection_record):
        """Called exactly once for the first DB-API connection.

        Parameters are as for :meth:`.connect`.
        """

    def checkout(self, dbapi_connection, connection_record, connection_proxy):
        """Called when a connection is retrieved from the Pool.

        :param dbapi_connection: a raw DB-API connection.
        :param connection_record: the ``_ConnectionRecord`` that
           persistently manages the connection.
        :param connection_proxy: the ``_ConnectionFairy`` which manages
           the connection for the span of the current checkout.

        If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the
        current connection will be disposed and a fresh connection
        retrieved; processing of all checkout listeners will abort and
        restart using the new connection.
        """

    def checkin(self, dbapi_connection, connection_record):
        """Called when a connection returns to the pool.

        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections (they do not return to the pool).

        :param dbapi_connection: a raw DB-API connection.
        :param connection_record: the ``_ConnectionRecord`` that
           persistently manages the connection.
        """
class ConnectionEvents(event.Events):
    """Available events for :class:`.Connection`.

    The methods here define the name of an event as well as the names of
    members that are passed to listener functions, e.g.::

        from sqlalchemy import event, create_engine

        def before_execute(conn, clauseelement, multiparams, params):
            log.info("Received statement: %s" % clauseelement)

        engine = create_engine('postgresql://scott:tiger@localhost/test')
        event.listen(engine, "before_execute", before_execute)

    Some events allow modifiers to the listen() function.

    :param retval=False: Applies to the :meth:`.before_execute` and
      :meth:`.before_cursor_execute` events only.  When True, the
      user-defined event function must have a return value, which is a
      tuple of parameters that replace the given statement and
      parameters.  See those methods for a description of the specific
      return arguments.
    """

    @classmethod
    def _listen(cls, target, identifier, fn, retval=False):
        target._has_events = True

        if not retval:
            # without retval=True the user function's return value is
            # ignored; adapt it to the pass-through contract the dispatch
            # machinery expects from these two events
            if identifier == 'before_execute':
                user_fn = fn

                def wrap(conn, clauseelement, multiparams, params):
                    user_fn(conn, clauseelement, multiparams, params)
                    return clauseelement, multiparams, params
                fn = wrap
            elif identifier == 'before_cursor_execute':
                user_fn = fn

                def wrap(conn, cursor, statement,
                         parameters, context, executemany):
                    user_fn(conn, cursor, statement,
                            parameters, context, executemany)
                    return statement, parameters
                fn = wrap
        elif identifier not in ('before_execute', 'before_cursor_execute'):
            # retval=True only makes sense for events whose return value
            # is consumed
            raise exc.ArgumentError(
                "Only the 'before_execute' and "
                "'before_cursor_execute' engine "
                "event listeners accept the 'retval=True' "
                "argument.")
        event.Events._listen(target, identifier, fn)

    def before_execute(self, conn, clauseelement, multiparams, params):
        """Intercept high level execute() events."""

    def after_execute(self, conn, clauseelement, multiparams, params, result):
        """Intercept high level execute() events."""

    def before_cursor_execute(self, conn, cursor, statement,
                              parameters, context, executemany):
        """Intercept low-level cursor execute() events."""

    def after_cursor_execute(self, conn, cursor, statement,
                             parameters, context, executemany):
        """Intercept low-level cursor execute() events."""

    def begin(self, conn):
        """Intercept begin() events."""

    def rollback(self, conn):
        """Intercept rollback() events."""

    def commit(self, conn):
        """Intercept commit() events."""

    def savepoint(self, conn, name=None):
        """Intercept savepoint() events."""

    def rollback_savepoint(self, conn, name, context):
        """Intercept rollback_savepoint() events."""

    def release_savepoint(self, conn, name, context):
        """Intercept release_savepoint() events."""

    def begin_twophase(self, conn, xid):
        """Intercept begin_twophase() events."""

    def prepare_twophase(self, conn, xid):
        """Intercept prepare_twophase() events."""

    def rollback_twophase(self, conn, xid, is_prepared):
        """Intercept rollback_twophase() events."""

    def commit_twophase(self, conn, xid, is_prepared):
        """Intercept commit_twophase() events."""
| mit | 04d8bddfb63ffc68a218cd32fef54c49 | 35.138568 | 119 | 0.619888 | 4.754786 | false | false | false | false |
mrkipling/maraschino | lib/jinja2/nodes.py | 122 | 28750 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from itertools import chain, izip
from collections import deque
from jinja2.utils import Markup, MethodType, FunctionType
#: the types we support for context functions
_context_function_types = (FunctionType, MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
    """Raised when a node cannot perform a requested action (for example
    constant folding of a non-constant expression).
    """
class NodeType(type):
    """Metaclass for nodes that handles field and attribute inheritance:
    the ``fields`` and ``attributes`` tuples of the single parent class
    are automatically prepended to those declared on the child.
    """
    def __new__(cls, name, bases, d):
        # nodes form a strict single-inheritance tree
        assert len(bases) == 1, 'multiple inheritance not allowed'
        for attr in 'fields', 'attributes':
            inherited = list(getattr(bases[0], attr, ()))
            declared = list(d.get(attr, ()))
            merged = inherited + declared
            # a name may appear only once along the inheritance chain
            assert len(merged) == len(set(merged)), 'layout conflict'
            d[attr] = tuple(merged)
        d.setdefault('abstract', False)
        return type.__new__(cls, name, bases, d)
class EvalContext(object):
    """Holds evaluation-time information such as the active autoescape
    setting.  Extensions may attach custom attributes.
    """

    def __init__(self, environment, template_name=None):
        self.environment = environment
        # the environment's autoescape setting may be a per-template
        # callable taking the template name
        if callable(environment.autoescape):
            self.autoescape = environment.autoescape(template_name)
        else:
            self.autoescape = environment.autoescape
        self.volatile = False

    def save(self):
        """Snapshot the current state as a plain dict."""
        return self.__dict__.copy()

    def revert(self, old):
        """Restore a state previously produced by :meth:`save`."""
        self.__dict__.clear()
        self.__dict__.update(old)
def get_eval_context(node, ctx):
    """Return *ctx* unchanged, or build a fresh :class:`EvalContext` from
    the node's attached environment when *ctx* is `None`.

    :raises RuntimeError: if no context is given and the node has no
        environment attached.
    """
    if ctx is not None:
        return ctx
    if node.environment is None:
        raise RuntimeError('if no eval context is passed, the '
                           'node must have an attached '
                           'environment.')
    return EvalContext(node.environment)
class Node(object):
    """Baseclass for all Jinja2 nodes.  The three major kinds are
    :class:`Stmt` (statements), :class:`Expr` (expressions) and
    :class:`Helper` (helper nodes), plus :class:`Template` as the
    outermost wrapper.

    Nodes carry *fields* (other nodes, lists, or arbitrary values, passed
    to the constructor positionally) and *attributes* (passed as keyword
    arguments).  Every node has the attributes `lineno` and
    `environment`; the latter is set for all nodes at the end of parsing.
    """
    __metaclass__ = NodeType
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *values, **attrs):
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if values:
            if len(values) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' %
                                    self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (
                    self.__class__.__name__,
                    len(self.fields),
                    len(self.fields) != 1 and 's' or ''
                ))
            for field_name, value in izip(self.fields, values):
                setattr(self, field_name, value)
        # unspecified attributes default to None
        for attr_name in self.attributes:
            setattr(self, attr_name, attrs.pop(attr_name, None))
        if attrs:
            raise TypeError('unknown attribute %r' %
                            iter(attrs).next())

    def iter_fields(self, exclude=None, only=None):
        """Yield ``(key, value)`` tuples for the defined fields.  By
        default all fields are returned; `only` restricts to a given set
        of field names and `exclude` removes some (both sets or tuples).
        """
        for name in self.fields:
            if (exclude is only is None) or \
               (exclude is not None and name not in exclude) or \
               (only is not None and name in only):
                try:
                    yield name, getattr(self, name)
                except AttributeError:
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Iterate over all direct child nodes: field values that are
        nodes, and nodes inside list-valued fields.
        """
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for child in item:
                    if isinstance(child, Node):
                        yield child
            elif isinstance(item, Node):
                yield item

    def find(self, node_type):
        """Return the first descendant node of the given type, or `None`
        if no such node exists.
        """
        for match in self.find_all(node_type):
            return match

    def find_all(self, node_type):
        """Yield all descendant nodes of the given type (a tuple of types
        matches any of them).
        """
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for match in child.find_all(node_type):
                yield match

    def set_ctx(self, ctx):
        """Reset the context of this node and all children that carry a
        ``ctx`` field.  The parser generates 'load' contexts by default
        and uses this to switch assignment targets etc. to 'store'.
        """
        pending = deque([self])
        while pending:
            current = pending.popleft()
            if 'ctx' in current.fields:
                current.ctx = ctx
            pending.extend(current.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Set the line number of this node and all children that do not
        have one yet (or all of them when `override` is true).
        """
        pending = deque([self])
        while pending:
            current = pending.popleft()
            if 'lineno' in current.attributes:
                if current.lineno is None or override:
                    current.lineno = lineno
            pending.extend(current.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Attach the environment to this node and all children."""
        pending = deque([self])
        while pending:
            current = pending.popleft()
            current.environment = environment
            pending.extend(current.iter_child_nodes())
        return self

    def __eq__(self, other):
        return type(self) is type(other) and \
               tuple(self.iter_fields()) == tuple(other.iter_fields())

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (arg, getattr(self, arg, None))
                      for arg in self.fields)
        )
class Stmt(Node):
    """Abstract base node for all statements."""
    abstract = True


class Helper(Node):
    """Abstract base for nodes that only exist in a specific context."""
    abstract = True
class Template(Node):
    """Represents a whole template; this must be the outermost node
    passed to the compiler.
    """
    fields = ('body',)


class Output(Stmt):
    """Holds multiple expressions which are then printed out; used both
    for the `print` statement and for regular template data.
    """
    fields = ('nodes',)


class Extends(Stmt):
    """Represents an extends statement."""
    fields = ('template',)


class For(Stmt):
    """The for loop.  `target` is the iteration target (usually a
    :class:`Name` or :class:`Tuple`), `iter` the iterable, `body` the
    list of loop-body nodes and `else_` the nodes of the `else` block
    (an empty list when absent).  Filtered loops store their condition
    expression as `test`, otherwise `None`.
    """
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')


class If(Stmt):
    """Renders `body` if `test` is true, else `else_`."""
    fields = ('test', 'body', 'else_')


class Macro(Stmt):
    """A macro definition: `name` is the macro name, `args` the argument
    list, `defaults` the list of defaults (if any), and `body` the list
    of body nodes.
    """
    fields = ('name', 'args', 'defaults', 'body')


class CallBlock(Stmt):
    """Like a macro without a name but with a call instead; `call` is
    invoked with the unnamed macro this node holds as its `caller`
    argument.
    """
    fields = ('call', 'args', 'defaults', 'body')


class FilterBlock(Stmt):
    """Node for filter sections."""
    fields = ('body', 'filter')


class Block(Stmt):
    """A node that represents a template block."""
    fields = ('name', 'body', 'scoped')


class Include(Stmt):
    """A node that represents the include tag."""
    fields = ('template', 'with_context', 'ignore_missing')


class Import(Stmt):
    """A node that represents the import tag."""
    fields = ('template', 'target', 'with_context')


class FromImport(Stmt):
    """A node that represents the from-import tag.

    Only safe names may be placed in the names attribute: the compiler
    translates attribute lookups directly into getattr calls and does
    *not* use the subscript callback of the interface.  As exported
    variables may not start with double underscores (which the parser
    asserts) this is not a problem for regular Jinja code, but extensions
    using this node must take extra care.  The `names` list may contain
    tuples when aliases are wanted.
    """
    fields = ('template', 'names', 'with_context')


class ExprStmt(Stmt):
    """A statement that evaluates an expression and discards the
    result.
    """
    fields = ('node',)


class Assign(Stmt):
    """Assigns an expression to a target."""
    fields = ('target', 'node')
class Expr(Node):
    """Baseclass for all expressions."""
    abstract = True
    def as_const(self, eval_ctx=None):
        """Return the value of the expression as constant or raise
        :exc:`Impossible` if this was not possible.
        An :class:`EvalContext` can be provided, if none is given
        a default context is created which requires the nodes to have
        an attached environment.
        .. versionchanged:: 2.4
           the `eval_ctx` parameter was added.
        """
        raise Impossible()
    def can_assign(self):
        """Check if it's possible to assign something to this node."""
        return False
class BinExpr(Expr):
    """Baseclass for all binary expressions."""
    # `operator` is the operator symbol, set by each concrete subclass
    fields = ('left', 'right')
    operator = None
    abstract = True
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            # any failure during folding defers evaluation to runtime
            raise Impossible()
class UnaryExpr(Expr):
    """Baseclass for all unary expressions."""
    fields = ('node',)
    operator = None
    abstract = True
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            # any failure during folding defers evaluation to runtime
            raise Impossible()
class Name(Expr):
    """Looks up a name or stores a value in a name.
    The `ctx` of the node can be one of the following values:
    - `store`: store a value in the name
    - `load`: load that name
    - `param`: like `store` but if the name was defined as function parameter.
    """
    fields = ('name', 'ctx')
    def can_assign(self):
        # the boolean/none literals cannot be used as assignment targets
        return self.name not in ('true', 'false', 'none',
                                 'True', 'False', 'None')
class Literal(Expr):
    """Baseclass for literals."""
    abstract = True
class Const(Literal):
    """All constant values. The parser will return this node for simple
    constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
    """
    fields = ('value',)
    def as_const(self, eval_ctx=None):
        return self.value
    @classmethod
    def from_untrusted(cls, value, lineno=None, environment=None):
        """Return a const object if the value is representable as
        constant value in the generated code, otherwise it will raise
        an `Impossible` exception.
        """
        # imported lazily to avoid a circular import with the compiler
        from compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
    """A constant template string."""
    fields = ('data',)
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        if eval_ctx.autoescape:
            # autoescaping wraps the raw template data as markup
            return Markup(self.data)
        return self.data
class Tuple(Literal):
    """For loop unpacking and some other things like multiple arguments
    for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
    is used for loading the names or storing.
    """
    fields = ('items', 'ctx')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple(x.as_const(eval_ctx) for x in self.items)
    def can_assign(self):
        # a tuple is assignable only if every element is assignable
        for item in self.items:
            if not item.can_assign():
                return False
        return True
class List(Literal):
    """Any list literal such as ``[1, 2, 3]``"""
    fields = ('items',)
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
    """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
    :class:`Pair` nodes.
    """
    fields = ('items',)
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
    """A key, value pair for dicts."""
    fields = ('key', 'value')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
    """A key, value pair for keyword arguments where key is a string."""
    fields = ('key', 'value')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # the key is already a plain string, only the value is a node
        return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
    """A conditional expression (inline if expression). (``{{
    foo if bar else baz }}``)
    """
    fields = ('test', 'expr1', 'expr2')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)
        # if we evaluate to an undefined object, we better do that at runtime
        if self.expr2 is None:
            raise Impossible()
        return self.expr2.as_const(eval_ctx)
class Filter(Expr):
    """This node applies a filter on an expression. `name` is the name of
    the filter, the rest of the fields are the same as for :class:`Call`.
    If the `node` of a filter is `None` the contents of the last buffer are
    filtered. Buffers are created by macros and filter blocks.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # we have to be careful here because we call filter_ below.
        # if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # python 3. because of that, do not rename filter_ to filter!
        filter_ = self.environment.filters.get(self.name)
        # context filters need runtime information, cannot be folded
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [x.as_const(eval_ctx) for x in self.args]
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(obj, *args, **kwargs)
        except Exception:
            raise Impossible()
class Test(Expr):
    """Applies a test on an expression. `name` is the name of the test, the
    rest of the fields are the same as for :class:`Call`.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
    """Calls an expression. `args` is a list of arguments, `kwargs` a list
    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` has to be either `None` or a node that is used as
    node for dynamic positional (``*args``) or keyword (``**kwargs``)
    arguments.
    """
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        # don't evaluate context functions
        args = [x.as_const(eval_ctx) for x in self.args]
        if isinstance(obj, _context_function_types):
            if getattr(obj, 'contextfunction', False):
                raise Impossible()
            elif getattr(obj, 'evalcontextfunction', False):
                args.insert(0, eval_ctx)
            elif getattr(obj, 'environmentfunction', False):
                args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return obj(*args, **kwargs)
        except Exception:
            raise Impossible()
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""
    fields = ('node', 'arg', 'ctx')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # only loads can be folded, stores never can
        if self.ctx != 'load':
            raise Impossible()
        try:
            return self.environment.getitem(self.node.as_const(eval_ctx),
                                            self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()
    def can_assign(self):
        return False
class Getattr(Expr):
    """Get an attribute or item from an expression that is a ascii-only
    bytestring and prefer the attribute.
    """
    fields = ('node', 'attr', 'ctx')
    def as_const(self, eval_ctx=None):
        # only loads can be folded, stores never can
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx),
                                            self.attr)
        except Exception:
            raise Impossible()
    def can_assign(self):
        return False
class Slice(Expr):
    """Represents a slice object. This must only be used as argument for
    :class:`Subscript`.
    """
    fields = ('start', 'stop', 'step')
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        def const(obj):
            # any of start/stop/step may be absent (None)
            if obj is None:
                return None
            return obj.as_const(eval_ctx)
        return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
    """Concatenates the list of expressions provided after converting them to
    unicode.
    """
    fields = ('nodes',)
    def as_const(self, eval_ctx=None):
        # NOTE: relies on the Python 2 `unicode` builtin
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
    """Compares an expression with some other expressions. `ops` must be a
    list of :class:`Operand`\s.
    """
    fields = ('expr', 'ops')
    def as_const(self, eval_ctx=None):
        """Constant-fold the comparison chain.

        Python chained comparisons (``a < b < c``) are equivalent to
        ``a < b and b < c`` with short circuiting.  The previous
        implementation returned only the result of the *last* pairwise
        comparison, so ``1 < 2 < 0`` would have folded incorrectly;
        here we bail out as soon as any pairwise comparison is falsy.
        """
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)
                # short circuit like Python's chained comparisons do
                if not result:
                    return False
                value = new_value
        except Exception:
            raise Impossible()
        return result
class Operand(Helper):
    """Holds an operator and an expression."""
    fields = ('op', 'expr')
# append the available operator list to the docstring, but only in
# non-optimized runs where docstrings are guaranteed to exist
if __debug__:
    Operand.__doc__ += '\nThe following operators are available: ' + \
        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
                         set(_uaop_to_func) | set(_cmpop_to_func)))
# -- concrete operator nodes: each only sets the `operator` symbol that
# BinExpr/UnaryExpr use to look up the folding function
class Mul(BinExpr):
    """Multiplies the left with the right node."""
    operator = '*'
class Div(BinExpr):
    """Divides the left by the right node."""
    operator = '/'
class FloorDiv(BinExpr):
    """Divides the left by the right node and truncates the result
    to an integer (floor division).
    """
    operator = '//'
class Add(BinExpr):
    """Add the left to the right node."""
    operator = '+'
class Sub(BinExpr):
    """Subtract the right from the left node."""
    operator = '-'
class Mod(BinExpr):
    """Left modulo right."""
    operator = '%'
class Pow(BinExpr):
    """Left to the power of right."""
    operator = '**'
class And(BinExpr):
    """Short circuited AND."""
    operator = 'and'
    def as_const(self, eval_ctx=None):
        # folded directly instead of via _binop_to_func so that
        # `and` keeps its short-circuit behavior
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
    """Short circuited OR."""
    operator = 'or'
    def as_const(self, eval_ctx=None):
        # folded directly instead of via _binop_to_func so that
        # `or` keeps its short-circuit behavior
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
    """Negate the expression."""
    operator = 'not'
class Neg(UnaryExpr):
    """Make the expression negative."""
    operator = '-'
class Pos(UnaryExpr):
    """Make the expression positive (noop for most expressions)"""
    operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object. This is useful for
    extensions that want to call a callback stored on the environment.
    """
    fields = ('name',)
class ExtensionAttribute(Expr):
    """Returns the attribute of an extension bound to the environment.
    The identifier is the identifier of the :class:`Extension`.
    This node is usually constructed by calling the
    :meth:`~jinja2.ext.Extension.attr` method on an extension.
    """
    fields = ('identifier', 'name')
class ImportedName(Expr):
    """If created with an import name the import name is returned on node
    access. For example ``ImportedName('cgi.escape')`` returns the `escape`
    function from the cgi module on evaluation. Imports are optimized by the
    compiler so there is no need to assign them to local variables.
    """
    fields = ('importname',)
class InternalName(Expr):
    """An internal name in the compiler. You cannot create these nodes
    yourself but the parser provides a
    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
    a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
    """
    fields = ('name',)
    def __init__(self):
        # direct construction is forbidden by design
        raise TypeError('Can\'t create internal names. Use the '
                        '`free_identifier` method on a parser.')
class MarkSafe(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
    fields = ('expr',)
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`) but
    only if autoescaping is active.
    .. versionadded:: 2.5
    """
    fields = ('expr',)
    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
class ContextReference(Expr):
    """Returns the current template context. It can be used like a
    :class:`Name` node, with a ``'load'`` ctx and will return the
    current :class:`~jinja2.runtime.Context` object.
    Here an example that assigns the current template name to a
    variable named `foo`::
        Assign(Name('foo', ctx='store'),
               Getattr(ContextReference(), 'name'))
    """
# loop control statements -- these carry no fields at all
class Continue(Stmt):
    """Continue a loop."""
class Break(Stmt):
    """Break a loop."""
class Scope(Stmt):
    """An artificial scope."""
    fields = ('body',)
class EvalContextModifier(Stmt):
    """Modifies the eval context. For each option that should be modified,
    a :class:`Keyword` has to be added to the :attr:`options` list.
    Example to change the `autoescape` setting::
        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
    """
    fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
    """Modifies the eval context and reverts it later. Works exactly like
    :class:`EvalContextModifier` but will only modify the
    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
    """
    fields = ('body',)
# forbid the creation of new node classes from outside this module by
# replacing the metaclass constructor with one that always raises
def _failing_new(*args, **kwargs):
    raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| mit | da1402c6f9aff67a9ba767982264013c | 30.593407 | 81 | 0.600243 | 4.027739 | false | false | false | false |
mikedh/trimesh | tests/test_base.py | 1 | 3393 | """
Test the base trimesh.Trimesh object.
"""
try:
from . import generic as g
except BaseException:
import generic as g
class MeshTests(g.unittest.TestCase):
    def test_vertex_neighbors(self):
        # every vertex-neighbor pair must correspond to a unique edge
        m = g.trimesh.primitives.Box()
        neighbors = m.vertex_neighbors
        assert len(neighbors) == len(m.vertices)
        elist = m.edges_unique.tolist()
        for v_i, neighs in enumerate(neighbors):
            for n in neighs:
                assert ([v_i, n] in elist or [n, v_i] in elist)
    def test_validate(self):
        """
        Make sure meshes with validation work
        """
        m = g.get_mesh('featuretype.STL', validate=True)
        assert m.is_volume
        pre_len = len(m.vertices)
        pre_vol = m.volume
        m.remove_unreferenced_vertices()
        assert len(m.vertices) == pre_len
        assert g.np.isclose(m.volume, pre_vol)
        # add some unreferenced vertices
        m.vertices = g.np.vstack((m.vertices, g.np.random.random((100, 3))))
        assert len(m.vertices) == pre_len + 100
        # unreferenced vertices must not affect the volume
        assert g.np.isclose(m.volume, pre_vol)
        m.remove_unreferenced_vertices()
        assert len(m.vertices) == pre_len
        assert g.np.isclose(m.volume, pre_vol)
    def test_validate_inversion(self):
        """Make sure inverted meshes are fixed by `validate=True`"""
        orig_mesh = g.get_mesh("unit_cube.STL")
        orig_verts = orig_mesh.vertices.copy()
        orig_faces = orig_mesh.faces.copy()
        orig_face_set = {tuple(row) for row in orig_faces}
        # reverse the winding of every face
        inv_faces = orig_faces[:, ::-1]
        inv_mesh = g.Trimesh(orig_verts, inv_faces, validate=False)
        assert {tuple(row) for row in inv_mesh.faces} != orig_face_set
        fixed_mesh = g.Trimesh(orig_verts, inv_faces, validate=True)
        assert {tuple(row) for row in fixed_mesh.faces} == orig_face_set
    def test_none(self):
        """
        Make sure mesh methods don't return None or crash.
        """
        # a radially symmetric mesh with units
        # should have no properties that are None
        mesh = g.get_mesh('tube.obj')
        mesh.units = 'in'
        # loop through string property names
        for method in dir(mesh):
            # ignore private- ish methods
            if method.startswith('_'):
                continue
            # a string expression to evaluate
            expr = 'mesh.{}'.format(method)
            try:
                # get the value of that expression
                res = eval(expr)
            except ImportError:
                g.log.warning('unable to import!', exc_info=True)
                continue
            # shouldn't be None!
            assert res is not None
        # check methods in scene objects
        scene = mesh.scene()
        # camera will be None unless set
        blacklist = ['camera']
        for method in dir(scene):
            # ignore private- ish methods
            if method.startswith('_') or method in blacklist:
                continue
            # a string expression to evaluate
            expr = 'scene.{}'.format(method)
            # get the value of that expression
            res = eval(expr)
            # shouldn't be None!
            if res is None:
                raise ValueError('"{}" is None!!'.format(expr))
if __name__ == '__main__':
    # attach a console log handler and run all tests when executed directly
    g.trimesh.util.attach_to_log()
    g.unittest.main()
| mit | 1491cd10c9bd74166e8e602d9533f815 | 30.71028 | 76 | 0.563218 | 3.945349 | false | true | false | false |
mikedh/trimesh | trimesh/base.py | 1 | 97401 | """
github.com/mikedh/trimesh
----------------------------
Library for importing, exporting and doing simple operations on triangular meshes.
"""
from . import ray
from . import util
from . import units
from . import poses
from . import graph
from . import sample
from . import repair
from . import convex
from . import remesh
from . import caching
from . import inertia
from . import boolean
from . import grouping
from . import geometry
from . import permutate
from . import proximity
from . import triangles
from . import curvature
from . import smoothing # noqa
from . import comparison
from . import registration
from . import decomposition
from . import intersections
from . import transformations
from .visual import create_visual, TextureVisuals
from .exchange.export import export_mesh
from .constants import log, log_time, tol
from .scene import Scene
from .parent import Geometry3D
import copy
import warnings
import numpy as np
class Trimesh(Geometry3D):
    def __init__(self,
                 vertices=None,
                 faces=None,
                 face_normals=None,
                 vertex_normals=None,
                 face_colors=None,
                 vertex_colors=None,
                 face_attributes=None,
                 vertex_attributes=None,
                 metadata=None,
                 process=True,
                 validate=False,
                 merge_tex=None,
                 merge_norm=None,
                 use_embree=True,
                 initial_cache=None,
                 visual=None,
                 **kwargs):
        """
        A Trimesh object contains a triangular 3D mesh.
        Parameters
        ------------
        vertices : (n, 3) float
          Array of vertex locations
        faces : (m, 3) or (m, 4) int
          Array of triangular or quad faces (triangulated on load)
        face_normals : (m, 3) float
          Array of normal vectors corresponding to faces
        vertex_normals : (n, 3) float
          Array of normal vectors for vertices
        face_colors : array
          Per-face colors, forwarded to the visual object
        vertex_colors : array
          Per-vertex colors, forwarded to the visual object
        face_attributes : dict
          Arbitrary per-face data, copied into self.face_attributes
        vertex_attributes : dict
          Arbitrary per-vertex data, copied into self.vertex_attributes
        metadata : dict
          Any metadata about the mesh
        process : bool
          if True, Nan and Inf values will be removed
          immediately and vertices will be merged
        validate : bool
          If True, degenerate and duplicate faces will be
          removed immediately, and some functions will alter
          the mesh to ensure consistent results.
        merge_tex : bool
          Forwarded to `merge_vertices` during processing
        merge_norm : bool
          Forwarded to `merge_vertices` during processing
        use_embree : bool
          If True try to use pyembree raytracer.
          If pyembree is not available it will automatically fall
          back to a much slower rtree/numpy implementation
        initial_cache : dict
          A way to pass things to the cache in case expensive
          things were calculated before creating the mesh object.
        visual : ColorVisuals or TextureVisuals
          Assigned to self.visual
        """
        if initial_cache is None:
            initial_cache = {}
        # self._data stores information about the mesh which
        # CANNOT be regenerated.
        # in the base class all that is stored here is vertex and
        # face information
        # any data put into the store is converted to a TrackedArray
        # which is a subclass of np.ndarray that provides hash and crc
        # methods which can be used to detect changes in the array.
        self._data = caching.DataStore()
        # self._cache stores information about the mesh which CAN be
        # regenerated from self._data, but may be slow to calculate.
        # In order to maintain consistency
        # the cache is cleared when self._data.crc() changes
        self._cache = caching.Cache(
            id_function=self._data.__hash__,
            force_immutable=True)
        self._cache.update(initial_cache)
        # check for None only to avoid warning messages in subclasses
        if vertices is not None:
            # (n, 3) float, set of vertices
            self.vertices = vertices
        if faces is not None:
            # (m, 3) int of triangle faces, references self.vertices
            self.faces = faces
        # hold visual information about the mesh (vertex and face colors)
        if visual is None:
            self.visual = create_visual(
                face_colors=face_colors,
                vertex_colors=vertex_colors,
                mesh=self)
        else:
            self.visual = visual
        # normals are accessed through setters/properties and are regenerated
        # if dimensions are inconsistent, but can be set by the constructor
        # to avoid a substantial number of cross products
        if face_normals is not None:
            self.face_normals = face_normals
        # (n, 3) float of vertex normals, can be created from face normals
        if vertex_normals is not None:
            self.vertex_normals = vertex_normals
        # embree is a much, much faster raytracer written by Intel
        # if you have pyembree installed you should use it
        # although both raytracers were designed to have a common API
        if ray.has_embree and use_embree:
            self.ray = ray.ray_pyembree.RayMeshIntersector(self)
        else:
            # create a ray-mesh query object for the current mesh
            # initializing is very inexpensive and object is convenient to have.
            # On first query expensive bookkeeping is done (creation of r-tree),
            # and is cached for subsequent queries
            self.ray = ray.ray_triangle.RayMeshIntersector(self)
        # a quick way to get permuted versions of the current mesh
        self.permutate = permutate.Permutator(self)
        # convenience class for nearest point queries
        self.nearest = proximity.ProximityQuery(self)
        # store metadata about the mesh in a dictionary
        self.metadata = dict()
        # update the mesh metadata with passed metadata
        if isinstance(metadata, dict):
            self.metadata.update(metadata)
        elif metadata is not None:
            raise ValueError(
                'metadata should be a dict or None, got %s' % str(metadata))
        # Set the default center of mass and density
        self._density = 1.0
        self._center_mass = None
        # store per-face and per-vertex attributes which will
        # be updated when an update_faces call is made
        self.face_attributes = {}
        self.vertex_attributes = {}
        # use update to copy items
        if face_attributes is not None:
            self.face_attributes.update(face_attributes)
        if vertex_attributes is not None:
            self.vertex_attributes.update(vertex_attributes)
        # process will remove NaN and Inf values and merge vertices
        # if validate, will remove degenerate and duplicate faces
        if process or validate:
            self.process(validate=validate,
                         merge_tex=merge_tex,
                         merge_norm=merge_norm)
        # save reference to kwargs
        self._kwargs = kwargs
    def process(self,
                validate=False,
                merge_tex=None,
                merge_norm=None):
        """
        Do processing to make a mesh useful.
        Does this by:
            1) removing NaN and Inf values
            2) merging duplicate vertices
        If validate:
            3) Remove triangles which have one edge
               of their 2D oriented bounding box
               shorter than tol.merge
            4) remove duplicated triangles
            5) ensure triangles are consistently wound
               and normals face outwards
        Parameters
        ------------
        validate : bool
          Remove degenerate and duplicate faces.
        merge_tex : bool
          Forwarded to `merge_vertices`
        merge_norm : bool
          Forwarded to `merge_vertices`
        Returns
        ------------
        self: trimesh.Trimesh
          Current mesh
        """
        # if there are no vertices or faces exit early
        if self.is_empty:
            return self
        # avoid clearing the cache during operations
        with self._cache:
            self.remove_infinite_values()
            self.merge_vertices(merge_tex=merge_tex,
                                merge_norm=merge_norm)
            # if we're cleaning remove duplicate
            # and degenerate faces
            if validate:
                self.remove_duplicate_faces()
                self.remove_degenerate_faces()
                self.fix_normals()
        # since none of our process operations moved vertices or faces
        # we can keep face and vertex normals in the cache without recomputing
        # if faces or vertices have been removed, normals are validated before
        # being returned so there is no danger of inconsistent dimensions
        self._cache.clear(exclude={'face_normals',
                                   'vertex_normals'})
        self.metadata['processed'] = True
        return self
@property
def faces(self):
"""
The faces of the mesh.
This is regarded as core information which cannot be
regenerated from cache and as such is stored in
`self._data` which tracks the array for changes and
clears cached values of the mesh altered.
Returns
----------
faces : (n, 3) int64
References for `self.vertices` for triangles.
"""
return self._data.get(
'faces', np.empty(shape=(0, 3), dtype=np.int64))
    @faces.setter
    def faces(self, values):
        """
        Set the vertex indexes that make up triangular faces.
        Parameters
        --------------
        values : (n, 3) int64
          Indexes of self.vertices
        """
        # passing None or an empty sequence removes faces entirely
        if values is None or len(values) == 0:
            return self._data.data.pop('faces', None)
        # avoid a copy when the array is already int64
        if not (isinstance(values, np.ndarray) and values.dtype == np.int64):
            values = np.asanyarray(values, dtype=np.int64)
        # automatically triangulate quad faces
        if len(values.shape) == 2 and values.shape[1] != 3:
            log.info('triangulating faces')
            values = geometry.triangulate_quads(values)
        self._data['faces'] = values
@caching.cache_decorator
def faces_sparse(self):
"""
A sparse matrix representation of the faces.
Returns
----------
sparse : scipy.sparse.coo_matrix
Has properties:
dtype : bool
shape : (len(self.vertices), len(self.faces))
"""
sparse = geometry.index_sparse(
columns=len(self.vertices),
indices=self.faces)
return sparse
@property
def face_normals(self):
"""
Return the unit normal vector for each face.
If a face is degenerate and a normal can't be generated
a zero magnitude unit vector will be returned for that face.
Returns
-----------
normals : (len(self.faces), 3) np.float64
Normal vectors of each face
"""
# check shape of cached normals
cached = self._cache['face_normals']
# get faces from datastore
if 'faces' in self._data:
faces = self._data.data['faces']
else:
faces = None
# if we have no faces exit early
if faces is None or len(faces) == 0:
return np.array([], dtype=np.int64).reshape((0, 3))
# if the shape of cached normals equals the shape of faces return
if np.shape(cached) == np.shape(faces):
return cached
log.debug('generating face normals')
# use cached triangle cross products to generate normals
# this will always return the correct shape but some values
# will be zero or an arbitrary vector if the inputs had
# a cross product below machine epsilon
normals, valid = triangles.normals(
triangles=self.triangles,
crosses=self.triangles_cross)
# if all triangles are valid shape is correct
if valid.all():
# put calculated face normals into cache manually
self._cache['face_normals'] = normals
return normals
# make a padded list of normals for correct shape
padded = np.zeros((len(self.triangles), 3),
dtype=np.float64)
padded[valid] = normals
# put calculated face normals into cache manually
self._cache['face_normals'] = padded
return padded
    @face_normals.setter
    def face_normals(self, values):
        """
        Assign values to face normals.

        Candidate normals are silently ignored (with a debug log)
        when they have the wrong shape, contain non-finite values,
        are all zero, or don't match a spot-check of the first few
        triangles.
        Parameters
        -------------
        values : (len(self.faces), 3) float
          Unit face normals
        """
        # if nothing passed exit
        if values is None:
            return
        # make sure candidate face normals are C-contiguous float
        values = np.asanyarray(
            values, order='C', dtype=np.float64)
        # face normals need to correspond to faces
        if len(values) == 0 or values.shape != self.faces.shape:
            log.debug('face_normals incorrect shape, ignoring!')
            return
        # check if any values are larger than tol.merge
        # don't set the normals if they are all zero
        ptp = values.ptp()
        if not np.isfinite(ptp):
            log.debug('face_normals contain NaN, ignoring!')
            return
        if ptp < tol.merge:
            log.debug('face_normals all zero, ignoring!')
            return
        # make sure the first few normals match the first few triangles
        check, valid = triangles.normals(
            self.vertices.view(np.ndarray)[self.faces[:20]])
        compare = np.zeros((len(valid), 3))
        compare[valid] = check
        if not np.allclose(compare, values[:20]):
            log.debug("face_normals didn't match triangles, ignoring!")
            return
        # otherwise store face normals
        self._cache['face_normals'] = values
@property
def vertices(self):
"""
The vertices of the mesh.
This is regarded as core information which cannot be
generated from cache and as such is stored in self._data
which tracks the array for changes and clears cached
values of the mesh if this is altered.
Returns
----------
vertices : (n, 3) float
Points in cartesian space referenced by self.faces
"""
return self._data.get('vertices', np.empty(shape=(0, 3), dtype=np.float64))
@vertices.setter
def vertices(self, values):
"""
Assign vertex values to the mesh.
Parameters
--------------
values : (n, 3) float
Points in space
"""
self._data['vertices'] = np.asanyarray(
values, order='C', dtype=np.float64)
    @caching.cache_decorator
    def vertex_normals(self):
        """
        The vertex normals of the mesh. If the normals were loaded
        we check to make sure we have the same number of vertex
        normals and vertices before returning them. If there are
        no vertex normals defined or a shape mismatch we calculate
        the vertex normals from the mean normals of the faces the
        vertex is used in.
        Returns
        ----------
        vertex_normals : (n, 3) float
          Represents the surface normal at each vertex.
          Where n == len(self.vertices)
        """
        # make sure we have faces_sparse
        assert hasattr(self.faces_sparse, 'dot')
        # combine face normals per-vertex, passing the face corner
        # angles in as weights
        vertex_normals = geometry.weighted_vertex_normals(
            vertex_count=len(self.vertices),
            faces=self.faces,
            face_normals=self.face_normals,
            face_angles=self.face_angles)
        return vertex_normals
    @vertex_normals.setter
    def vertex_normals(self, values):
        """
        Assign values to vertex normals.

        Values whose shape doesn't match `self.vertices` are
        silently ignored.
        Parameters
        -------------
        values : (len(self.vertices), 3) float
          Unit normal vectors for each vertex
        """
        if values is not None:
            values = np.asanyarray(values,
                                   order='C',
                                   dtype=np.float64)
            if values.shape == self.vertices.shape:
                # check to see if they assigned all zeros
                # NOTE: this only logs; all-zero values are still stored
                if values.ptp() < tol.merge:
                    log.debug('vertex_normals are all zero!')
                self._cache['vertex_normals'] = values
@caching.cache_decorator
def vertex_faces(self):
"""
A representation of the face indices that correspond to each vertex.
Returns
----------
vertex_faces : (n,m) int
Each row contains the face indices that correspond to the given vertex,
padded with -1 up to the max number of faces corresponding to any one vertex
Where n == len(self.vertices), m == max number of faces for a single vertex
"""
vertex_faces = geometry.vertex_face_indices(
vertex_count=len(self.vertices),
faces=self.faces,
faces_sparse=self.faces_sparse)
return vertex_faces
@caching.cache_decorator
def bounds(self):
"""
The axis aligned bounds of the faces of the mesh.
Returns
-----------
bounds : (2, 3) float or None
Bounding box with [min, max] coordinates
If mesh is empty will return None
"""
# return bounds including ONLY referenced vertices
in_mesh = self.vertices[self.referenced_vertices]
# don't crash if we have no vertices referenced
if len(in_mesh) == 0:
return None
# get mesh bounds with min and max
return np.array([in_mesh.min(axis=0),
in_mesh.max(axis=0)])
@caching.cache_decorator
def extents(self):
"""
The length, width, and height of the axis aligned
bounding box of the mesh.
Returns
-----------
extents : (3, ) float or None
Array containing axis aligned [length, width, height]
If mesh is empty returns None
"""
# if mesh is empty return None
if self.bounds is None:
return None
extents = self.bounds.ptp(axis=0)
return extents
@caching.cache_decorator
def scale(self):
"""
A metric for the overall scale of the mesh, the length of the
diagonal of the axis aligned bounding box of the mesh.
Returns
----------
scale : float
The length of the meshes AABB diagonal
"""
# if mesh is empty just return no scale
if self.extents is None:
return 1.0
# make sure we are returning python floats
scale = float((self.extents ** 2).sum() ** .5)
return scale
@caching.cache_decorator
def centroid(self):
"""
The point in space which is the average of the triangle
centroids weighted by the area of each triangle.
This will be valid even for non-watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3, ) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
try:
centroid = np.average(self.triangles_center,
weights=self.area_faces,
axis=0)
except BaseException:
# if all triangles are zero-area weights will not work
centroid = self.triangles_center.mean(axis=0)
return centroid
@property
def center_mass(self):
"""
The point in space which is the center of mass/volume.
If the current mesh is not watertight this is meaningless
garbage unless it was explicitly set.
Returns
-----------
center_mass : (3, ) float
Volumetric center of mass of the mesh
"""
center_mass = self.mass_properties['center_mass']
return center_mass
    @center_mass.setter
    def center_mass(self, cm):
        """
        Explicitly override the volumetric center of mass.

        Parameters
        ------------
        cm : (3, ) float
          Center of mass in mesh coordinates
        """
        self._center_mass = cm
        # mass_properties depends on the center of mass
        # so the cached value must be invalidated
        self._cache.delete('mass_properties')
@property
def density(self):
"""
The density of the mesh.
Returns
-----------
density : float
The density of the mesh.
"""
density = self.mass_properties['density']
return density
@density.setter
def density(self, value):
"""
Set the density of the mesh.
Parameters
-------------
density : float
Specify the density of the mesh to be
used in inertia calculations.
"""
self._density = float(value)
self._cache.delete('mass_properties')
@property
def volume(self):
"""
Volume of the current mesh calculated using a surface
integral. If the current mesh isn't watertight this is
garbage.
Returns
---------
volume : float
Volume of the current mesh
"""
volume = self.mass_properties['volume']
return volume
@property
def mass(self):
"""
Mass of the current mesh, based on specified density and
volume. If the current mesh isn't watertight this is garbage.
Returns
---------
mass : float
Mass of the current mesh
"""
mass = self.mass_properties['mass']
return mass
@property
def moment_inertia(self):
"""
Return the moment of inertia matrix of the current mesh.
If mesh isn't watertight this is garbage.
Returns
---------
inertia : (3, 3) float
Moment of inertia of the current mesh
"""
inertia = self.mass_properties['inertia']
return inertia
@caching.cache_decorator
def principal_inertia_components(self):
"""
Return the principal components of inertia
Ordering corresponds to mesh.principal_inertia_vectors
Returns
----------
components : (3, ) float
Principal components of inertia
"""
# both components and vectors from inertia matrix
components, vectors = inertia.principal_axis(self.moment_inertia)
# store vectors in cache for later
self._cache['principal_inertia_vectors'] = vectors
return components
@property
def principal_inertia_vectors(self):
"""
Return the principal axis of inertia as unit vectors.
The order corresponds to `mesh.principal_inertia_components`.
Returns
----------
vectors : (3, 3) float
Three vectors pointing along the
principal axis of inertia directions
"""
_ = self.principal_inertia_components
return self._cache['principal_inertia_vectors']
    @caching.cache_decorator
    def principal_inertia_transform(self):
        """
        A transform which moves the current mesh so the principal
        inertia vectors are on the X, Y, and Z axis, and the
        centroid is at the origin.

        Returns
        ----------
        transform : (4, 4) float
          Homogeneous transformation matrix
        """
        # pick the two largest inertia components in descending order
        order = np.argsort(self.principal_inertia_components)[1:][::-1]
        vectors = self.principal_inertia_vectors[order]
        # complete a basis with the cross product of the two vectors
        vectors = np.vstack((vectors, np.cross(*vectors)))
        # rotation-only homogeneous matrix from the basis
        transform = np.eye(4)
        transform[:3, :3] = vectors
        # rotate around the centroid rather than the origin
        transform = transformations.transform_around(
            matrix=transform,
            point=self.centroid)
        # then translate the centroid to the origin
        transform[:3, 3] -= self.centroid
        return transform
@caching.cache_decorator
def symmetry(self):
"""
Check whether a mesh has rotational symmetry around
an axis (radial) or point (spherical).
Returns
-----------
symmetry : None, 'radial', 'spherical'
What kind of symmetry does the mesh have.
"""
symmetry, axis, section = inertia.radial_symmetry(self)
self._cache['symmetry_axis'] = axis
self._cache['symmetry_section'] = section
return symmetry
@property
def symmetry_axis(self):
"""
If a mesh has rotational symmetry, return the axis.
Returns
------------
axis : (3, ) float
Axis around which a 2D profile was revolved to create this mesh.
"""
if self.symmetry is not None:
return self._cache['symmetry_axis']
@property
def symmetry_section(self):
"""
If a mesh has rotational symmetry return the two
vectors which make up a section coordinate frame.
Returns
----------
section : (2, 3) float
Vectors to take a section along
"""
if self.symmetry is not None:
return self._cache['symmetry_section']
@caching.cache_decorator
def triangles(self):
"""
Actual triangles of the mesh (points, not indexes)
Returns
---------
triangles : (n, 3, 3) float
Points of triangle vertices
"""
# use of advanced indexing on our tracked arrays will
# trigger a change flag which means the hash will have to be
# recomputed. We can escape this check by viewing the array.
triangles = self.vertices.view(np.ndarray)[self.faces]
return triangles
@caching.cache_decorator
def triangles_tree(self):
"""
An R-tree containing each face of the mesh.
Returns
----------
tree : rtree.index
Each triangle in self.faces has a rectangular cell
"""
tree = triangles.bounds_tree(self.triangles)
return tree
@caching.cache_decorator
def triangles_center(self):
"""
The center of each triangle (barycentric [1/3, 1/3, 1/3])
Returns
---------
triangles_center : (len(self.faces), 3) float
Center of each triangular face
"""
triangles_center = self.triangles.mean(axis=1)
return triangles_center
@caching.cache_decorator
def triangles_cross(self):
"""
The cross product of two edges of each triangle.
Returns
---------
crosses : (n, 3) float
Cross product of each triangle
"""
crosses = triangles.cross(self.triangles)
return crosses
@caching.cache_decorator
def edges(self):
"""
Edges of the mesh (derived from faces).
Returns
---------
edges : (n, 2) int
List of vertex indices making up edges
"""
edges, index = geometry.faces_to_edges(self.faces.view(np.ndarray),
return_index=True)
self._cache['edges_face'] = index
return edges
@caching.cache_decorator
def edges_face(self):
"""
Which face does each edge belong to.
Returns
---------
edges_face : (n, ) int
Index of self.faces
"""
_ = self.edges
return self._cache['edges_face']
@caching.cache_decorator
def edges_unique(self):
"""
The unique edges of the mesh.
Returns
----------
edges_unique : (n, 2) int
Vertex indices for unique edges
"""
unique, inverse = grouping.unique_rows(self.edges_sorted)
edges_unique = self.edges_sorted[unique]
# edges_unique will be added automatically by the decorator
# additional terms generated need to be added to the cache manually
self._cache['edges_unique_idx'] = unique
self._cache['edges_unique_inverse'] = inverse
return edges_unique
@caching.cache_decorator
def edges_unique_length(self):
"""
How long is each unique edge.
Returns
----------
length : (len(self.edges_unique), ) float
Length of each unique edge
"""
vector = np.subtract(*self.vertices[self.edges_unique.T])
length = util.row_norm(vector)
return length
@caching.cache_decorator
def edges_unique_inverse(self):
"""
Return the inverse required to reproduce
self.edges_sorted from self.edges_unique.
Useful for referencing edge properties:
mesh.edges_unique[mesh.edges_unique_inverse] == m.edges_sorted
Returns
----------
inverse : (len(self.edges), ) int
Indexes of self.edges_unique
"""
_ = self.edges_unique
return self._cache['edges_unique_inverse']
@caching.cache_decorator
def edges_sorted(self):
"""
Edges sorted along axis 1
Returns
----------
edges_sorted : (n, 2)
Same as self.edges but sorted along axis 1
"""
edges_sorted = np.sort(self.edges, axis=1)
return edges_sorted
@caching.cache_decorator
def edges_sorted_tree(self):
"""
A KDTree for mapping edges back to edge index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with edges will return
their index in mesh.edges_sorted
"""
from scipy.spatial import cKDTree
return cKDTree(self.edges_sorted)
@caching.cache_decorator
def edges_sparse(self):
"""
Edges in sparse bool COO graph format where connected
vertices are True.
Returns
----------
sparse: (len(self.vertices), len(self.vertices)) bool
Sparse graph in COO format
"""
sparse = graph.edges_to_coo(self.edges,
count=len(self.vertices))
return sparse
@caching.cache_decorator
def body_count(self):
"""
How many connected groups of vertices exist in this mesh.
Note that this number may differ from result in mesh.split,
which is calculated from FACE rather than vertex adjacency.
Returns
-----------
count : int
Number of connected vertex groups
"""
# labels are (len(vertices), int) OB
count, labels = graph.csgraph.connected_components(
self.edges_sparse,
directed=False,
return_labels=True)
self._cache['vertices_component_label'] = labels
return count
@caching.cache_decorator
def faces_unique_edges(self):
"""
For each face return which indexes in mesh.unique_edges constructs
that face.
Returns
---------
faces_unique_edges : (len(self.faces), 3) int
Indexes of self.edges_unique that
construct self.faces
Examples
---------
In [0]: mesh.faces[:2]
Out[0]:
TrackedArray([[ 1, 6946, 24224],
[ 6946, 1727, 24225]])
In [1]: mesh.edges_unique[mesh.faces_unique_edges[:2]]
Out[1]:
array([[[ 1, 6946],
[ 6946, 24224],
[ 1, 24224]],
[[ 1727, 6946],
[ 1727, 24225],
[ 6946, 24225]]])
"""
# make sure we have populated unique edges
_ = self.edges_unique
# we are relying on the fact that edges are stacked in triplets
result = self._cache['edges_unique_inverse'].reshape((-1, 3))
return result
@caching.cache_decorator
def euler_number(self):
"""
Return the Euler characteristic (a topological invariant) for the mesh
In order to guarantee correctness, this should be called after
remove_unreferenced_vertices
Returns
----------
euler_number : int
Topological invariant
"""
euler = int(self.referenced_vertices.sum() -
len(self.edges_unique) +
len(self.faces))
return euler
@caching.cache_decorator
def referenced_vertices(self):
"""
Which vertices in the current mesh are referenced by a face.
Returns
-------------
referenced : (len(self.vertices), ) bool
Which vertices are referenced by a face
"""
referenced = np.zeros(len(self.vertices), dtype=bool)
referenced[self.faces] = True
return referenced
@property
def units(self):
"""
Definition of units for the mesh.
Returns
----------
units : str
Unit system mesh is in, or None if not defined
"""
if 'units' in self.metadata:
return self.metadata['units']
else:
return None
    @units.setter
    def units(self, value):
        """
        Store a unit system for the mesh in its metadata.

        Parameters
        ------------
        value : str
          Unit system, e.g. 'inches' or 'mm';
          stored lowercased
        """
        # NOTE(review): passing None stores the string 'none'
        # rather than clearing the key — confirm this is intended
        value = str(value).lower()
        self.metadata['units'] = value
def convert_units(self, desired, guess=False):
"""
Convert the units of the mesh into a specified unit.
Parameters
------------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert?
"""
units._convert_units(self, desired, guess)
return self
def merge_vertices(
self,
merge_tex=None,
merge_norm=None,
digits_vertex=None,
digits_norm=None,
digits_uv=None):
"""
Removes duplicate vertices grouped by position and
optionally texture coordinate and normal.
Parameters
-------------
mesh : Trimesh object
Mesh to merge vertices on
merge_tex : bool
If True textured meshes with UV coordinates will
have vertices merged regardless of UV coordinates
merge_norm : bool
If True, meshes with vertex normals will have
vertices merged ignoring different normals
digits_vertex : None or int
Number of digits to consider for vertex position
digits_norm : int
Number of digits to consider for unit normals
digits_uv : int
Number of digits to consider for UV coordinates
"""
grouping.merge_vertices(
mesh=self,
merge_tex=merge_tex,
merge_norm=merge_norm,
digits_vertex=digits_vertex,
digits_norm=digits_norm,
digits_uv=digits_uv)
    def update_vertices(self, mask, inverse=None):
        """
        Remove vertices with a mask, re-indexing faces,
        visuals and vertex attributes to match.

        Parameters
        ------------
        mask : (len(self.vertices), ) bool or (m, ) int
          Which vertices to keep
        inverse : (len(self.vertices), ) int or None
          Array to reconstruct vertex references,
          such as output by np.unique
        """
        # if the mesh is already empty we can't remove anything
        if self.is_empty:
            return
        # make sure mask is a numpy array
        mask = np.asanyarray(mask)
        # NOTE(review): `self.is_empty` was already checked above
        # so the final term of this condition is redundant
        if ((mask.dtype.name == 'bool' and mask.all()) or
                len(mask) == 0 or self.is_empty):
            # mask doesn't remove any vertices so exit early
            return
        # create the inverse mask if not passed
        if inverse is None:
            inverse = np.zeros(len(self.vertices), dtype=np.int64)
            if mask.dtype.kind == 'b':
                inverse[mask] = np.arange(mask.sum())
            elif mask.dtype.kind == 'i':
                inverse[mask] = np.arange(len(mask))
            else:
                # unsupported mask dtype: skip face re-indexing
                inverse = None
        # re-index faces from inverse
        if inverse is not None and util.is_shape(self.faces, (-1, 3)):
            self.faces = inverse[self.faces.reshape(-1)].reshape((-1, 3))
        # update the visual object with our mask
        self.visual.update_vertices(mask)
        # get the normals from cache before dumping
        cached_normals = self._cache['vertex_normals']
        # apply the mask to any vertex attribute whose length
        # matches the current vertex count
        count = len(self.vertices)
        for key, value in self.vertex_attributes.items():
            try:
                # covers un-len'd objects as well
                if len(value) != count:
                    raise TypeError()
            except TypeError:
                continue
            # apply the mask to the attribute
            self.vertex_attributes[key] = value[mask]
        # actually apply the mask (this clears the cache)
        self.vertices = self.vertices[mask]
        # if we had passed vertex normals try to save them
        if util.is_shape(cached_normals, (-1, 3)):
            try:
                self.vertex_normals = cached_normals[mask]
            except BaseException:
                pass
    def update_faces(self, mask):
        """
        Remove faces with a validity mask, keeping face
        normals, colors and face attributes consistent.

        Parameters
        ------------
        mask : (m, ) int or (len(self.faces), ) bool
          Faces to keep
        """
        # if the mesh is already empty we can't remove anything
        if self.is_empty:
            return
        mask = np.asanyarray(mask)
        if mask.dtype.name == 'bool' and mask.all():
            # mask removes no faces so exit early
            return
        # try to save face normals before dumping cache
        cached_normals = self._cache['face_normals']
        faces = self._data['faces']
        # if Trimesh has been subclassed and faces have been moved
        # from data to cache, get faces from cache.
        if not util.is_shape(faces, (-1, 3)):
            faces = self._cache['faces']
        # apply the mask to any face attribute whose length
        # matches the current face count
        count = len(self.faces)
        for key, value in self.face_attributes.items():
            try:
                # covers un-len'd objects as well
                if len(value) != count:
                    raise TypeError()
            except TypeError:
                continue
            # apply the mask to the attribute
            self.face_attributes[key] = value[mask]
        # actually apply the mask (this clears the cache)
        self.faces = faces[mask]
        # apply to face colors
        self.visual.update_faces(mask)
        # if our normals were the correct shape apply them
        if util.is_shape(cached_normals, (-1, 3)):
            self.face_normals = cached_normals[mask]
def remove_infinite_values(self):
"""
Ensure that every vertex and face consists of finite numbers.
This will remove vertices or faces containing np.nan and np.inf
Alters `self.faces` and `self.vertices`
"""
if util.is_shape(self.faces, (-1, 3)):
# (len(self.faces), ) bool, mask for faces
face_mask = np.isfinite(self.faces).all(axis=1)
self.update_faces(face_mask)
if util.is_shape(self.vertices, (-1, 3)):
# (len(self.vertices), ) bool, mask for vertices
vertex_mask = np.isfinite(self.vertices).all(axis=1)
self.update_vertices(vertex_mask)
def remove_duplicate_faces(self):
"""
On the current mesh remove any faces which are duplicates.
Alters `self.faces` to remove duplicate faces
"""
unique, inverse = grouping.unique_rows(np.sort(self.faces, axis=1))
self.update_faces(unique)
def rezero(self):
"""
Translate the mesh so that all vertex vertices are positive.
Alters `self.vertices`.
"""
self.apply_translation(self.bounds[0] * -1.0)
@log_time
def split(self, **kwargs):
"""
Returns a list of Trimesh objects, based on face connectivity.
Splits into individual components, sometimes referred to as 'bodies'
Parameters
------------
only_watertight : bool
Only return watertight meshes and discard remainder
adjacency : None or (n, 2) int
Override face adjacency with custom values
Returns
---------
meshes : (n, ) trimesh.Trimesh
Separate bodies from original mesh
"""
return graph.split(self, **kwargs)
@caching.cache_decorator
def face_adjacency(self):
"""
Find faces that share an edge i.e. 'adjacent' faces.
Returns
----------
adjacency : (n, 2) int
Pairs of faces which share an edge
Examples
---------
In [1]: mesh = trimesh.load('models/featuretype.STL')
In [2]: mesh.face_adjacency
Out[2]:
array([[ 0, 1],
[ 2, 3],
[ 0, 3],
...,
[1112, 949],
[3467, 3475],
[1113, 3475]])
In [3]: mesh.faces[mesh.face_adjacency[0]]
Out[3]:
TrackedArray([[ 1, 0, 408],
[1239, 0, 1]], dtype=int64)
In [4]: import networkx as nx
In [5]: graph = nx.from_edgelist(mesh.face_adjacency)
In [6]: groups = nx.connected_components(graph)
"""
adjacency, edges = graph.face_adjacency(
mesh=self, return_edges=True)
self._cache['face_adjacency_edges'] = edges
return adjacency
@caching.cache_decorator
def face_neighborhood(self):
"""
Find faces that share a vertex i.e. 'neighbors' faces.
Returns
----------
neighborhood : (n, 2) int
Pairs of faces which share a vertex
"""
return graph.face_neighborhood(self)
@caching.cache_decorator
def face_adjacency_edges(self):
"""
Returns the edges that are shared by the adjacent faces.
Returns
--------
edges : (n, 2) int
Vertex indices which correspond to face_adjacency
"""
# this value is calculated as a byproduct of the face adjacency
_ = self.face_adjacency
return self._cache['face_adjacency_edges']
@caching.cache_decorator
def face_adjacency_edges_tree(self):
"""
A KDTree for mapping edges back face adjacency index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with SORTED edges will return
their index in mesh.face_adjacency
"""
from scipy.spatial import cKDTree
return cKDTree(self.face_adjacency_edges)
@caching.cache_decorator
def face_adjacency_angles(self):
"""
Return the angle between adjacent faces
Returns
--------
adjacency_angle : (n, ) float
Angle between adjacent faces
Each value corresponds with self.face_adjacency
"""
# get pairs of unit vectors for adjacent faces
pairs = self.face_normals[self.face_adjacency]
# find the angle between the pairs of vectors
angles = geometry.vector_angle(pairs)
return angles
@caching.cache_decorator
def face_adjacency_projections(self):
"""
The projection of the non-shared vertex of a triangle onto
its adjacent face
Returns
----------
projections : (len(self.face_adjacency), ) float
Dot product of vertex
onto plane of adjacent triangle.
"""
projections = convex.adjacency_projections(self)
return projections
@caching.cache_decorator
def face_adjacency_convex(self):
"""
Return faces which are adjacent and locally convex.
What this means is that given faces A and B, the one vertex
in B that is not shared with A, projected onto the plane of A
has a projection that is zero or negative.
Returns
----------
are_convex : (len(self.face_adjacency), ) bool
Face pairs that are locally convex
"""
are_convex = self.face_adjacency_projections < tol.merge
return are_convex
@caching.cache_decorator
def face_adjacency_unshared(self):
"""
Return the vertex index of the two vertices not in the shared
edge between two adjacent faces
Returns
-----------
vid_unshared : (len(mesh.face_adjacency), 2) int
Indexes of mesh.vertices
"""
vid_unshared = graph.face_adjacency_unshared(self)
return vid_unshared
@caching.cache_decorator
def face_adjacency_radius(self):
"""
The approximate radius of a cylinder that fits inside adjacent faces.
Returns
------------
radii : (len(self.face_adjacency), ) float
Approximate radius formed by triangle pair
"""
radii, span = graph.face_adjacency_radius(mesh=self)
self._cache['face_adjacency_span'] = span
return radii
@caching.cache_decorator
def face_adjacency_span(self):
"""
The approximate perpendicular projection of the non-shared
vertices in a pair of adjacent faces onto the shared edge of
the two faces.
Returns
------------
span : (len(self.face_adjacency), ) float
Approximate span between the non-shared vertices
"""
_ = self.face_adjacency_radius
return self._cache['face_adjacency_span']
@caching.cache_decorator
def integral_mean_curvature(self):
"""
The integral mean curvature, or the surface integral of the mean curvature.
Returns
---------
area : float
Integral mean curvature of mesh
"""
edges_length = np.linalg.norm(np.subtract(
*self.vertices[self.face_adjacency_edges.T]), axis=1)
imc = (self.face_adjacency_angles * edges_length).sum() * 0.5
return imc
@caching.cache_decorator
def vertex_adjacency_graph(self):
"""
Returns a networkx graph representing the vertices and their connections
in the mesh.
Returns
---------
graph: networkx.Graph
Graph representing vertices and edges between
them where vertices are nodes and edges are edges
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
mesh = trimesh.primitives.Box()
graph = mesh.vertex_adjacency_graph
graph.neighbors(0)
> [1, 2, 3, 4]
"""
adjacency_g = graph.vertex_adjacency_graph(mesh=self)
return adjacency_g
@caching.cache_decorator
def vertex_neighbors(self):
"""
The vertex neighbors of each vertex of the mesh, determined from
the cached vertex_adjacency_graph, if already existent.
Returns
----------
vertex_neighbors : (len(self.vertices), ) int
Represents immediate neighbors of each vertex along
the edge of a triangle
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
>>> mesh = trimesh.primitives.Box()
>>> mesh.vertex_neighbors[0]
[1, 2, 3, 4]
"""
return graph.neighbors(
edges=self.edges_unique, max_index=len(self.vertices))
@caching.cache_decorator
def is_winding_consistent(self):
"""
Does the mesh have consistent winding or not.
A mesh with consistent winding has each shared edge
going in an opposite direction from the other in the pair.
Returns
--------
consistent : bool
Is winding is consistent or not
"""
if self.is_empty:
return False
# consistent winding check is populated into the cache by is_watertight
_ = self.is_watertight
return self._cache['is_winding_consistent']
@caching.cache_decorator
def is_watertight(self):
"""
Check if a mesh is watertight by making sure every edge is
included in two faces.
Returns
----------
is_watertight : bool
Is mesh watertight or not
"""
if self.is_empty:
return False
watertight, winding = graph.is_watertight(
edges=self.edges, edges_sorted=self.edges_sorted)
self._cache['is_winding_consistent'] = winding
return watertight
@caching.cache_decorator
def is_volume(self):
"""
Check if a mesh has all the properties required to represent
a valid volume, rather than just a surface.
These properties include being watertight, having consistent
winding and outward facing normals.
Returns
---------
valid : bool
Does the mesh represent a volume
"""
valid = bool(self.is_watertight and
self.is_winding_consistent and
np.isfinite(self.center_mass).all() and
self.volume > 0.0)
return valid
@property
def is_empty(self):
"""
Does the current mesh have data defined.
Returns
--------
empty : bool
If True, no data is set on the current mesh
"""
return self._data.is_empty()
@caching.cache_decorator
def is_convex(self):
"""
Check if a mesh is convex or not.
Returns
----------
is_convex: bool
Is mesh convex or not
"""
if self.is_empty:
return False
is_convex = bool(convex.is_convex(self))
return is_convex
@caching.cache_decorator
def kdtree(self):
"""
Return a scipy.spatial.cKDTree of the vertices of the mesh.
Not cached as this lead to observed memory issues and segfaults.
Returns
---------
tree : scipy.spatial.cKDTree
Contains mesh.vertices
"""
from scipy.spatial import cKDTree
tree = cKDTree(self.vertices.view(np.ndarray))
return tree
def remove_degenerate_faces(self, height=tol.merge):
"""
Remove degenerate faces (faces without 3 unique vertex indices)
from the current mesh.
If a height is specified, it will remove any face with a 2D oriented
bounding box with one edge shorter than that height.
If not specified, it will remove any face with a zero normal.
Parameters
------------
height : float
If specified removes faces with an oriented bounding
box shorter than this on one side.
Returns
-------------
nondegenerate : (len(self.faces), ) bool
Mask used to remove faces
"""
nondegenerate = triangles.nondegenerate(
self.triangles,
areas=self.area_faces,
height=height)
self.update_faces(nondegenerate)
return nondegenerate
@caching.cache_decorator
def facets(self):
"""
Return a list of face indices for coplanar adjacent faces.
Returns
---------
facets : (n, ) sequence of (m, ) int
Groups of indexes of self.faces
"""
facets = graph.facets(self)
return facets
@caching.cache_decorator
def facets_area(self):
"""
Return an array containing the area of each facet.
Returns
---------
area : (len(self.facets), ) float
Total area of each facet (group of faces)
"""
# avoid thrashing the cache inside a loop
area_faces = self.area_faces
# sum the area of each group of faces represented by facets
# use native python sum in tight loop as opposed to array.sum()
# as in this case the lower function call overhead of
# native sum provides roughly a 50% speedup
areas = np.array([sum(area_faces[i])
for i in self.facets],
dtype=np.float64)
return areas
@caching.cache_decorator
def facets_normal(self):
"""
Return the normal of each facet
Returns
---------
normals: (len(self.facets), 3) float
A unit normal vector for each facet
"""
if len(self.facets) == 0:
return np.array([])
area_faces = self.area_faces
# the face index of the largest face in each facet
index = np.array([i[area_faces[i].argmax()]
for i in self.facets])
# (n, 3) float, unit normal vectors of facet plane
normals = self.face_normals[index]
# (n, 3) float, points on facet plane
origins = self.vertices[self.faces[:, 0][index]]
# save origins in cache
self._cache['facets_origin'] = origins
return normals
@caching.cache_decorator
def facets_origin(self):
"""
Return a point on the facet plane.
Returns
------------
origins : (len(self.facets), 3) float
A point on each facet plane
"""
_ = self.facets_normal
return self._cache['facets_origin']
@caching.cache_decorator
def facets_boundary(self):
"""
Return the edges which represent the boundary of each facet
Returns
---------
edges_boundary : sequence of (n, 2) int
Indices of self.vertices
"""
# make each row correspond to a single face
edges = self.edges_sorted.reshape((-1, 6))
# get the edges for each facet
edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets]
edges_boundary = [i[grouping.group_rows(i, require_count=1)]
for i in edges_facet]
return edges_boundary
@caching.cache_decorator
def facets_on_hull(self):
"""
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets), ) bool
is A facet on the meshes convex hull or not
"""
# if no facets exit early
if len(self.facets) == 0:
return np.array([], dtype=bool)
# facets plane, origin and normal
normals = self.facets_normal
origins = self.facets_origin
# (n, 3) convex hull vertices
convex = self.convex_hull.vertices.view(np.ndarray).copy()
# boolean mask for which facets are on convex hull
on_hull = np.zeros(len(self.facets), dtype=bool)
for i, normal, origin in zip(range(len(normals)), normals, origins):
# a facet plane is on the convex hull if every vertex
# of the convex hull is behind that plane
# which we are checking with dot products
dot = np.dot(normal, (convex - origin).T)
on_hull[i] = (dot < tol.merge).all()
return on_hull
@log_time
def fix_normals(self, multibody=None):
"""
Find and fix problems with self.face_normals and self.faces
winding direction.
For face normals ensure that vectors are consistently pointed
outwards, and that self.faces is wound in the correct direction
for all connected components.
Parameters
-------------
multibody : None or bool
Fix normals across multiple bodies
if None automatically pick from body_count
"""
if multibody is None:
multibody = self.body_count > 1
repair.fix_normals(self, multibody=multibody)
def fill_holes(self):
"""
Fill single triangle and single quad holes in the current mesh.
Returns
----------
watertight : bool
Is the mesh watertight after the function completes
"""
return repair.fill_holes(self)
def register(self, other, **kwargs):
"""
Align a mesh with another mesh or a PointCloud using
the principal axes of inertia as a starting point which
is refined by iterative closest point.
Parameters
------------
mesh : trimesh.Trimesh object
Mesh to align with other
other : trimesh.Trimesh or (n, 3) float
Mesh or points in space
samples : int
Number of samples from mesh surface to align
icp_first : int
How many ICP iterations for the 9 possible
combinations of
icp_final : int
How many ICP itertations for the closest
candidate from the wider search
Returns
-----------
mesh_to_other : (4, 4) float
Transform to align mesh to the other object
cost : float
Average square distance per point
"""
mesh_to_other, cost = registration.mesh_other(
mesh=self,
other=other,
**kwargs)
return mesh_to_other, cost
def compute_stable_poses(self,
                         center_mass=None,
                         sigma=0.0,
                         n_samples=1,
                         threshold=0.0):
    """
    Compute stable orientations of the mesh and their quasi-static
    probabilities.

    The center of mass is sampled from a multivariate gaussian
    (mean at the COM, covariance sigma * identity) n_samples times.
    For each sample the stable resting poses on a planar workspace
    are computed, along with the probability of landing in each
    pose when the object is dropped onto the table randomly.
    Results are sorted with the most probable pose first.

    Parameters
    ------------
    center_mass : (3, ) float or None
      Object center of mass; when None it is computed assuming
      uniform density and watertightness
    sigma : float
      Covariance of the gaussian used to sample COM locations
    n_samples : int
      Number of COM samples
    threshold : float
      Minimum probability for a returned pose

    Returns
    -------
    transforms : (n, 4, 4) float
      Homogeneous matrices placing the object in a stable pose,
      z-axis pointing up and the object just touching the table
    probs : (n, ) float
      Probability in [0.0, 1.0] for each pose
    """
    # the poses module does the heavy lifting
    return poses.compute_stable_poses(mesh=self,
                                      center_mass=center_mass,
                                      sigma=sigma,
                                      n_samples=n_samples,
                                      threshold=threshold)
def subdivide(self, face_index=None):
    """
    Subdivide a mesh, with each subdivided face replaced with four
    smaller faces.

    Parameters
    ------------
    face_index : (m, ) int or None
      If None all faces of mesh will be subdivided.
      If (m, ) int array of indices: only specified faces will be
      subdivided. Note that in this case the mesh will generally
      no longer be manifold, as the additional vertex on the midpoint
      will not be used by the adjacent faces to the faces specified,
      and an additional postprocessing step will be required to
      make resulting mesh watertight.

    Returns
    ----------
    result : trimesh.Trimesh
      Subdivided copy of the mesh; `self` is not modified.
    """
    # subdivide vertex attributes
    # NOTE(review): this dict is always empty here, so no per-vertex
    # attributes are actually carried through the subdivision --
    # confirm whether self.vertex_attributes was meant to be passed
    vertex_attributes = {}
    visual = None
    if (hasattr(self.visual, 'uv') and
            np.shape(self.visual.uv) == (len(self.vertices), 2)):
        # uv coords stacked after xyz so they are
        # interpolated along with the vertex positions
        vertices, faces, attr = remesh.subdivide(
            vertices=np.hstack((self.vertices, self.visual.uv)),
            faces=self.faces,
            face_index=face_index,
            vertex_attributes=vertex_attributes)
        # get a copy of the current visuals
        visual = self.visual.copy()
        # separate uv coords and vertices again
        vertices, visual.uv = vertices[:, :3], vertices[:, 3:]
    else:
        # perform the subdivision with vertex attributes
        vertices, faces, attr = remesh.subdivide(
            vertices=self.vertices,
            faces=self.faces,
            face_index=face_index,
            vertex_attributes=vertex_attributes)
    # create a new mesh; process=False preserves vertex order
    result = Trimesh(
        vertices=vertices,
        faces=faces,
        visual=visual,
        vertex_attributes=attr,
        process=False)
    return result
def subdivide_to_size(self, max_edge, max_iter=10, return_index=False):
    """
    Subdivide a mesh until every edge is shorter than a
    specified length.

    Will return a triangle soup, not a nicely structured mesh.

    Parameters
    ------------
    max_edge : float
      Maximum length of any edge in the result
    max_iter : int
      The maximum number of times to run subdivision
    return_index : bool
      If True, also return the index of the original face
      for each new face

    Returns
    ----------
    result : trimesh.Trimesh
      Subdivided copy of the mesh
    final_index : (n, ) int
      Only when return_index is True: original face per new face
    """
    # the original implementation duplicated the call / unpack logic
    # in both branches; factor the uv handling out instead
    visual = None
    has_uv = (hasattr(self.visual, 'uv') and
              np.shape(self.visual.uv) == (len(self.vertices), 2))
    if has_uv:
        # stack uv coords after xyz so they are divided with vertices
        vertices = np.hstack((self.vertices, self.visual.uv))
    else:
        vertices = self.vertices
    # run the subdivision once with whichever vertex array applies
    packed = remesh.subdivide_to_size(
        vertices=vertices,
        faces=self.faces,
        max_edge=max_edge,
        max_iter=max_iter,
        return_index=return_index)
    # unpack result
    if return_index:
        vertices, faces, final_index = packed
    else:
        vertices, faces = packed
    if has_uv:
        # get a copy of the current visuals and
        # separate uv coords and vertices again
        visual = self.visual.copy()
        vertices, visual.uv = vertices[:, :3], vertices[:, 3:]
    # create a new mesh; process=False preserves vertex order
    result = Trimesh(
        vertices=vertices,
        faces=faces,
        visual=visual,
        process=False)
    if return_index:
        return result, final_index
    return result
@log_time
def smoothed(self, **kwargs):
    """
    Return a version of the current mesh which will render
    nicely, without changing source mesh.

    Parameters
    -------------
    angle : float or None
      Angle in radians: face pairs with angles
      smaller than this will appear smoothed
    facet_minarea : float or None
      Minimum area fraction to consider
      IE for `facets_minarea=25` only facets larger
      than `mesh.area / 25` will be considered.

    Returns
    ---------
    smoothed : trimesh.Trimesh
      Non watertight version of current mesh
      which will render nicely with smooth shading
    """
    # smooth should be recomputed if visuals change, so refresh
    # the visual hash before consulting the visual's cache
    self.visual._verify_hash()
    cached = self.visual._cache['smoothed']
    if cached is not None:
        return cached
    # run smoothing and store in the visual cache so the result
    # is invalidated whenever the visuals change
    smoothed = graph.smoothed(
        self, **kwargs)
    self.visual._cache['smoothed'] = smoothed
    return smoothed
@property
def visual(self):
    """
    The stored visuals for the current mesh.

    Returns
    -------------
    visual : ColorVisuals, TextureVisuals or None
      Visual information about the mesh, or None
      if it has never been assigned.
    """
    # `_visual` may not exist yet during construction
    return getattr(self, '_visual', None)
@visual.setter
def visual(self, value):
    """
    Assign visuals, making sure `visual.mesh` points back
    to the source mesh.

    Parameters
    --------------
    value : ColorVisuals or TextureVisuals
      Visual information about the mesh
    """
    # back-reference so the visual object can consult geometry
    value.mesh = self
    self._visual = value
def section(self,
            plane_normal,
            plane_origin,
            **kwargs):
    """
    Return a 3D cross section of the current mesh against a
    plane defined by origin and normal.

    Parameters
    ------------
    plane_normal : (3, ) float
      Normal vector of the section plane
    plane_origin : (3, ) float
      A point on the section plane

    Returns
    ---------
    intersections : Path3D or None
      Curve of intersection, or None when the plane
      misses the mesh entirely
    """
    # turn line segments into Path2D/Path3D objects
    from .exchange.load import load_path
    # intersect the mesh with the plane, keeping source faces
    segments, face_index = intersections.mesh_plane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        return_faces=True,
        **kwargs)
    # the plane missed the mesh
    if len(segments) == 0:
        return None
    # collect the segments into a Path3D and stash
    # which face produced each segment
    path = load_path(segments)
    path.metadata['face_index'] = face_index
    return path
def section_multiplane(self,
                       plane_origin,
                       plane_normal,
                       heights):
    """
    Return multiple parallel cross sections of the current
    mesh in 2D.

    Parameters
    ------------
    plane_origin : (3, ) float
      Point on the base cross section plane
    plane_normal : (3, ) float
      Normal vector of the section planes
    heights : (n, ) float
      Each section is offset by height along the plane normal

    Returns
    ---------
    paths : (n, ) Path2D or None
      2D cross sections at the specified heights.
      path.metadata['to_3D'] contains the transform
      to return a 2D section back into 3D space.
    """
    # turn line segments into Path2D/Path3D objects
    from .exchange.load import load_path
    # one (segments, transform, face indexes) triple per height
    lines, transforms, faces = intersections.mesh_multiplane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        heights=heights)
    # entries stay None for heights where the plane missed the mesh
    paths = [None] * len(lines)
    # FIX: the original loop variable reused the name `faces`,
    # shadowing the sequence it was zipping over; it only worked
    # because zip evaluated the outer binding first
    for i, (face_index, segments, T) in enumerate(
            zip(faces, lines, transforms)):
        if len(segments) > 0:
            paths[i] = load_path(
                segments,
                metadata={'to_3D': T, 'face_index': face_index})
    return paths
def slice_plane(self,
                plane_origin,
                plane_normal,
                cap=False,
                face_index=None,
                cached_dots=None,
                **kwargs):
    """
    Slice the mesh with a plane, returning a new mesh containing
    the portion of the original on the positive-normal side.

    Parameters
    ------------
    plane_origin : (3,) float
      Point on plane to intersect with mesh
    plane_normal : (3,) float
      Normal vector of plane to intersect with mesh
    cap : bool
      If True, cap the result with a triangulated polygon
    face_index : (m,) int or None
      Indexes of mesh.faces to slice; all faces when None
    cached_dots : (n, 3) float or None
      Externally precomputed dot products, passed
      to avoid recomputing them

    Returns
    ---------
    new_mesh : trimesh.Trimesh or None
      Subset of the current mesh on the positive
      normal side of the plane
    """
    # delegate directly to the intersections module
    return intersections.slice_mesh_plane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        cap=cap,
        face_index=face_index,
        cached_dots=cached_dots,
        **kwargs)
def unwrap(self, image=None):
    """
    Returns a Trimesh object equivalent to the current mesh where
    the vertices have been assigned uv texture coordinates. Vertices
    may be split into as many as necessary by the unwrapping
    algorithm, depending on how many uv maps they appear in.

    Requires `pip install xatlas`

    Parameters
    ------------
    image : None or PIL.Image
      Image to assign to the material

    Returns
    --------
    unwrapped : trimesh.Trimesh
      Mesh with unwrapped uv coordinates
    """
    import xatlas
    # vmap maps each new vertex back to its original vertex index
    vmap, faces, uv = xatlas.parametrize(
        self.vertices, self.faces)
    result = Trimesh(vertices=self.vertices[vmap],
                     faces=faces,
                     visual=TextureVisuals(uv=uv, image=image),
                     process=False)
    # run additional checks for unwrapping
    if tol.strict:
        # check the export object to make sure we didn't
        # move the indices around on creation
        assert np.allclose(result.visual.uv, uv)
        assert np.allclose(result.faces, faces)
        assert np.allclose(result.vertices, self.vertices[vmap])
        # check to make sure indices are still the
        # same order after we've exported to OBJ
        export = result.export(file_type='obj')
        # reconstruct uv coordinates from the 'vt ' lines of the OBJ
        uv_recon = np.array([L[3:].split() for L in
                             str.splitlines(export) if
                             L.startswith('vt ')],
                            dtype=np.float64)
        assert np.allclose(uv_recon, uv)
        # reconstruct vertex positions from the 'v ' lines
        v_recon = np.array([L[2:].split() for L in
                            str.splitlines(export) if
                            L.startswith('v ')],
                           dtype=np.float64)
        assert np.allclose(v_recon, self.vertices[vmap])
    return result
@caching.cache_decorator
def convex_hull(self):
    """
    A Trimesh object representing the convex hull of
    the current mesh.

    Returns
    --------
    convex : trimesh.Trimesh
      Mesh of convex hull of current mesh
    """
    # the convex module handles hull computation and winding
    return convex.convex_hull(self)
def sample(self, count, return_index=False, face_weight=None):
    """
    Return random samples distributed across the
    surface of the mesh.

    Parameters
    ------------
    count : int
      Number of points to sample
    return_index : bool
      If True also return the index of the face
      each sample was taken from
    face_weight : None or (len(mesh.faces), ) float
      Weight faces by a factor other than face area;
      None is equivalent to weighting by area

    Returns
    ---------
    samples : (count, 3) float
      Points on the surface of the mesh
    face_index : (count, ) int
      Index into self.faces (only when return_index)
    """
    points, face_index = sample.sample_surface(
        mesh=self, count=count, face_weight=face_weight)
    if return_index:
        return points, face_index
    return points
def remove_unreferenced_vertices(self):
    """
    Remove all vertices in the current mesh which are not
    referenced by a face.
    """
    # mark every vertex index that appears in any face
    referenced = np.zeros(len(self.vertices), dtype=bool)
    referenced[self.faces] = True
    # old-index -> new-index map for the surviving vertices
    inverse = np.zeros(len(self.vertices), dtype=np.int64)
    inverse[referenced] = np.arange(referenced.sum())
    # apply the mask; `inverse` lets update_vertices remap self.faces
    self.update_vertices(mask=referenced, inverse=inverse)
def unmerge_vertices(self):
    """
    Removes all face references so that every face contains
    three unique vertex indices and no faces are adjacent.
    """
    # new faces are incrementing so every vertex is unique
    faces = np.arange(len(self.faces) * 3,
                      dtype=np.int64).reshape((-1, 3))
    # use update_vertices to apply mask to
    # all properties that are per-vertex
    self.update_vertices(self.faces.reshape(-1))
    # set faces to incrementing indexes
    self.faces = faces
    # keep face normals as they haven't changed
    self._cache.clear(exclude=['face_normals'])
def apply_transform(self, matrix):
    """
    Transform mesh by a homogeneous transformation matrix.

    Does the bookkeeping to avoid recomputing things so this function
    should be used rather than directly modifying self.vertices
    if possible.

    Parameters
    ------------
    matrix : (4, 4) float
      Homogeneous transformation matrix

    Returns
    ------------
    self : trimesh.Trimesh
      The mesh itself, transformed in place.
    """
    # get c-order float64 matrix
    matrix = np.asanyarray(
        matrix, order='C', dtype=np.float64)
    # only support homogeneous transformations
    if matrix.shape != (4, 4):
        raise ValueError('Transformation matrix must be (4, 4)!')
    # exit early if we've been passed an identity matrix
    # np.allclose is surprisingly slow so do this test
    elif util.allclose(matrix, np.eye(4), 1e-8):
        log.debug('apply_transform passed identity matrix')
        return self
    # new vertex positions
    new_vertices = transformations.transform_points(
        self.vertices,
        matrix=matrix)
    # check to see if the matrix has rotation
    # rather than just translation
    has_rotation = not util.allclose(
        matrix[:3, :3], np.eye(3), atol=1e-6)
    # overridden center of mass must be moved along with the mesh
    if self._center_mass is not None:
        self._center_mass = transformations.transform_points(
            np.array([self._center_mass, ]),
            matrix)[0]
    # preserve face normals if we have them stored
    if has_rotation and 'face_normals' in self._cache:
        # transform face normals by rotation component only
        self._cache.cache['face_normals'] = util.unitize(
            transformations.transform_points(
                self.face_normals,
                matrix=matrix,
                translate=False))
    # preserve vertex normals if we have them stored
    if has_rotation and 'vertex_normals' in self._cache:
        self._cache.cache['vertex_normals'] = util.unitize(
            transformations.transform_points(
                self.vertex_normals,
                matrix=matrix,
                translate=False))
    # if transformation flips winding of triangles
    # reverse the columns to keep outward-facing winding
    if has_rotation and transformations.flips_winding(matrix):
        log.debug('transform flips winding')
        # fliplr will make array non C contiguous
        # which will cause hashes to be more
        # expensive than necessary so wrap
        self.faces = np.ascontiguousarray(
            np.fliplr(self.faces))
    # assign the new values
    self.vertices = new_vertices
    # preserve normals and topology in cache
    # while dumping everything else
    self._cache.clear(exclude={
        'face_normals',  # transformed by us
        'vertex_normals',  # also transformed by us
        'face_adjacency',  # topological
        'face_adjacency_edges',
        'face_adjacency_unshared',
        'edges',
        'edges_face',
        'edges_sorted',
        'edges_unique',
        'edges_unique_idx',
        'edges_unique_inverse',
        'edges_sparse',
        'body_count',
        'faces_unique_edges',
        'euler_number'})
    # set the cache ID with the current hash value
    self._cache.id_set()
    log.debug('mesh transformed by matrix')
    return self
def voxelized(self, pitch, method='subdivide', **kwargs):
    """
    Discretize the current mesh into a VoxelGrid at the
    specified pitch.

    Parameters
    ------------
    pitch : float
      The edge length of a single voxel
    method : str
      Implementation key, see `trimesh.voxel.creation.voxelizers`
    **kwargs
      Passed to the specified implementation

    Returns
    ----------
    voxelized : VoxelGrid
      Voxel representation of the current mesh
    """
    from .voxel import creation
    return creation.voxelize(
        mesh=self, pitch=pitch, method=method, **kwargs)
@caching.cache_decorator
def as_open3d(self):
    """
    The current mesh as an `open3d.geometry.TriangleMesh`.

    Returns
    ---------
    open3d : open3d.geometry.TriangleMesh
      Current mesh converted to an open3d object.
    """
    import open3d
    # wrap the numpy arrays in open3d's vector containers
    o3d_vertices = open3d.utility.Vector3dVector(self.vertices)
    o3d_triangles = open3d.utility.Vector3iVector(self.faces)
    return open3d.geometry.TriangleMesh(
        vertices=o3d_vertices,
        triangles=o3d_triangles)
def simplify_quadratic_decimation(self, face_count):
    """
    Thin wrapper around open3d's
    `open3d.geometry.TriangleMesh.simplify_quadric_decimation`.

    Parameters
    -----------
    face_count : int
      Number of faces desired in the resulting mesh.

    Returns
    ---------
    simple : trimesh.Trimesh
      Simplified version of the mesh.
    """
    # run the decimation on the cached open3d conversion
    decimated = self.as_open3d.simplify_quadric_decimation(
        int(face_count))
    return Trimesh(vertices=decimated.vertices,
                   faces=decimated.triangles)
def outline(self, face_ids=None, **kwargs):
    """
    Find the outline of a set of faces as a Path3D: every edge
    which is included by exactly one of the selected triangles.
    Note that this implies a non-watertight mesh, since the
    outline of a watertight mesh is an empty path.

    Parameters
    ------------
    face_ids : (n, ) int or None
      Indices to compute the outline of;
      None means the whole mesh
    **kwargs
      Passed to the Path3D constructor

    Returns
    ----------
    path : Path3D
      Curve in 3D of the outline
    """
    from .path import Path3D
    from .path.exchange.misc import faces_to_path
    # faces_to_path returns the kwargs for the Path3D constructor
    path_kwargs = faces_to_path(self, face_ids, **kwargs)
    return Path3D(**path_kwargs)
def projected(self,
              normal,
              **kwargs):
    """
    Project the mesh onto a plane and extract the polygon
    outlining the projection.

    Parameters
    ----------
    normal : (3,) float
      Normal to extract the flat pattern along
    check : bool
      If True make sure the mesh is flat
    origin : None or (3,) float
      Origin of the plane to project the mesh onto
    pad : float
      Proportion to pad polygons by before unioning
      and then de-padding the result to avoid
      zero-width gaps
    tol_dot : float
      Tolerance for discarding on-edge triangles
    max_regions : int
      Raise an exception if the mesh has more than this
      number of disconnected regions, to fail quickly
      before unioning

    Returns
    ----------
    projected : trimesh.path.Path2D
      Outline of the source mesh
    """
    from .path import Path2D
    from .exchange.load import load_path
    # alias to avoid shadowing this method's name
    from .path.polygons import projected as project_outline
    outline = project_outline(mesh=self, normal=normal, **kwargs)
    # an empty path when the projection produced nothing
    if outline is None:
        return Path2D()
    return load_path(outline)
@caching.cache_decorator
def area(self):
    """
    Summed area of all triangles in the current mesh.

    Returns
    ---------
    area : float
      Surface area of mesh
    """
    # sum the cached per-face areas
    return self.area_faces.sum()
@caching.cache_decorator
def area_faces(self):
    """
    The area of each face in the mesh.

    Returns
    ---------
    area_faces : (n, ) float
      Area of each face
    """
    # reuse the cached cross products instead of recomputing
    return triangles.area(crosses=self.triangles_cross,
                          sum=False)
@caching.cache_decorator
def mass_properties(self):
    """
    Mass properties of the current mesh, assuming uniform
    density. The result is probably garbage if the mesh
    isn't watertight.

    Returns
    ----------
    properties : dict
      With keys:
      'volume'      : in global units^3
      'mass'        : from the specified density
      'density'     : same as the density kwarg, for convenience
      'inertia'     : taken at the center of mass, aligned
                      with the global coordinate system
      'center_mass' : center of mass location in global coordinates
    """
    return triangles.mass_properties(
        triangles=self.triangles,
        crosses=self.triangles_cross,
        density=self._density,
        center_mass=self._center_mass,
        skip_inertia=False)
def invert(self):
    """
    Invert the mesh in-place by reversing the winding of every
    face and negating normals without dumping the cache.

    Alters `self.faces` by reversing columns, and negating
    `self.face_normals` and `self.vertex_normals`.
    """
    with self._cache:
        # negate stored normals instead of recomputing them
        if 'face_normals' in self._cache:
            self.face_normals = self._cache['face_normals'] * -1.0
        if 'vertex_normals' in self._cache:
            self.vertex_normals = self._cache['vertex_normals'] * -1.0
        # fliplr makes array non-contiguous so cache checks slow
        self.faces = np.ascontiguousarray(
            np.fliplr(self.faces))
        # save our normals
        self._cache.clear(exclude=['face_normals',
                                   'vertex_normals'])
def scene(self, **kwargs):
    """
    A Scene object containing just the current mesh.

    Returns
    ---------
    scene : trimesh.scene.scene.Scene
      Scene wrapping the current mesh
    """
    return Scene(self, **kwargs)
def show(self, **kwargs):
    """
    Render the mesh in an OpenGL window. Requires pyglet.

    Parameters
    ------------
    smooth : bool
      Whether to smooth-shade the mesh;
      large meshes will be slow

    Returns
    -----------
    scene : trimesh.scene.Scene
      Scene with the current mesh in it
    """
    # wrap in a single-mesh scene and display it
    return self.scene().show(**kwargs)
def submesh(self, faces_sequence, **kwargs):
    """
    Return a subset of the mesh.

    Parameters
    ------------
    faces_sequence : sequence of (m, ) int
      Face indices of the mesh
    only_watertight : bool
      Only return submeshes which are watertight
    append : bool
      Return a single mesh with the faces appended;
      when set, only_watertight is ignored

    Returns
    ---------
    submesh : Trimesh or (n,) Trimesh
      Single mesh when `append`, otherwise a list of submeshes
    """
    return util.submesh(mesh=self,
                        faces_sequence=faces_sequence,
                        **kwargs)
@caching.cache_decorator
def identifier(self):
    """
    A float vector unique to this mesh which is robust
    to rotation and translation.

    Returns
    -----------
    identifier : (7,) float
      Identifying properties of the current mesh
    """
    return comparison.identifier_simple(self)
@caching.cache_decorator
def identifier_hash(self):
    """
    A hash of the rotation-invariant identifier vector.

    Returns
    ---------
    hashed : str
      Hex string of the SHA256 hash from
      the identifier vector at hand-tuned sigfigs.
    """
    return comparison.identifier_hash(self.identifier)
@property
def identifier_md5(self):
    """Deprecated alias of `identifier_hash`."""
    warnings.warn(
        '`geom.identifier_md5` is deprecated and will ' +
        'be removed in October 2023: replace ' +
        'with `geom.identifier_hash`',
        DeprecationWarning)
    return self.identifier_hash
def export(self, file_obj=None, file_type=None, **kwargs):
    """
    Export the current mesh to a file object or file name.
    Supported formats are stl, off, ply, collada, json,
    dict, glb, dict64, msgpack.

    Parameters
    ------------
    file_obj : None, str, or open writeable file object
      File name or object to write to;
      when None the export blob is returned
    file_type : str
      Which file type to export as; not required
      when a file name is passed
    """
    return export_mesh(mesh=self,
                       file_obj=file_obj,
                       file_type=file_type,
                       **kwargs)
def to_dict(self):
    """
    Dictionary representation of the current mesh, with keys
    usable as kwargs for the Trimesh constructor, matching the
    schema in:
    `trimesh/resources/schema/primitive/trimesh.schema.json`

    Returns
    ----------
    result : dict
      Matches schema and Trimesh constructor.
    """
    # plain python lists so the result is JSON-serializable
    result = {'vertices': self.vertices.tolist()}
    result['faces'] = self.faces.tolist()
    return result
def convex_decomposition(self, maxhulls=20, **kwargs):
    """
    Compute an approximate convex decomposition of the mesh
    via testVHACD.

    testVHACD parameters which can be passed as kwargs
    (name, default): resolution 100000; max. concavity 0.001;
    plane down-sampling 4; convex-hull down-sampling 4;
    alpha 0.05; beta 0.05; maxhulls 10; pca 0; mode 0;
    max. vertices per convex-hull 64; min. volume to add
    vertices to convex-hulls 0.0001; convex-hull
    approximation 1; OpenCL acceleration 1; OpenCL platform
    ID 0; OpenCL device ID 0; output output.wrl; log log.txt.

    Parameters
    ------------
    maxhulls : int
      Maximum number of convex hulls to return
    **kwargs
      testVHACD keyword arguments

    Returns
    -------
    meshes : list of trimesh.Trimesh
      Convex meshes approximating the original
    """
    return decomposition.convex_decomposition(
        self, maxhulls=maxhulls, **kwargs)
def union(self, other, engine=None, **kwargs):
    """
    Boolean union between this mesh and n other meshes.

    Parameters
    ------------
    other : Trimesh or (n, ) Trimesh
      Other meshes to union
    engine : None or str
      Which backend to use

    Returns
    ---------
    union : trimesh.Trimesh
      Union of self and other Trimesh objects
    """
    # np.append flattens self plus other into a single sequence
    return boolean.union(meshes=np.append(self, other),
                         engine=engine,
                         **kwargs)
def difference(self, other, engine=None, **kwargs):
    """
    Boolean difference between this mesh and n other meshes.

    Parameters
    ------------
    other : trimesh.Trimesh or list of trimesh.Trimesh
      Meshes to subtract from self
    engine : None or str
      Which backend to use

    Returns
    ---------
    difference : trimesh.Trimesh
      Difference between self and the other meshes
    """
    # np.append flattens self plus other into a single sequence
    return boolean.difference(meshes=np.append(self, other),
                              engine=engine,
                              **kwargs)
def intersection(self, other, engine=None, **kwargs):
    """
    Boolean intersection between this mesh and n other meshes.

    Parameters
    ------------
    other : trimesh.Trimesh or list of trimesh.Trimesh
      Meshes to intersect with
    engine : None or str
      Which backend to use

    Returns
    ---------
    intersection : trimesh.Trimesh
      Mesh of the volume contained by all passed meshes
    """
    # np.append flattens self plus other into a single sequence
    return boolean.intersection(meshes=np.append(self, other),
                                engine=engine,
                                **kwargs)
def contains(self, points):
    """
    Determine, for an array of points, whether each is inside
    the mesh. Raises an error if called on a non-watertight mesh.

    Parameters
    ------------
    points : (n, 3) float
      Points in cartesian space

    Returns
    ---------
    contains : (n, ) bool
      Whether each point is inside the mesh
    """
    # the ray module implements containment via ray casting
    return self.ray.contains_points(points)
@caching.cache_decorator
def face_angles(self):
    """
    The angle at each vertex of every face.

    Returns
    --------
    angles : (len(self.faces), 3) float
      Angle at each vertex of each face
    """
    return triangles.angles(self.triangles)
@caching.cache_decorator
def face_angles_sparse(self):
    """
    A sparse matrix representation of the face angles.

    Returns
    ----------
    sparse : scipy.sparse.coo_matrix
      Float sparse matrix with shape:
      (len(self.vertices), len(self.faces))
    """
    return curvature.face_angles_sparse(self)
@caching.cache_decorator
def vertex_defects(self):
    """
    The vertex defects: (2 * pi) minus the sum of the angles of
    every face including that vertex. Zero where a vertex is only
    included by coplanar triangles, positive in convex regions,
    negative in concave ones.

    Returns
    --------
    vertex_defect : (len(self.vertices), ) float
      Vertex defect at every vertex
    """
    return curvature.vertex_defects(self)
@caching.cache_decorator
def vertex_degree(self):
    """
    The number of faces each vertex is included in.

    Returns
    ----------
    degree : (len(self.vertices), ) int
      Number of faces each vertex is included in
    """
    # row sums of the sparse vertex-face incidence matrix
    return np.array(self.faces_sparse.sum(axis=1)).flatten()
@caching.cache_decorator
def face_adjacency_tree(self):
    """
    An R-tree of face adjacencies.

    Returns
    --------
    tree : rtree.index
      Where each edge in self.face_adjacency has a
      rectangular cell
    """
    # (n, 2, 3) endpoints of every adjacency edge
    edge_vertices = self.vertices[self.face_adjacency_edges]
    # the (n, 6) interleaved bounding box per line segment
    segment_bounds = np.column_stack((
        edge_vertices.min(axis=1),
        edge_vertices.max(axis=1)))
    return util.bounds_tree(segment_bounds)
def copy(self, include_cache=False):
    """
    Safely return a copy of the current mesh.

    By default, copied meshes will have emptied cache
    to avoid memory issues and so may be slow on initial
    operations until caches are regenerated.

    Current object will *never* have its cache cleared.

    Parameters
    ------------
    include_cache : bool
      If True, will shallow copy cached data to new mesh

    Returns
    ---------
    copied : trimesh.Trimesh
      Copy of current mesh
    """
    # start with an empty mesh
    copied = Trimesh()
    # always deepcopy vertex and face data
    copied._data.data = copy.deepcopy(self._data.data)
    # copy visual information (setter re-points visual.mesh)
    copied.visual = self.visual.copy()
    # get metadata
    copied.metadata = copy.deepcopy(self.metadata)
    # get center_mass and density
    if self._center_mass is not None:
        copied.center_mass = self.center_mass
    copied._density = self._density
    # make sure cache ID is set initially
    copied._cache.verify()
    if include_cache:
        # shallow copy cached items into the new cache:
        # since the data didn't change, when the data in the
        # new mesh is changed these items will be dumped in
        # the new mesh but preserved in the original mesh
        copied._cache.cache.update(self._cache.cache)
    return copied
def __deepcopy__(self, *args):
    """Deep copy drops cached data to keep memory usage sane."""
    return self.copy(include_cache=False)
def __copy__(self, *args):
    """Shallow copy keeps cached data."""
    return self.copy(include_cache=True)
def eval_cached(self, statement, *args):
    """
    Evaluate a statement and cache the result before returning.
    Statements are evaluated inside the Trimesh object.

    SECURITY: this runs `eval` on the passed statement; it must
    never be given untrusted input.

    Parameters
    ------------
    statement : str
      Statement of valid python code
    *args : list
      Available inside statement as args[0], etc

    Returns
    -----------
    result : result of running eval on statement with args

    Examples
    -----------
    r = mesh.eval_cached('np.dot(self.vertices, args[0])', [0, 0, 1])
    """
    statement = str(statement)
    # key includes statement text and the stringified args
    key = 'eval_cached_' + statement
    key += '_'.join(str(i) for i in args)
    if key in self._cache:
        return self._cache[key]
    # SECURITY NOTE: eval on caller-supplied code; only safe
    # because callers pass trusted literals
    result = eval(statement)
    self._cache[key] = result
    return result
def __add__(self, other):
    """
    Concatenate this mesh with another mesh.

    Parameters
    ------------
    other : trimesh.Trimesh
      Mesh to be concatenated with self

    Returns
    ----------
    concat : trimesh.Trimesh
      Mesh object of combined result
    """
    return util.concatenate(self, other)
| mit | 95ebaec02c4cd858558096ccb690509c | 31.039803 | 86 | 0.559163 | 4.682065 | false | false | false | false |
smarkets/smk_python_sdk | smarkets/errors.py | 1 | 1918 | from __future__ import absolute_import, division, print_function, unicode_literals
import sys as _sys
from collections import namedtuple as _namedtuple
from contextlib import contextmanager as _contextmanager
import decorator as _decorator
import six as _six
def reraise(exception):
    """Raise `exception` using the traceback of the exception
    currently being handled, preserving the original stack."""
    prev_cls, prev, tb = _sys.exc_info()
    _six.reraise(type(exception), exception, tb)
@_contextmanager
def _swallow_manager(exceptions):
try:
yield
except BaseException as e:
if not isinstance(e, exceptions):
raise
def swallow(exceptions, default=None):
    '''
    Swallow exception(s) when executing something. Works as function decorator and
    as a context manager:

    >>> @swallow(NameError, default=2)
    ... def fun():
    ...     a = b # noqa
    ...     return 1
    ...
    >>> fun()
    2

    >>> with swallow(KeyError):
    ...     raise KeyError('key')
    ...

    :type exceptions: iterable of Exception or Exception
    :param default: value to return in case of an exception
    '''
    # normalize: a single exception class or an iterable of classes
    exc_tuple = ((exceptions,)
                 if isinstance(exceptions, type)
                 else tuple(exceptions))
    return _SwallowHandler(exc_tuple, default)
class _SwallowHandler(_namedtuple('_SwallowHandlerBase', 'exceptions default')):
    # Immutable handler usable both as a decorator (__call__) and
    # as a context manager (__enter__/__exit__).
    def __call__(self, something):
        # decorator.decorator preserves the wrapped function signature
        @_decorator.decorator
        def _swallow_decorator(f, *args, **kwargs):
            try:
                value = f(*args, **kwargs)
            except BaseException as e:
                if isinstance(e, self.exceptions):
                    # matching exception: substitute the default value
                    value = self.default
                else:
                    raise
            return value
        return _swallow_decorator(something)

    def __enter__(self):
        pass

    def __exit__(self, type_, value, tb):
        # returning True suppresses matching exceptions;
        # note the default value cannot be surfaced from a `with`
        return isinstance(value, self.exceptions)
class Error(Exception):
    """Base class for every Smarkets error."""
| mit | c0e3529a44854b64ebf22813518dddf9 | 23.589744 | 82 | 0.613139 | 4.419355 | false | false | false | false |
kapsiry/sikteeri | membership/management/commands/procountor.py | 2 | 1992 | # encoding: utf-8
import argparse
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from procountor.procountor_api import ProcountorAPIClient
from membership.billing.payments import process_payments
from procountor.models import APIToken
def valid_date(s):
    """Parse an ISO `YYYY-MM-DD` string into a datetime, raising
    argparse.ArgumentTypeError on malformed input so argparse
    reports a proper usage error."""
    try:
        return datetime.strptime(s, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Not a valid date: '{0}'.".format(s))
class Command(BaseCommand):
    """Management command: import payments from Procountor."""

    help = 'Import payments from Procountor'

    def add_arguments(self, parser):
        # optional lower bound for the statement query window
        parser.add_argument('-s', "--startdate", help="Start Date (YYYY-MM-DD)",
                            default=None, type=valid_date)

    def handle(self, *args, **options):
        # default window: the last 24 hours
        start = options['startdate'] or datetime.now() - timedelta(days=1)
        api_key = APIToken.current()
        if not api_key:
            raise CommandError("No Procountor API key in database, please run Procountor login flow first.")
        api = ProcountorAPIClient(api=settings.PROCOUNTOR_API_URL,
                                  company_id=settings.PROCOUNTOR_COMPANY_ID,
                                  redirect_uri=settings.PROCOUNTOR_REDIRECT_URL,
                                  client_id=settings.PROCOUNTOR_CLIENT_ID,
                                  client_secret=settings.PROCOUNTOR_CLIENT_SECRET,
                                  api_key=api_key)
        api.refresh_access_token()
        # process reference payments first
        statements = api.get_referencepayments(start=start, end=datetime.now())
        for message in process_payments(statements):
            print(message)
        # then bank statement events filtered by explanation code
        # NOTE(review): presumably codes 700/710 identify incoming
        # reference payments -- confirm against Procountor docs
        bankstatements = api.get_bankstatements(start=start, end=datetime.now())
        for bankstatement in bankstatements:
            for message in process_payments(
                    [x for x in bankstatement.events if x["explanationCode"] in [700, 710]]):
                print(message)
| mit | 6e5d0cec58517ae3c21656e29c6030d6 | 37.307692 | 108 | 0.626506 | 4.220339 | false | false | false | false |
kapsiry/sikteeri | membership/decorators.py | 2 | 1735 | # encoding: utf-8
"""
decorators.py
"""
from django.contrib.auth import authenticate
from django.http import HttpResponse, HttpResponseForbidden
from django.conf import settings
from membership.utils import get_client_ip
from sikteeri.iptools import IpRangeList
import base64
def trusted_host_required(view_func):
    """Decorator that allows access only from addresses listed in
    ``settings.TRUSTED_HOSTS``.

    The client IP (as reported by ``get_client_ip``) must fall inside one
    of the configured ranges; otherwise a 403 response is returned.
    """
    def decorator(request, *args, **kwargs):
        # Read the setting without mutating the global settings object:
        # the previous version wrote an empty list back into
        # ``settings.TRUSTED_HOSTS``, a surprising module-wide side effect.
        trusted = getattr(settings, 'TRUSTED_HOSTS', None) or []
        ip = get_client_ip(request)
        if ip in IpRangeList(*trusted):
            return view_func(request, *args, **kwargs)
        return HttpResponseForbidden("Access denied")
    return decorator
def basic_auth_required(view_func):
    # http://djangosnippets.org/snippets/448/
    """Decorator performing HTTP Basic authentication.

    On a valid ``Authorization: Basic <base64>`` header with credentials of
    an active user, the wrapped view is called; otherwise a 401 response
    with a ``WWW-Authenticate`` challenge is returned.
    """
    def _auth(request, *args, **kwargs):
        auth_header = request.META.get('HTTP_AUTHORIZATION')
        if auth_header:
            auth = auth_header.split()
            if len(auth) == 2 and auth[0].lower() == "basic":
                try:
                    # b64decode returns bytes on Python 3; decode before
                    # splitting (the old str-only split raised TypeError)
                    decoded = base64.b64decode(auth[1]).decode('utf-8')
                except (ValueError, UnicodeDecodeError):
                    # malformed base64 / non-UTF-8 payload -> fall through
                    # to the 401 challenge instead of a 500
                    decoded = None
                if decoded is not None and ':' in decoded:
                    # split on the first colon only so that passwords
                    # containing ':' still authenticate correctly
                    uname, passwd = decoded.split(':', 1)
                    user = authenticate(username=uname, password=passwd)
                    if user is not None and user.is_active:
                        return view_func(request, *args, **kwargs)
        response = HttpResponse("Authorization Required", status=401)
        response['WWW-Authenticate'] = 'Basic realm="Secure Area"'
        return response
    return _auth
def main():
    # placeholder entry point; this module only provides decorators and
    # has no standalone behavior
    pass
if __name__ == '__main__':
    main()
| mit | 1b0ecbca0778c8a3839b6d6fe1acfe0a | 31.12963 | 80 | 0.622478 | 4.170673 | false | false | false | false |
mikedh/trimesh | tests/test_graph.py | 1 | 10535 | try:
from . import generic as g
except BaseException:
import generic as g
class GraphTest(g.unittest.TestCase):
    # Exercises trimesh's graph utilities — splitting into bodies, facet
    # grouping, face adjacency and graph traversals — against both the
    # 'scipy' and 'networkx' connected-component backends.
    def setUp(self):
        self.engines = ['scipy', 'networkx']
    def test_soup(self):
        # a soup of random triangles, with no adjacent pairs
        soup = g.get_mesh('soup.stl')
        assert len(soup.face_adjacency) == 0
        assert len(soup.face_adjacency_radius) == 0
        assert len(soup.face_adjacency_edges) == 0
        assert len(soup.face_adjacency_convex) == 0
        assert len(soup.face_adjacency_unshared) == 0
        assert len(soup.face_adjacency_angles) == 0
        assert len(soup.facets) == 0
    def test_components(self):
        # a soup of random triangles, with no adjacent pairs
        soup = g.get_mesh('soup.stl')
        # a mesh with multiple watertight bodies
        mult = g.get_mesh('cycloidal.ply')
        # a mesh with a single watertight body
        sing = g.get_mesh('featuretype.STL')
        # mesh with a single tetrahedron
        tet = g.get_mesh('tet.ply')
        for engine in self.engines:
            # without requiring watertight the split should be into every face
            split = soup.split(only_watertight=False, engine=engine)
            assert len(split) == len(soup.faces)
            # with watertight there should be an empty list
            split = soup.split(only_watertight=True, engine=engine)
            assert len(split) == 0
            split = mult.split(only_watertight=False, engine=engine)
            assert len(split) >= 119
            split = mult.split(only_watertight=True, engine=engine)
            assert len(split) >= 117
            # random triangles should have no facets
            facets = g.trimesh.graph.facets(mesh=soup, engine=engine)
            assert len(facets) == 0
            facets = g.trimesh.graph.facets(mesh=mult, engine=engine)
            assert all(len(i) >= 2 for i in facets)
            assert len(facets) >= 8654
            split = sing.split(only_watertight=False, engine=engine)
            assert len(split) == 1
            assert split[0].is_watertight
            assert split[0].is_winding_consistent
            split = sing.split(only_watertight=True, engine=engine)
            assert len(split) == 1
            assert split[0].is_watertight
            assert split[0].is_winding_consistent
            # single tetrahedron
            assert tet.is_volume
            assert tet.body_count == 1
            # regardless of method or flag we should have one body result
            split = tet.split(only_watertight=True, engine=engine)
            assert len(split) == 1
            split = tet.split(only_watertight=False, engine=engine)
            assert len(split) == 1
    def test_vertex_adjacency_graph(self):
        f = g.trimesh.graph.vertex_adjacency_graph
        # a mesh with a single watertight body
        sing = g.get_mesh('featuretype.STL')
        # the adjacency graph should contain one node per vertex
        vert_adj_g = f(sing)
        assert len(sing.vertices) == len(vert_adj_g)
    def test_engine_time(self):
        # not a pass/fail check: logs relative timings of each engine
        for mesh in g.get_meshes():
            tic = [g.time.time()]
            for engine in self.engines:
                mesh.split(engine=engine, only_watertight=False)
                g.trimesh.graph.facets(mesh=mesh, engine=engine)
                tic.append(g.time.time())
            tic_diff = g.np.diff(tic)
            tic_min = tic_diff.min()
            tic_diff /= tic_min
            g.log.info('graph engine on %s (scale %f sec):\n%s',
                       mesh.metadata['file_name'],
                       tic_min,
                       str(g.np.column_stack((self.engines,
                                              tic_diff))))
    def test_smoothed(self):
        # Make sure smoothing is keeping the same number
        # of faces.
        for name in ['ADIS16480.STL', 'featuretype.STL']:
            mesh = g.get_mesh(name)
            assert len(mesh.faces) == len(mesh.smoothed().faces)
    def test_engines(self):
        # disjoint chains of edges
        edges = g.np.arange(10).reshape((-1, 2))
        for i in range(0, 20):
            check_engines(nodes=g.np.arange(i),
                          edges=edges)
        # one long connected chain of edges
        edges = g.np.column_stack((g.np.arange(1, 11),
                                   g.np.arange(0, 10)))
        for i in range(0, 20):
            check_engines(nodes=g.np.arange(i),
                          edges=edges)
    def test_watertight(self):
        m = g.get_mesh('shared.STL')  # NOQA
        # assert m.is_watertight
        # assert m.is_winding_consistent
        # assert m.is_volume
    def test_traversals(self):
        # Test traversals (BFS+DFS)
        # generate some simple test data
        simple_nodes = g.np.arange(20)
        simple_edges = g.np.column_stack((simple_nodes[:-1],
                                          simple_nodes[1:]))
        # add a cycle edge plus two disconnected components
        simple_edges = g.np.vstack((
            simple_edges,
            [[19, 0],
             [10, 1000],
             [500, 501]])).astype(g.np.int64)
        all_edges = g.data['edges']
        all_edges.append(simple_edges)
        for edges in all_edges:
            edges = g.np.array(edges, dtype=g.np.int64)
            assert g.trimesh.util.is_shape(edges, (-1, 2))
            # collect the new nodes
            nodes = g.np.unique(edges)
            # the basic BFS/DFS traversal
            dfs_basic = g.trimesh.graph.traversals(edges, 'dfs')
            bfs_basic = g.trimesh.graph.traversals(edges, 'bfs')
            # check return types
            assert all(i.dtype == g.np.int64 for i in dfs_basic)
            assert all(i.dtype == g.np.int64 for i in bfs_basic)
            # check to make sure traversals visited every node
            dfs_set = set(g.np.hstack(dfs_basic))
            bfs_set = set(g.np.hstack(bfs_basic))
            nodes_set = set(nodes)
            assert dfs_set == nodes_set
            assert bfs_set == nodes_set
            # check traversal filling
            # fill_traversals should always include every edge
            # regardless of the path so test on bfs/dfs/empty
            for traversal in [dfs_basic, bfs_basic, []]:
                # disconnect consecutive nodes that are not edges
                # and add edges that were left off by jumps
                dfs = g.trimesh.graph.fill_traversals(traversal, edges)
                # edges that are included in the new separated traversal
                inc = g.trimesh.util.vstack_empty(
                    [g.np.column_stack((i[:-1], i[1:]))
                     for i in dfs])
                # make a set from edges included in the traversal
                inc_set = set(g.trimesh.grouping.hashable_rows(
                    g.np.sort(inc, axis=1)))
                # make a set of the source edges we were supposed to include
                edge_set = set(g.trimesh.grouping.hashable_rows(
                    g.np.sort(edges, axis=1)))
                # we should have exactly the same edges
                # after the filled traversal as we started with
                assert len(inc) == len(edges)
                # every edge should occur exactly once
                assert len(inc_set) == len(inc)
                # unique edges should be the same
                assert inc_set == edge_set
                # check all return dtypes
                assert all(i.dtype == g.np.int64 for i in dfs)
    def test_adjacency(self):
        # run once on clean meshes and once with an injected degenerate face
        for add_degen in [False, True]:
            for name in ['featuretype.STL', 'soup.stl']:
                m = g.get_mesh(name)
                if add_degen:
                    # make the first face degenerate
                    m.faces[0][2] = m.faces[0][0]
                # degenerate faces should be filtered
                assert g.np.not_equal(*m.face_adjacency.T).all()
                # check the various paths of calling face adjacency
                a = g.trimesh.graph.face_adjacency(
                    m.faces.view(g.np.ndarray).copy(),
                    return_edges=False)
                b, be = g.trimesh.graph.face_adjacency(
                    m.faces.view(g.np.ndarray).copy(),
                    return_edges=True)
                c = g.trimesh.graph.face_adjacency(
                    mesh=m, return_edges=False)
                c, ce = g.trimesh.graph.face_adjacency(
                    mesh=m, return_edges=True)
                # make sure they all return the expected result
                assert g.np.allclose(a, b)
                assert g.np.allclose(a, c)
                assert len(be) == len(a)
                assert len(ce) == len(a)
                # package properties to loop through
                zips = zip(m.face_adjacency,
                           m.face_adjacency_edges,
                           m.face_adjacency_unshared)
                for a, e, v in zips:
                    # get two adjacenct faces as a set
                    fa = set(m.faces[a[0]])
                    fb = set(m.faces[a[1]])
                    # face should be different
                    assert fa != fb
                    # shared edge should be in both faces
                    # removing 2 vertices should leave one
                    da = fa.difference(e)
                    db = fb.difference(e)
                    assert len(da) == 1
                    assert len(db) == 1
                    # unshared vertex should be correct
                    assert da.issubset(v)
                    assert db.issubset(v)
                    assert da != db
                    assert len(v) == 2
def check_engines(edges, nodes):
    """
    Make sure all connected-component graph engines return
    exactly the same components.

    Parameters
    ------------
    edges : (n, 2) int
        Graph edges to pass to each engine
    nodes : (m,) int
        Nodes the components must be drawn from
    """
    results = []
    for engine in (None, 'scipy', 'networkx'):
        c = g.trimesh.graph.connected_components(
            edges, nodes=nodes, engine=engine)
        if len(c) > 0:
            # check to see if every resulting component
            # was in the passed set of nodes
            diff = g.np.setdiff1d(g.np.hstack(c), nodes)
            assert len(diff) == 0
        # store the result as a set of tuples so we can compare
        results.append(set(tuple(sorted(i)) for i in c))
    # make sure different engines are returning the same thing
    try:
        assert all(i == results[0] for i in results[1:])
    except BaseException:
        print(results)
        # bare raise preserves the original exception and traceback
        raise
if __name__ == '__main__':
    # attach the trimesh logger to the console, then run the tests
    g.trimesh.util.attach_to_log()
    g.unittest.main()
| mit | 471a7b6b6bbbb3793784ab94bb8b1655 | 37.309091 | 78 | 0.529378 | 3.967985 | false | true | false | false |
mikedh/trimesh | examples/outlined.py | 2 | 1159 | """
outlined.py
--------------
Show a mesh with edges highlighted using GL_LINES
"""
import trimesh
import numpy as np
if __name__ == '__main__':
    mesh = trimesh.load('../models/featuretype.STL')
    # get edges we want to highlight by finding edges
    # that have sharp angles between adjacent faces
    # (here "sharp" means the dihedral angle exceeds 30 degrees)
    edges = mesh.face_adjacency_edges[mesh.face_adjacency_angles > np.radians(30)]
    # get a Path3D object for the edges we want to highlight
    path = trimesh.path.Path3D(**trimesh.path.exchange.misc.edges_to_path(
        edges, mesh.vertices.copy()))
    # set the mesh face colors to white
    mesh.visual.face_colors = [255, 255, 255, 255]
    # create a scene with both the mesh and the outline edges
    scene = trimesh.Scene([mesh, path])
    # set the camera resolution
    scene.camera.resolution = (4000, 2000)
    # set the camera transform to look at the mesh
    scene.camera_transform = scene.camera.look_at(
        points=mesh.vertices,
        rotation=trimesh.transformations.euler_matrix(np.pi / 3, 0, np.pi / 5))
    # write a PNG of the render
    # NOTE(review): save_image presumably requires an offscreen GL
    # renderer to be available — confirm before running headless
    with open('outlined.PNG', 'wb') as f:
        f.write(scene.save_image())
| mit | 8edbde8e2ebded589491bd1ef79e4b0e | 32.114286 | 82 | 0.664366 | 3.599379 | false | false | false | false |
mikedh/trimesh | trimesh/intersections.py | 1 | 27708 | """
intersections.py
------------------
Primarily mesh-plane intersections (slicing).
"""
import numpy as np
from . import util
from . import geometry
from . import grouping
from . import triangles as tm
from . import transformations as tf
from .constants import tol
def mesh_plane(mesh,
               plane_normal,
               plane_origin,
               return_faces=False,
               local_faces=None,
               cached_dots=None):
    """
    Find the intersections between a mesh and a plane,
    returning a set of line segments on that plane.
    Parameters
    ---------
    mesh : Trimesh object
      Source mesh to slice
    plane_normal : (3,) float
      Normal vector of plane to intersect with mesh
    plane_origin :  (3,) float
      Point on plane to intersect with mesh
    return_faces : bool
      If True return face index each line is from
    local_faces : None or (m,) int
      Limit section to just these faces.
    cached_dots : (n, 3) float
      If an external function has stored dot
      products pass them here to avoid recomputing.
    Returns
    ----------
    lines :  (m, 2, 3) float
      List of 3D line segments in space.
    face_index : (m,) int
      Index of mesh.faces for each line
      Only returned if return_faces was True
    """
    def triangle_cases(signs):
        """
        Figure out which faces correspond to which intersection
        case from the signs of the dot product of each vertex.
        Does this by bitbang each row of signs into an 8 bit
        integer.
        code : signs      : intersects
        0    : [-1 -1 -1] : No
        2    : [-1 -1  0] : No
        4    : [-1 -1  1] : Yes; 2 on one side, 1 on the other
        6    : [-1  0  0] : Yes; one edge fully on plane
        8    : [-1  0  1] : Yes; one vertex on plane 2 on different sides
        12   : [-1  1  1] : Yes; 2 on one side, 1 on the other
        14   : [0 0 0]    : No (on plane fully)
        16   : [0 0 1]    : Yes; one edge fully on plane
        20   : [0 1 1]    : No
        28   : [1 1 1]    : No
        Parameters
        ----------
        signs: (n,3) int, all values are -1,0, or 1
          Each row contains the dot product of all three vertices
          in a face with respect to the plane
        Returns
        ---------
        basic : (n,) bool
          Which faces are in the basic intersection case
        one_vertex : (n,) bool
          Which faces are in the one vertex case
        one_edge : (n,) bool
          Which faces are in the one edge case
        """
        signs_sorted = np.sort(signs, axis=1)
        # pack the three sorted signs into a single small integer:
        # offset of 14 keeps every possible code non-negative
        coded = np.zeros(len(signs_sorted), dtype=np.int8) + 14
        for i in range(3):
            coded += signs_sorted[:, i] << 3 - i
        # one edge fully on the plane
        # note that we are only accepting *one* of the on- edge cases,
        # where the other vertex has a positive dot product (16) instead
        # of both on- edge cases ([6, 16])
        # this is so that for regions that are co-planar with the the section plane
        # we don't end up with an invalid boundary
        key = np.zeros(29, dtype=bool)
        key[16] = True
        one_edge = key[coded]
        # one vertex on plane, other two on different sides
        key[:] = False
        key[8] = True
        one_vertex = key[coded]
        # one vertex on one side of the plane, two on the other
        key[:] = False
        key[[4, 12]] = True
        basic = key[coded]
        return basic, one_vertex, one_edge
    def handle_on_vertex(signs, faces, vertices):
        # case where one vertex is on plane
        # and two are on different sides
        vertex_plane = faces[signs == 0]
        edge_thru = faces[signs != 0].reshape((-1, 2))
        point_intersect, valid = plane_lines(
            plane_origin,
            plane_normal,
            vertices[edge_thru.T],
            line_segments=False)
        lines = np.column_stack((
            vertices[vertex_plane[valid]],
            point_intersect)).reshape((-1, 2, 3))
        return lines
    def handle_on_edge(signs, faces, vertices):
        # case where two vertices are on the plane and one is off
        # the shared edge IS the intersection segment
        edges = faces[signs == 0].reshape((-1, 2))
        points = vertices[edges]
        return points
    def handle_basic(signs, faces, vertices):
        # case where one vertex is on one side and two are on the other
        unique_element = grouping.unique_value_in_row(
            signs, unique=[-1, 1])
        # build the two edges that cross the plane for each face
        edges = np.column_stack(
            (faces[unique_element],
             faces[np.roll(unique_element, 1, axis=1)],
             faces[unique_element],
             faces[np.roll(unique_element, 2, axis=1)])).reshape(
            (-1, 2))
        intersections, valid = plane_lines(plane_origin,
                                           plane_normal,
                                           vertices[edges.T],
                                           line_segments=False)
        # since the data has been pre- culled, any invalid intersections at all
        # means the culling was done incorrectly and thus things are broken
        assert valid.all()
        return intersections.reshape((-1, 2, 3))
    # check input plane
    plane_normal = np.asanyarray(plane_normal, dtype=np.float64)
    plane_origin = np.asanyarray(plane_origin, dtype=np.float64)
    if plane_origin.shape != (3,) or plane_normal.shape != (3,):
        raise ValueError('Plane origin and normal must be (3,)!')
    if local_faces is None:
        # do a cross section against all faces
        faces = mesh.faces
    else:
        local_faces = np.asanyarray(
            local_faces, dtype=np.int64)
        # only take the subset of faces if passed
        faces = mesh.faces[local_faces]
    if cached_dots is not None:
        dots = cached_dots
    else:
        # dot product of each vertex with the plane normal indexed by face
        # so for each face the dot product of each vertex is a row
        # shape is the same as mesh.faces (n,3)
        dots = np.dot(mesh.vertices - plane_origin, plane_normal)
    # sign of the dot product is -1, 0, or 1
    # shape is the same as mesh.faces (n,3)
    # tol.merge acts as the "on plane" thickness threshold
    signs = np.zeros(len(mesh.vertices), dtype=np.int8)
    signs[dots < -tol.merge] = -1
    signs[dots > tol.merge] = 1
    signs = signs[faces]
    # figure out which triangles are in the cross section,
    # and which of the three intersection cases they are in
    cases = triangle_cases(signs)
    # handlers for each case
    handlers = (handle_basic,
                handle_on_vertex,
                handle_on_edge)
    # the (m, 2, 3) line segments
    lines = np.vstack([h(signs[c],
                         faces[c],
                         mesh.vertices)
                       for c, h in zip(cases, handlers)])
    if return_faces:
        # everything that hit something
        index = np.hstack([np.nonzero(c)[0] for c in cases])
        assert index.dtype.kind == 'i'
        if local_faces is None:
            return lines, index
        # we are considering a subset of faces
        # so we need to take the indexes from original
        return lines, local_faces[index]
    return lines
def mesh_multiplane(
        mesh,
        plane_origin,
        plane_normal,
        heights):
    """
    A utility function for slicing a mesh by multiple
    parallel planes which caches the dot product operation.
    Parameters
    -------------
    mesh : trimesh.Trimesh
      Geometry to be sliced by planes
    plane_origin : (3,) float
      Point on a plane
    plane_normal : (3,) float
      Normal vector of plane
    heights : (m,) float
      Offset distances from plane to slice at:
      at `height=0` it will be exactly on the passed plane.
    Returns
    --------------
    lines : (m,) sequence of (n, 2, 2) float
      Lines in space for m planes
    to_3D : (m, 4, 4) float
      Transform to move each section back to 3D
    face_index : (m,) sequence of (n,) int
      Indexes of mesh.faces for each segment
    """
    # check input plane
    plane_normal = util.unitize(plane_normal)
    plane_origin = np.asanyarray(plane_origin,
                                 dtype=np.float64)
    heights = np.asanyarray(heights, dtype=np.float64)
    # dot product of every vertex with plane
    # computed once here; each slice only offsets it by the height
    vertex_dots = np.dot(
        plane_normal,
        (mesh.vertices - plane_origin).T)
    # reconstruct transforms for each 2D section
    base_transform = geometry.plane_transform(
        origin=plane_origin,
        normal=plane_normal)
    base_transform = np.linalg.inv(base_transform)
    # alter translation Z inside loop
    translation = np.eye(4)
    # store results
    transforms = []
    face_index = []
    segments = []
    # loop through user specified heights
    for height in heights:
        # offset the origin by the height
        new_origin = plane_origin + (plane_normal * height)
        # offset the dot products by height and index by faces
        new_dots = vertex_dots - height
        # run the intersection with the cached dot products
        lines, index = mesh_plane(
            mesh=mesh,
            plane_origin=new_origin,
            plane_normal=plane_normal,
            return_faces=True,
            cached_dots=new_dots)
        # get the transforms to 3D space and back
        translation[2, 3] = height
        to_3D = np.dot(base_transform, translation)
        to_2D = np.linalg.inv(to_3D)
        transforms.append(to_3D)
        # transform points to 2D frame
        lines_2D = tf.transform_points(
            lines.reshape((-1, 3)), to_2D)
        # if we didn't screw up the transform all
        # of the Z values should be zero
        # assert np.allclose(lines_2D[:, 2], 0.0)
        # reshape back in to lines and discard Z
        lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
        # store (n, 2, 2) float lines
        segments.append(lines_2D)
        # store (n,) int indexes of mesh.faces
        face_index.append(index)
    # (n, 4, 4) transforms from 2D to 3D
    transforms = np.array(transforms, dtype=np.float64)
    return segments, transforms, face_index
def plane_lines(plane_origin,
                plane_normal,
                endpoints,
                line_segments=True):
    """
    Calculate plane-line intersections
    Parameters
    ---------
    plane_origin : (3,) float
        Point on plane
    plane_normal : (3,) float
        Plane normal vector
    endpoints : (2, n, 3) float
        Points defining lines to be tested
    line_segments : bool
        If True, only returns intersections as valid if
        vertices from endpoints are on different sides
        of the plane.
    Returns
    ---------
    intersections : (m, 3) float
        Cartesian intersection points
    valid : (n, 3) bool
        Indicate whether a valid intersection exists
        for each input line segment
    """
    endpoints = np.asanyarray(endpoints)
    plane_origin = np.asanyarray(plane_origin).reshape(3)
    line_dir = util.unitize(endpoints[1] - endpoints[0])
    plane_normal = util.unitize(np.asanyarray(plane_normal).reshape(3))
    # signed distance terms of the parametric intersection t = t / b
    t = np.dot(plane_normal, (plane_origin - endpoints[0]).T)
    b = np.dot(plane_normal, line_dir.T)
    # If the plane normal and line direction are perpendicular, it means
    # the vector is 'on plane', and there isn't a valid
    # intersection.
    # We discard on-plane vectors by checking that the dot product is nonzero
    valid = np.abs(b) > tol.zero
    if line_segments:
        test = np.dot(plane_normal,
                      np.transpose(plane_origin - endpoints[1]))
        # a segment crosses the plane only if its two endpoints fall on
        # different sides and at least one of them is off the plane
        different_sides = np.sign(t) != np.sign(test)
        nonzero = np.logical_or(np.abs(t) > tol.zero,
                                np.abs(test) > tol.zero)
        valid = np.logical_and(valid, different_sides)
        valid = np.logical_and(valid, nonzero)
    d = np.divide(t[valid], b[valid])
    intersection = endpoints[0][valid]
    intersection = intersection + np.reshape(d, (-1, 1)) * line_dir[valid]
    return intersection, valid
def planes_lines(plane_origins,
                 plane_normals,
                 line_origins,
                 line_directions,
                 return_distance=False,
                 return_denom=False):
    """
    Given one line per plane find the intersection points.
    Parameters
    -----------
    plane_origins : (n,3) float
        Point on each plane
    plane_normals : (n,3) float
        Normal vector of each plane
    line_origins : (n,3) float
        Point at origin of each line
    line_directions : (n,3) float
        Direction vector of each line
    return_distance : bool
        Return distance from origin to point also
    return_denom : bool
        Return denominator, so you can check for small values
    Returns
    ----------
    on_plane : (n,3) float
        Points on specified planes
    valid : (n,) bool
        Did plane intersect line or not
    distance :  (n,) float
        [OPTIONAL] Distance from point
    denom : (n,) float
        [OPTIONAL] Denominator
    """
    # check input types
    plane_origins = np.asanyarray(plane_origins, dtype=np.float64)
    plane_normals = np.asanyarray(plane_normals, dtype=np.float64)
    line_origins = np.asanyarray(line_origins, dtype=np.float64)
    line_directions = np.asanyarray(line_directions, dtype=np.float64)
    # vector from line to plane
    origin_vectors = plane_origins - line_origins
    # per-row dot products: numerator and denominator of the
    # parametric distance along each line
    projection_ori = util.diagonal_dot(origin_vectors, plane_normals)
    projection_dir = util.diagonal_dot(line_directions, plane_normals)
    # lines nearly parallel to their plane are marked invalid;
    # 1e-5 is a fixed angular tolerance on the direction-normal dot
    valid = np.abs(projection_dir) > 1e-5
    distance = np.divide(projection_ori[valid],
                         projection_dir[valid])
    on_plane = line_directions[valid] * distance.reshape((-1, 1))
    on_plane += line_origins[valid]
    result = [on_plane, valid]
    if return_distance:
        result.append(distance)
    if return_denom:
        result.append(projection_dir)
    return result
def slice_faces_plane(vertices,
                      faces,
                      plane_normal,
                      plane_origin,
                      face_index=None,
                      cached_dots=None):
    """
    Slice a mesh (given as a set of faces and vertices) with a plane, returning a
    new mesh (again as a set of faces and vertices) that is the
    portion of the original mesh to the positive normal side of the plane.
    Parameters
    ---------
    vertices : (n, 3) float
        Vertices of source mesh to slice
    faces : (n, 3) int
        Faces of source mesh to slice
    plane_normal : (3,) float
        Normal vector of plane to intersect with mesh
    plane_origin :  (3,) float
        Point on plane to intersect with mesh
    face_index : ((m,) int)
        Indexes of faces to slice. When no mask is provided, the
        default is to slice all faces.
    cached_dots : (n, 3) float
        If an external function has stored dot
        products pass them here to avoid recomputing
    Returns
    ----------
    new_vertices : (n, 3) float
        Vertices of sliced mesh
    new_faces : (n, 3) int
        Faces of sliced mesh
    """
    if len(vertices) == 0:
        return vertices, faces
    # Construct a mask for the faces to slice.
    if face_index is not None:
        faces = faces[face_index]
    if cached_dots is not None:
        dots = cached_dots
    else:
        # dot product of each vertex with the plane normal indexed by face
        # so for each face the dot product of each vertex is a row
        # shape is the same as faces (n,3)
        dots = np.dot(vertices - plane_origin, plane_normal)
    # Find vertex orientations w.r.t. faces for all triangles:
    #  -1 -> vertex "inside" plane (positive normal direction)
    #   0 -> vertex on plane
    #   1 -> vertex "outside" plane (negative normal direction)
    # NOTE: this sign convention is inverted relative to mesh_plane above
    signs = np.zeros(len(vertices), dtype=np.int8)
    signs[dots < -tol.merge] = 1
    signs[dots > tol.merge] = -1
    signs = signs[faces]
    # Find all triangles that intersect this plane
    # onedge <- indices of all triangles intersecting the plane
    # inside <- indices of all triangles "inside" the plane (positive normal)
    signs_sum = signs.sum(axis=1, dtype=np.int8)
    signs_asum = np.abs(signs).sum(axis=1, dtype=np.int8)
    # Cases:
    # (0,0,0),  (-1,0,0),  (-1,-1,0), (-1,-1,-1) <- inside
    # (1,0,0),  (1,1,0),   (1,1,1)               <- outside
    # (1,0,-1), (1,-1,-1), (1,1,-1)              <- onedge
    onedge = np.logical_and(
        signs_asum >= 2,
        np.abs(signs_sum) <= 1)
    inside = signs_sum == -signs_asum
    # for any faces that lie exactly on-the-plane
    # we want to only include them if their normal
    # is backwards from the slicing normal
    on_plane = signs_asum == 0
    if on_plane.any():
        # compute the normals and whether
        # face is degenerate here
        check, valid = tm.normals(vertices[faces[on_plane]])
        # only include faces back from normal
        dot_check = np.dot(check, plane_normal)
        # exclude any degenerate faces from the result
        inside[on_plane] = valid
        # exclude the degenerate face from our mask
        on_plane[on_plane] = valid
        # apply results for this subset
        inside[on_plane] = dot_check < 0.0
    # Automatically include all faces that are "inside"
    new_faces = faces[inside]
    # Separate faces on the edge into two cases: those which will become
    # quads (two vertices inside plane) and those which will become triangles
    # (one vertex inside plane)
    triangles = vertices[faces]
    cut_triangles = triangles[onedge]
    cut_faces_quad = faces[np.logical_and(onedge, signs_sum < 0)]
    cut_faces_tri = faces[np.logical_and(onedge, signs_sum >= 0)]
    cut_signs_quad = signs[np.logical_and(onedge, signs_sum < 0)]
    cut_signs_tri = signs[np.logical_and(onedge, signs_sum >= 0)]
    # If no faces to cut, the surface is not in contact with this plane.
    # Thus, return a mesh with only the inside faces
    if len(cut_faces_quad) + len(cut_faces_tri) == 0:
        if len(new_faces) == 0:
            # if no new faces at all return empty arrays
            empty = (np.zeros((0, 3), dtype=np.float64),
                     np.zeros((0, 3), dtype=np.int64))
            return empty
        # find the unique indices in the new faces
        # using an integer-only unique function
        unique, inverse = grouping.unique_bincount(new_faces.reshape(-1),
                                                   minlength=len(vertices),
                                                   return_inverse=True)
        # use the unique indices for our final vertices and faces
        final_vert = vertices[unique]
        final_face = inverse.reshape((-1, 3))
        return final_vert, final_face
    # Extract the intersections of each triangle's edges with the plane
    o = cut_triangles  # origins
    d = np.roll(o, -1, axis=1) - o  # directions
    num = (plane_origin - o).dot(plane_normal)  # compute num/denom
    denom = np.dot(d, plane_normal)
    denom[denom == 0.0] = 1e-12  # prevent division by zero
    dist = np.divide(num, denom)
    # intersection points for each segment
    int_points = np.einsum('ij,ijk->ijk', dist, d) + o
    # Initialize the array of new vertices with the current vertices
    new_vertices = vertices
    # Handle the case where a new quad is formed by the intersection
    # First, extract the intersection points belonging to a new quad
    quad_int_points = int_points[(signs_sum < 0)[onedge], :, :]
    num_quads = len(quad_int_points)
    if num_quads > 0:
        # Extract the vertex on the outside of the plane, then get the vertices
        # (in CCW order of the inside vertices)
        quad_int_inds = np.where(cut_signs_quad == 1)[1]
        quad_int_verts = cut_faces_quad[
            np.stack((range(num_quads), range(num_quads)), axis=1),
            np.stack(((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1)]
        # Fill out new quad faces with the intersection points as vertices
        # (new vertex indices start at the current end of new_vertices)
        new_quad_faces = np.append(
            quad_int_verts,
            np.arange(len(new_vertices),
                      len(new_vertices) +
                      2 * num_quads).reshape(num_quads, 2), axis=1)
        # Extract correct intersection points from int_points and order them in
        # the same way as they were added to faces
        new_quad_vertices = quad_int_points[
            np.stack((range(num_quads), range(num_quads)), axis=1),
            np.stack((((quad_int_inds + 2) % 3).T, quad_int_inds.T),
                     axis=1), :].reshape(2 * num_quads, 3)
        # Add new vertices to existing vertices, triangulate quads, and add the
        # resulting triangles to the new faces
        new_vertices = np.append(new_vertices, new_quad_vertices, axis=0)
        new_tri_faces_from_quads = geometry.triangulate_quads(new_quad_faces)
        new_faces = np.append(new_faces, new_tri_faces_from_quads, axis=0)
    # Handle the case where a new triangle is formed by the intersection
    # First, extract the intersection points belonging to a new triangle
    tri_int_points = int_points[(signs_sum >= 0)[onedge], :, :]
    num_tris = len(tri_int_points)
    if num_tris > 0:
        # Extract the single vertex for each triangle inside the plane and get the
        # inside vertices (CCW order)
        tri_int_inds = np.where(cut_signs_tri == -1)[1]
        tri_int_verts = cut_faces_tri[range(
            num_tris), tri_int_inds].reshape(num_tris, 1)
        # Fill out new triangles with the intersection points as vertices
        new_tri_faces = np.append(
            tri_int_verts,
            np.arange(len(new_vertices),
                      len(new_vertices) +
                      2 * num_tris).reshape(num_tris, 2),
            axis=1)
        # Extract correct intersection points and order them in the same way as
        # the vertices were added to the faces
        new_tri_vertices = tri_int_points[
            np.stack((range(num_tris), range(num_tris)), axis=1),
            np.stack((tri_int_inds.T, ((tri_int_inds + 2) % 3).T),
                     axis=1),
            :].reshape(2 * num_tris, 3)
        # Append new vertices and new faces
        new_vertices = np.append(new_vertices, new_tri_vertices, axis=0)
        new_faces = np.append(new_faces, new_tri_faces, axis=0)
    # find the unique indices in the new faces
    # using an integer-only unique function
    unique, inverse = grouping.unique_bincount(new_faces.reshape(-1),
                                               minlength=len(new_vertices),
                                               return_inverse=True)
    # use the unique indexes for our final vertex and faces
    final_vert = new_vertices[unique]
    final_face = inverse.reshape((-1, 3))
    return final_vert, final_face
def slice_mesh_plane(mesh,
                     plane_normal,
                     plane_origin,
                     face_index=None,
                     cap=False,
                     cached_dots=None,
                     **kwargs):
    """
    Slice a mesh with a plane returning a new mesh that is the
    portion of the original mesh to the positive normal side
    of the plane.

    Parameters
    ---------
    mesh : Trimesh object
      Source mesh to slice
    plane_normal : (3,) float
      Normal vector of plane to intersect with mesh
    plane_origin : (3,) float
      Point on plane to intersect with mesh
    cap : bool
      If True, cap the result with a triangulated polygon
    face_index : ((m,) int)
      Indexes of mesh.faces to slice. When no mask is provided, the
      default is to slice all faces.
    cached_dots : (n, 3) float
      Kept for backwards compatibility; not used by this
      implementation (dots are recomputed per plane).
    kwargs : dict
      Passed to the newly created sliced mesh

    Returns
    ----------
    new_mesh : Trimesh object
      Sliced mesh
    """
    # check input for none
    if mesh is None:
        return None

    # avoid circular import
    from .base import Trimesh
    from .creation import triangulate_polygon
    from .path import polygons
    from scipy.spatial import cKDTree

    # check input plane
    plane_normal = np.asanyarray(
        plane_normal, dtype=np.float64)
    plane_origin = np.asanyarray(
        plane_origin, dtype=np.float64)

    # check to make sure origins and normals have acceptable shape
    shape_ok = ((plane_origin.shape == (3,) or
                 util.is_shape(plane_origin, (-1, 3))) and
                (plane_normal.shape == (3,) or
                 util.is_shape(plane_normal, (-1, 3))) and
                plane_origin.shape == plane_normal.shape)
    if not shape_ok:
        raise ValueError('plane origins and normals must be (n, 3)!')

    # start with copy of original mesh, faces, and vertices
    vertices = mesh.vertices.copy()
    faces = mesh.faces.copy()

    # only run automatic processing on the result if
    # the caller explicitly asked for it
    if 'process' not in kwargs:
        kwargs['process'] = False

    # slice away specified planes
    for origin, normal in zip(plane_origin.reshape((-1, 3)),
                              plane_normal.reshape((-1, 3))):
        # save the new vertices and faces
        vertices, faces = slice_faces_plane(
            vertices=vertices,
            faces=faces,
            plane_normal=normal,
            plane_origin=origin,
            face_index=face_index)
        # check if cap arg specified
        if cap:
            # use an explicit None-check: `if face_index:` raises
            # "truth value is ambiguous" for multi-element arrays
            if face_index is not None:
                # This hasn't been implemented yet.
                raise NotImplementedError(
                    "face_index and cap can't be used together")
            # start by deduplicating vertices again
            unique, inverse = grouping.unique_rows(vertices)
            vertices = vertices[unique]
            # will collect additional faces
            f = inverse[faces]
            # remove degenerate faces by checking that every face
            # has three unique indices: the previous check only
            # compared the first column against the other two and
            # missed faces where f[:, 1] == f[:, 2]
            f = f[(f[:, 0] != f[:, 1]) &
                  (f[:, 1] != f[:, 2]) &
                  (f[:, 2] != f[:, 0])]
            # transform to the cap plane
            to_2D = geometry.plane_transform(
                origin=origin,
                normal=-normal)
            to_3D = np.linalg.inv(to_2D)
            vertices_2D = tf.transform_points(vertices, to_2D)
            edges = geometry.faces_to_edges(f)
            edges.sort(axis=1)
            # select edges which lie on the slicing plane
            on_plane = np.abs(vertices_2D[:, 2]) < 1e-8
            edges = edges[on_plane[edges].all(axis=1)]
            edges = edges[edges[:, 0] != edges[:, 1]]
            # boundary edges appear exactly once
            unique_edge = grouping.group_rows(
                edges, require_count=1)
            if len(unique) < 3:
                continue
            tree = cKDTree(vertices)
            # collect new faces
            faces = [f]
            for p in polygons.edges_to_polygons(
                    edges[unique_edge], vertices_2D[:, :2]):
                vn, fn = triangulate_polygon(p)
                # collect the original index for the new vertices
                vn3 = tf.transform_points(util.stack_3D(vn), to_3D)
                distance, vid = tree.query(vn3)
                if distance.max() > 1e-8:
                    util.log.debug('triangulate may have inserted vertex!')
                # triangulation should not have inserted vertices
                faces.append(vid[fn])
            faces = np.vstack(faces)

    # return the sliced mesh
    return Trimesh(vertices=vertices, faces=faces, **kwargs)
| mit | 606feae38be79e43a54547306d042ef6 | 34.79845 | 83 | 0.582287 | 3.948133 | false | false | false | false |
smarkets/smk_python_sdk | smarkets/uuid.py | 1 | 8456 | from __future__ import absolute_import
"""
Utility methods for dealing with Smarkets UUIDS.
There are 3 main representations of IDs used in Smarkets:
- Integers (as in the API)
- Tagged UUIDs (used mostly on non-user-facing bits of the site)
- "Friendly" IDs/slugs (used on user-facing bits of the site)
"""
import logging
from collections import namedtuple
from six import binary_type, integer_types, string_types
from six.moves import reduce
# module-level logger
log = logging.getLogger(__name__)

# plain data carriers; behavior is attached by the subclasses below
UuidTagBase = namedtuple('UuidTagBase', ['name', 'int_tag', 'prefix'])
UuidBase = namedtuple('UuidBase', ['number', 'tag'])
class UuidTag(UuidTagBase):  # pylint: disable=E1001
    """Tag metadata for a uuid: name, 16-bit integer tag and slug prefix."""
    __slots__ = ()
    # tags occupy the low 16 bits of a tagged number
    tag_mult = 1 << 16

    @property
    def hex_str(self):
        """Zero-padded 4-digit hex form of the integer tag."""
        return '%04x' % self.int_tag

    def tag_number(self, number):
        """Return ``number`` shifted up 16 bits with this tag folded in."""
        return number * self.tag_mult + self.int_tag

    @classmethod
    def split_int_tag(cls, number):
        """Split a tagged number into (untagged number, integer tag)."""
        return divmod(number, cls.tag_mult)
# the full set of known entity tags:
# (human-readable name, 16-bit integer tag, single-char slug prefix)
TAGS = (
    UuidTag('Account', int('acc1', 16), 'a'),
    UuidTag('ContractGroup', int('c024', 16), 'm'),
    UuidTag('Contract', int('cccc', 16), 'c'),
    UuidTag('Order', int('fff0', 16), 'o'),
    UuidTag('Comment', int('b1a4', 16), 'b'),
    UuidTag('Entity', int('0444', 16), 'n'),
    UuidTag('Event', int('1100', 16), 'e'),
    UuidTag('Session', int('9999', 16), 's'),
    UuidTag('User', int('0f00', 16), 'u'),
    UuidTag('Referrer', int('4e4e', 16), 'r'),
)
class Uuid(UuidBase):  # pylint: disable=E1001
    """Represents a tagged Smarkets UUID: a (number, tag) pair."""
    __slots__ = ()
    # default alphabet for slug encoding; a slug "base" uses a
    # prefix of this string, so base 36 is digits + lowercase
    chars = (
        '0123456789'
        'abcdefghijklmnopqrstuvwxyz'
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    )
    # Various indexes into uuid map
    tags = dict((t.name, t) for t in TAGS)
    tags_by_hex_str = dict((t.hex_str, t) for t in TAGS)
    tags_by_prefix = dict((t.prefix, t) for t in TAGS)
    tags_by_int_tag = dict((t.int_tag, t) for t in TAGS)
    # mask to extract the low 64 bits of a 128-bit value
    mask64 = (1 << 64) - 1

    @property
    def low(self):
        "Lower 64 bits of number"
        return self.number & self.mask64

    @property
    def high(self):
        "Higher 64 bits of number"
        return (self.number >> 64) & self.mask64

    @property
    def shorthex(self):
        "Short hex representation of Uuid (no tag, no zero padding)"
        return '%x' % self.number

    def to_slug(self, prefix=True, base=36, chars=None, pad=0):
        """Convert to slug representation.

        :param prefix: prepend the tag's single-char prefix and a dash
        :param base: numeric base, between 2 and ``len(chars)``
        :param chars: alternative alphabet; defaults to ``Uuid.chars``
        :param pad: left-pad the slug with '0' to this length
        :raises TypeError: if ``base`` is out of range
        """
        if chars is None:
            chars = self.chars
        if base < 2 or base > len(chars):
            raise TypeError("base must be between 2 and %s" % len(chars))
        chars = chars[:base]
        # fold the tag into the low 16 bits before encoding
        number = self.tag.tag_number(self.number)
        slug = self.pad_uuid(self.base_n(number, chars), pad=pad)
        if prefix:
            return '%s-%s' % (self.tag.prefix, slug)
        else:
            return slug

    def to_hex(self, pad=32):
        "Convert to tagged hex representation, left-padded with zeros"
        # the 4 hex chars of the tag are appended after the number
        hex_str = '%x%s' % (self.number, self.tag.hex_str)
        return self.pad_uuid(hex_str, pad=pad)

    @staticmethod
    def base_n(number, chars):
        "Recursive helper for calculating a number in base len(chars)"
        return ((number == 0) and "0") \
            or (Uuid.base_n(number // (len(chars)), chars).lstrip("0") +
                chars[number % (len(chars))])

    @staticmethod
    def pad_uuid(uuid, pad=32, padchar='0'):
        "Pads a UUID with <pad> <padchar>s"
        return padchar * (pad - len(uuid)) + uuid

    @classmethod
    def unsplit64(cls, high, low):
        "Converts a high/low 64-bit integer pair into a 128-bit large integer"
        return ((high & cls.mask64) << 64) | (low & cls.mask64)

    @classmethod
    def from_int(cls, number, ttype):
        """Convert an integer and tag type name to a Uuid.

        :param number: non-negative int or a (high, low) 64-bit pair
        :param ttype: tag name, e.g. 'Account'
        :raises TypeError: if ``number`` is not a non-negative integer
        :raises ValueError: if ``ttype`` is not a known tag name
        """
        if isinstance(number, tuple):
            number = cls.unsplit64(*number)
        if not isinstance(number, integer_types):
            raise TypeError("Number must be an integer: %r" % number)
        if number < 0:
            raise TypeError("Number cannot be negative: %r" % number)
        tag = cls.tags.get(ttype)
        if tag is None:
            raise ValueError("invalid type: %r" % ttype)
        return cls(number, tag)

    @classmethod
    def from_slug(cls, slug, base=36, chars=None):
        """Convert a slug (optionally prefixed, e.g. 'a-...') into a Uuid.

        :raises TypeError: on a non-string slug or out-of-range base
        :raises ValueError: on an unknown tag or mismatched prefix
        """
        if not isinstance(slug, string_types):
            raise TypeError("slug must be a string: %r" % slug)
        if chars is None:
            chars = cls.chars
        if base < 2 or base > len(chars):
            raise TypeError("base must be between 2 and %s" % len(chars))
        if base <= 36:
            # bases up to 36 are treated as case-insensitive
            slug = slug.lower()
        chars = chars[:base]
        index = dict(zip(chars, range(0, len(chars))))
        prefix = None
        if len(slug) > 1 and '-' in slug:
            # We have a prefix
            prefix, slug = slug.split('-')
        # evaluate the slug digits most-significant first
        number = reduce(lambda acc, val: acc + val[0] * len(index) ** val[1],
                        zip([index[x] for x in slug],
                            reversed(range(0, len(slug)))), 0)
        # the low 16 bits of the decoded number are the tag
        number, int_tag = UuidTag.split_int_tag(number)
        tag = cls.tags_by_int_tag.get(int_tag)
        if tag is None:
            raise ValueError("invalid integer tag: %r" % int_tag)
        if prefix and tag != cls.tags_by_prefix.get(prefix):
            raise ValueError("prefix %r did not match tag %r" % (prefix, tag))
        return cls(number, tag)

    @classmethod
    def from_hex(cls, hex_str):
        """Convert a tagged hex uuid into a Uuid.

        :type hex_str: byte string or unicode string
        :raises TypeError: if ``hex_str`` is not a string
        :raises ValueError: if the trailing 4 hex chars are no known tag
        """
        if isinstance(hex_str, binary_type):
            hex_str = hex_str.decode()
        if not isinstance(hex_str, string_types):
            raise TypeError("hex_str must be a string: %r" % hex_str)
        # the last 4 hex characters carry the tag
        hex_tag = hex_str[-4:]
        number = int(hex_str[:-4], 16)
        tag = cls.tags_by_hex_str.get(hex_tag)
        if tag is None:
            raise ValueError("invalid hex tag: %r" % hex_tag)
        return cls(number, tag)
def int_to_slug(number, ttype):
    """Render a large integer of the given tag type as a slug string."""
    parsed = Uuid.from_int(number, ttype)
    return parsed.to_slug()
def slug_to_int(slug, return_tag=None, split=False):
    """
    Parse a slug into its integer value.

    With ``split=True`` the value is returned as a
    (high, low) pair of 64-bit halves. ``return_tag`` may be
    'type' or 'int' to also return the tag name or integer tag.
    """
    parsed = Uuid.from_slug(slug)
    if split:
        number = (parsed.high, parsed.low)
    else:
        number = parsed.number
    if return_tag == 'type':
        return (number, parsed.tag.name)
    if return_tag == 'int':
        return (number, parsed.tag.int_tag)
    return number
def uuid_to_slug(number, prefix=True):
    """
    Convert a Smarkets UUID (128-bit tagged hex string)
    into its slug representation.
    """
    parsed = Uuid.from_hex(number)
    return parsed.to_slug(prefix=prefix)
def slug_to_uuid(slug):
    """
    Convert a slug into a Smarkets UUID (tagged hex string).
    """
    parsed = Uuid.from_slug(slug)
    return parsed.to_hex()
def int_to_uuid(number, ttype):
    """Convert an untagged integer into a tagged hex uuid.

    :type ttype: str or unicode on Python 2, str on Python 3
    """
    parsed = Uuid.from_int(number, ttype)
    return parsed.to_hex()
def uuid_to_int(uuid, return_tag=None, split=False):
    """
    Convert a tagged hex uuid into an integer.

    With ``split=True`` the value is returned as a
    (high, low) pair of 64-bit halves. ``return_tag`` may be
    'type' or 'int' to also return the tag name or integer tag.
    """
    parsed = Uuid.from_hex(uuid)
    if split:
        number = (parsed.high, parsed.low)
    else:
        number = parsed.number
    if return_tag == 'type':
        return (number, parsed.tag.name)
    if return_tag == 'int':
        return (number, parsed.tag.int_tag)
    return number
def uid_or_int_to_int(value, expected_type):
    """
    Accept either an integer or a tagged hex uuid and return the
    integer value, raising ValueError when a uuid's tag does not
    match ``expected_type``.
    """
    if isinstance(value, integer_types):
        return value
    number, type_ = uuid_to_int(value, return_tag='type')
    if type_ != expected_type:
        raise ValueError("Expected tag %r doesn't match %r" % (expected_type, type_))
    return number
# named functions rather than lambda assignments (PEP 8, E731);
# public names and one-argument call signatures are unchanged


def contract_group_id_to_uid(id_):
    "Convert an integer id to a ContractGroup uuid"
    return int_to_uuid(id_, 'ContractGroup')


def contract_id_to_uid(id_):
    "Convert an integer id to a Contract uuid"
    return int_to_uuid(id_, 'Contract')


def event_id_to_uid(id_):
    "Convert an integer id to an Event uuid"
    return int_to_uuid(id_, 'Event')


def order_id_to_uid(id_):
    "Convert an integer id to an Order uuid"
    return int_to_uuid(id_, 'Order')


def account_id_to_uid(id_):
    "Convert an integer id to an Account uuid"
    return int_to_uuid(id_, 'Account')


def entity_id_to_uid(id_):
    "Convert an integer id to an Entity uuid"
    return int_to_uuid(id_, 'Entity')


def user_id_to_uid(id_):
    "Convert an integer id to a User uuid"
    return int_to_uuid(id_, 'User')


def session_id_to_uid(id_):
    "Convert an integer id to a Session uuid"
    return int_to_uuid(id_, 'Session')
def uuid_to_short(uuid):
    """
    Convert a full tagged uuid to its shortened form by dropping
    the trailing 4-character tag and any leading zero padding.
    """
    trimmed = uuid[:-4]
    return trimmed.lstrip('0')
| mit | bb365cbb9331a0c3e5aba832e53e4178 | 31.152091 | 89 | 0.594134 | 3.402817 | false | false | false | false |
mikedh/trimesh | trimesh/scene/transforms.py | 1 | 26298 | import numpy as np
import collections
from copy import deepcopy
from .. import util
from .. import caching
from .. import transformations
from ..caching import hash_fast
# we compare to identity a lot: build it once and freeze it so
# shared references cannot be mutated in place by callers
_identity = np.eye(4)
_identity.flags['WRITEABLE'] = False
class SceneGraph(object):
    """
    Hold data about positions and instances of geometry
    in a scene. This includes a forest (i.e. multi-root tree)
    of transforms and information on which node is the base
    frame, and which geometries are affiliated with which
    nodes.
    """

    def __init__(self, base_frame='world'):
        """
        Create a scene graph, holding homogeneous transformation
        matrices and instance information about geometry.

        Parameters
        -----------
        base_frame : any
          The root node transforms will be positioned from.
        """
        # forest of transforms: every node has at most one parent
        self.transforms = EnforcedForest()
        # hashable, the base or root frame
        self.base_frame = base_frame
        # cache transformation matrices keyed with tuples
        self._cache = caching.Cache(self.__hash__)

    def update(self, frame_to, frame_from=None, **kwargs):
        """
        Update a transform in the tree.

        Parameters
        ------------
        frame_from : hashable object
          Usually a string (eg 'world').
          If left as None it will be set to self.base_frame
        frame_to : hashable object
          Usually a string (eg 'mesh_0')
        matrix : (4, 4) float
          Homogeneous transformation matrix
        quaternion : (4,) float
          Quaternion ordered [w, x, y, z]
        axis : (3,) float
          Axis of rotation
        angle : float
          Angle of rotation, in radians
        translation : (3,) float
          Distance to translate
        geometry : hashable
          Geometry object name, e.g. 'mesh_0'
        extras : dictionary
          Optional metadata attached to the new frame
          (exports to glTF node 'extras').
        """
        # if no frame specified, use base frame
        if frame_from is None:
            frame_from = self.base_frame
        # pass through only the attributes stored on edges
        attr = {k: v for k, v in kwargs.items()
                if k in {'geometry', 'extras'}}
        # convert various kwargs to a single matrix
        attr['matrix'] = kwargs_to_matrix(**kwargs)
        # add the edge for the transform;
        # will return whether it changed anything
        self.transforms.add_edge(frame_from, frame_to, **attr)
        # set the node attribute with the geometry information
        if 'geometry' in kwargs:
            self.transforms.node_data[
                frame_to]['geometry'] = kwargs['geometry']

    def get(self, frame_to, frame_from=None):
        """
        Get the transform from one frame to another.

        Parameters
        ------------
        frame_to : hashable
          Node name, usually a string (eg 'mesh_0')
        frame_from : hashable
          Node name, usually a string (eg 'world').
          If None it will be set to self.base_frame

        Returns
        ----------
        transform : (4, 4) float
          Homogeneous transformation matrix
        geometry : any
          The name of the geometry attached to `frame_to`,
          or None if the node has no geometry
          (note: returned as a `(transform, geometry)` tuple)

        Raises
        -----------
        ValueError
          If the frames aren't connected.
        """
        # use base frame if not specified
        if frame_from is None:
            frame_from = self.base_frame
        # look up transform to see if we have it already
        key = (frame_from, frame_to)
        if key in self._cache:
            return self._cache[key]
        # get the geometry at the final node if any
        geometry = self.transforms.node_data[
            frame_to].get('geometry')
        # get a local reference to edge data
        data = self.transforms.edge_data
        if frame_from == frame_to:
            # if we're going from ourself return identity
            matrix = _identity
        elif key in data:
            # if the path is just an edge return early
            matrix = data[key]['matrix']
        else:
            # we have a 3+ node path
            # get the path from the forest always going from
            # parent -> child -> child
            path = self.transforms.shortest_path(
                frame_from, frame_to)
            # the path should always start with `frame_from`
            assert path[0] == frame_from
            # and end with the `frame_to` node
            assert path[-1] == frame_to
            # loop through pairs of the path
            matrices = []
            for u, v in zip(path[:-1], path[1:]):
                forward = data.get((u, v))
                if forward is not None:
                    if 'matrix' in forward:
                        # append the matrix from u to v
                        matrices.append(forward['matrix'])
                    continue
                # since forwards didn't exist backward must
                # exist otherwise this is a disconnected path
                # and we should raise an error anyway
                backward = data[(v, u)]
                if 'matrix' in backward:
                    # append the inverted backwards matrix
                    matrices.append(
                        np.linalg.inv(backward['matrix']))
            # filter out any identity matrices
            matrices = [m for m in matrices if
                        np.abs((m - _identity)).max() > 1e-8]
            if len(matrices) == 0:
                matrix = _identity
            elif len(matrices) == 1:
                matrix = matrices[0]
            else:
                # multiply matrices into single transform
                matrix = util.multi_dot(matrices)
        # matrix being edited in-place leads to subtle bugs
        matrix.flags['WRITEABLE'] = False
        # store the result
        self._cache[key] = (matrix, geometry)
        return matrix, geometry

    def __hash__(self):
        # delegate to the forest: its hash covers topology,
        # matrices and geometry assignments
        return self.transforms.__hash__()

    def copy(self):
        """
        Return a copy of the current TransformForest.

        Returns
        ------------
        copied : TransformForest
          Copy of current object.
        """
        # create a copy without transferring cache
        copied = SceneGraph()
        copied.base_frame = deepcopy(self.base_frame)
        copied.transforms = deepcopy(self.transforms)
        return copied

    def to_flattened(self):
        """
        Export the current transform graph with all
        transforms baked into world->instance.

        Returns
        ---------
        flat : dict
          Keyed {node : {transform, geometry}}
        """
        flat = {}
        base_frame = self.base_frame
        for node in self.nodes:
            if node == base_frame:
                continue
            # get the matrix and geometry name
            matrix, geometry = self.get(
                frame_to=node, frame_from=base_frame)
            # store matrix as list rather than numpy array
            flat[node] = {'transform': matrix.tolist(),
                          'geometry': geometry}
        return flat

    def to_gltf(self, scene, mesh_index=None):
        """
        Export transforms as the 'nodes' section of the
        GLTF header dict.

        Parameters
        ------------
        scene : trimesh.Scene
          Scene with geometry.
        mesh_index : dict or None
          Mapping { key in scene.geometry : int }

        Returns
        --------
        gltf : dict
          With 'nodes' referencing a list of dicts
        """
        if mesh_index is None:
            # geometry is an OrderedDict
            # map mesh name to index: {geometry key : index}
            mesh_index = {name: i for i, name
                          in enumerate(scene.geometry.keys())}
        # get graph information into local scope before loop
        graph = self.transforms
        # get the stored node data
        node_data = graph.node_data
        edge_data = graph.edge_data
        base_frame = self.base_frame
        # list of dict, in gltf format
        # start with base frame as first node index
        result = [{'name': base_frame}]
        # {node name : node index in gltf}
        lookup = {base_frame: 0}
        # collect the nodes in order
        for node in node_data.keys():
            if node == base_frame:
                continue
            # assign the index to the node-name lookup
            lookup[node] = len(result)
            # populate a result at the correct index
            result.append({'name': node})
        # get generated properties outside of loop
        # does the scene have a defined camera to export
        has_camera = scene.has_camera
        children = graph.children
        # then iterate through to collect data
        for info in result:
            # name of the scene node
            node = info['name']
            # get the original node names for children
            childs = children.get(node, [])
            if len(childs) > 0:
                info['children'] = [lookup[k] for k in childs]
            # if we have a mesh store by index
            if 'geometry' in node_data[node]:
                mesh_key = node_data[node]['geometry']
                if mesh_key in mesh_index:
                    info['mesh'] = mesh_index[mesh_key]
            # check to see if we have camera node
            if has_camera and node == scene.camera.name:
                info['camera'] = 0
            if node != base_frame:
                parent = graph.parents[node]
                # get the matrix from this edge
                matrix = edge_data[(parent, node)]['matrix']
                # only include if it's not an identity matrix
                # (glTF expects column-major order, hence the transpose)
                if not util.allclose(matrix, _identity):
                    info['matrix'] = matrix.T.reshape(-1).tolist()
                # if an extra was stored on this edge
                extras = edge_data[(parent, node)].get('extras')
                if extras:
                    # convert any numpy arrays to lists
                    extras.update(
                        {k: v.tolist() for k, v in extras.items()
                         if hasattr(v, 'tolist')})
                    info['extras'] = extras
        return {'nodes': result}

    def to_edgelist(self):
        """
        Export the current transforms as a list of
        edge tuples, with each tuple having the format:
        (node_a, node_b, {metadata})

        Returns
        ---------
        edgelist : (n,) list
          Of edge tuples
        """
        # save local reference to node_data
        nodes = self.transforms.node_data
        # save cleaned edges
        export = []
        # loop through (node, node, edge attributes)
        for edge, attr in self.transforms.edge_data.items():
            # node indexes from edge
            a, b = edge
            # geometry is a node property but save it to the
            # edge so we don't need two dictionaries
            b_attr = nodes[b]
            # make sure we're not stomping on original
            attr_new = attr.copy()
            # apply node geometry to edge attributes
            if 'geometry' in b_attr:
                attr_new['geometry'] = b_attr['geometry']
            # convert any numpy arrays to regular lists
            attr_new.update(
                {k: v.tolist() for k, v in attr_new.items()
                 if hasattr(v, 'tolist')})
            export.append([a, b, attr_new])
        return export

    def from_edgelist(self, edges, strict=True):
        """
        Load transform data from an edge list into the current
        scene graph.

        Parameters
        -------------
        edgelist : (n,) tuples
          Keyed (node_a, node_b, {key: value})
        strict : bool
          If True raise a ValueError when a
          malformed edge is passed in a tuple.
        """
        # loop through each edge
        for edge in edges:
            # edge contains attributes
            if len(edge) == 3:
                self.update(edge[1], edge[0], **edge[2])
            # edge just contains nodes
            elif len(edge) == 2:
                self.update(edge[1], edge[0])
            # edge is broken
            elif strict:
                # NOTE(review): the comma here passes str(edge) as a
                # second exception arg rather than formatting it into
                # the message (logging-style '%s' is never substituted)
                raise ValueError(
                    'edge incorrect shape: %s', str(edge))

    def to_networkx(self):
        """
        Return a `networkx` copy of this graph.

        Returns
        ----------
        graph : networkx.DiGraph
          Directed graph.
        """
        import networkx
        return networkx.from_edgelist(
            self.to_edgelist(),
            create_using=networkx.DiGraph)

    def show(self, **kwargs):
        """
        Plot the scene graph using `networkx.draw_networkx`
        which uses matplotlib to display the graph.

        Parameters
        -----------
        kwargs : dict
          Passed to `networkx.draw_networkx`
        """
        import networkx
        import matplotlib.pyplot as plt
        # default kwargs will only be set if not
        # passed explicitly to the show command
        defaults = {'with_labels': True}
        kwargs.update(**{k: v for k, v in defaults.items()
                         if k not in kwargs})
        networkx.draw_networkx(
            G=self.to_networkx(),
            **kwargs)
        plt.show()

    def load(self, edgelist):
        """
        Load transform data from an edge list into the current
        scene graph.

        Parameters
        -------------
        edgelist : (n,) tuples
          Structured (node_a, node_b, {key: value})
        """
        self.from_edgelist(edgelist, strict=True)

    @caching.cache_decorator
    def nodes(self):
        """
        A list of every node in the graph.

        Returns
        -------------
        nodes : (n,) array
          All node names.
        """
        return self.transforms.nodes

    @caching.cache_decorator
    def nodes_geometry(self):
        """
        The nodes in the scene graph with geometry attached.

        Returns
        ------------
        nodes_geometry : (m,) array
          Node names which have geometry associated
        """
        return [n for n, attr in
                self.transforms.node_data.items()
                if 'geometry' in attr]

    @caching.cache_decorator
    def geometry_nodes(self):
        """
        Which nodes have this geometry? Inverse
        of `nodes_geometry`.

        Returns
        ------------
        geometry_nodes : dict
          Keyed {geometry_name : [node name, ...]}
        """
        res = collections.defaultdict(list)
        for node, attr in self.transforms.node_data.items():
            if 'geometry' in attr:
                res[attr['geometry']].append(node)
        return res

    def remove_geometries(self, geometries):
        """
        Remove the reference for specified geometries
        from nodes without deleting the node.

        Parameters
        ------------
        geometries : list or str
          Name of scene.geometry to dereference.
        """
        # make sure we have a set of geometries to remove
        if util.is_string(geometries):
            geometries = [geometries]
        geometries = set(geometries)
        # remove the geometry reference from the node without deleting nodes
        # this lets us keep our cached paths, and will not screw up children
        for node, attrib in self.transforms.node_data.items():
            if 'geometry' in attrib and attrib['geometry'] in geometries:
                attrib.pop('geometry')
        # it would be safer to just run _cache.clear
        # but the only property using the geometry should be
        # nodes_geometry: if this becomes not true change this to clear!
        self._cache.cache.pop('nodes_geometry', None)

    def __contains__(self, key):
        # membership is defined by node existence
        return key in self.transforms.node_data

    def __getitem__(self, key):
        # returns a (matrix, geometry) tuple from the base frame
        return self.get(key)

    def __setitem__(self, key, value):
        value = np.asanyarray(value)
        if value.shape != (4, 4):
            # NOTE(review): message is misleading, shape (4, 4)
            # is what is actually required here
            raise ValueError('Matrix must be specified!')
        return self.update(key, matrix=value)

    def clear(self):
        # drop all transforms and invalidate every cached result
        self.transforms = EnforcedForest()
        self._cache.clear()
class EnforcedForest(object):
    """
    A simple forest graph data structure: every node
    is allowed to have exactly one parent. This makes
    traversal and implementation much simpler than a
    full graph data type; by storing only one parent
    reference, it enforces the structure for "free."
    """

    def __init__(self):
        # since every node can have only one parent
        # this data structure transparently enforces
        # the forest data structure without checks
        # a dict {child : parent}
        self.parents = {}
        # store data for a particular edge keyed by tuple
        # {(u, v) : data }
        self.edge_data = collections.defaultdict(dict)
        # {u: data}
        self.node_data = collections.defaultdict(dict)
        # if multiple calls are made for the same path
        # but the connectivity hasn't changed return cached
        self._cache = {}

    def add_edge(self, u, v, **kwargs):
        """
        Add an edge to the forest cleanly.

        Parameters
        -----------
        u : any
          Hashable node key (the parent).
        v : any
          Hashable node key (the child).
        kwargs : dict
          Stored as (u, v) edge data.

        Returns
        --------
        changed : bool
          Return if this operation changed anything.
        """
        # invalidate the lazily-computed hash
        self._hash = None
        # topology has changed so clear cache
        if (u, v) not in self.edge_data:
            self._cache = {}
        else:
            # check to see if matrix and geometry are identical
            edge = self.edge_data[(u, v)]
            if (util.allclose(kwargs.get('matrix', _identity),
                              edge.get('matrix', _identity),
                              1e-8)
                    and (edge.get('geometry') ==
                         kwargs.get('geometry'))):
                # no-op: nothing changed
                return False
        # store a parent reference for traversal
        self.parents[v] = u
        # store kwargs for edge data keyed with tuple
        self.edge_data[(u, v)] = kwargs
        # set empty node data
        self.node_data[u].update({})
        if 'geometry' in kwargs:
            self.node_data[v].update(
                {'geometry': kwargs['geometry']})
        else:
            self.node_data[v].update({})
        return True

    def remove_node(self, u):
        """
        Remove a node from the forest.

        Parameters
        -----------
        u : any
          Hashable node key.

        Returns
        --------
        changed : bool
          Return if this operation changed anything.
        """
        # check if node is part of forest
        if u not in self.node_data:
            return False
        # topology will change so clear cache
        self._cache = {}
        self._hash = None
        # delete all children's references and parent reference
        children = [child for (child, parent) in self.parents.items() if parent == u]
        for c in children:
            del self.parents[c]
        if u in self.parents:
            del self.parents[u]
        # delete edge data touching the node in either direction
        edges = [(a, b) for (a, b) in self.edge_data if a == u or b == u]
        for e in edges:
            del self.edge_data[e]
        # delete node data
        del self.node_data[u]
        return True

    def shortest_path(self, u, v):
        """
        Find the shortest path between `u` and `v`, returning
        a path where the first element is always `u` and the
        last element is always `v`, disregarding edge direction.

        Parameters
        -----------
        u : any
          Hashable node key.
        v : any
          Hashable node key.

        Returns
        -----------
        path : (n,)
          Path between `u` and `v`
        """
        # see if we've already computed this path
        if u == v:
            # the path between itself is an edge case
            return []
        elif (u, v) in self._cache:
            # return the same path for either direction
            return self._cache[(u, v)]
        elif (v, u) in self._cache:
            return self._cache[(v, u)][::-1]
        # local reference to parent dict for performance
        parents = self.parents
        # walk up towards the roots from both endpoints
        # simultaneously; `None` is appended once a root is passed
        forward = [u]
        backward = [v]
        # cap iteration to number of total nodes
        for _ in range(len(parents) + 1):
            # store the parent both forwards and backwards
            f = parents.get(forward[-1])
            b = parents.get(backward[-1])
            forward.append(f)
            backward.append(b)
            if f == v:
                # v is an ancestor of u: forward chain is the path
                self._cache[(u, v)] = forward
                return forward
            elif b == u:
                # u is an ancestor of v: return reversed path
                backward = backward[::-1]
                self._cache[(u, v)] = backward
                return backward
            elif (b in forward) or (f is None and b is None):
                # we have either a common node between both
                # traversal directions or we have consumed the whole
                # tree in both directions so try to find the common node
                common = set(backward).intersection(
                    forward).difference({None})
                if len(common) == 0:
                    raise ValueError('No path from {}->{}!'.format(u, v))
                elif len(common) > 1:
                    # get the first occurring common element in "forward"
                    link = next(f for f in forward if f in common)
                    assert link in common
                else:
                    # take the only common element
                    link = next(iter(common))
                # combine the forward and backwards traversals
                a = forward[:forward.index(link) + 1]
                b = backward[:backward.index(link)]
                path = a + b[::-1]
                # verify we didn't screw up the order
                assert path[0] == u
                assert path[-1] == v
                self._cache[(u, v)] = path
                return path
        raise ValueError('Iteration limit exceeded!')

    @property
    def nodes(self):
        """
        Get a view of every node.

        Returns
        -----------
        nodes : dict_keys
          Every node currently stored.
        """
        return self.node_data.keys()

    @property
    def children(self):
        """
        Get the children of each node.

        Returns
        ----------
        children : dict
          Keyed {node : [child, child, ...]}
        """
        child = collections.defaultdict(list)
        # append children to parent references
        # skip self-references to avoid a node loop
        [child[v].append(u) for u, v in
         self.parents.items() if u != v]
        # return as a vanilla dict
        return dict(child)

    def successors(self, node):
        """
        Get all nodes that are successors to specified node,
        including the specified node.

        Parameters
        -------------
        node : any
          Hashable key for a node.

        Returns
        ------------
        successors : set
          Nodes that succeed specified node.
        """
        # get mapping of {parent : child}
        children = self.children
        # if node doesn't exist return early
        if node not in children:
            return set([node])
        # children we need to collect
        queue = [node]
        # start collecting values with children of source
        collected = set(queue)
        # cap maximum iterations
        for _ in range(len(self.node_data) + 1):
            if len(queue) == 0:
                # no more nodes to visit so we're done
                return collected
            # add the children of this node to be processed
            childs = children.get(queue.pop())
            if childs is not None:
                queue.extend(childs)
                collected.update(childs)
        return collected

    def __hash__(self):
        """
        Actually hash all of the data.

        Previously we were relying on "dirty" flags but
        that made the bookkeeping unreasonably critical.
        This was optimized a bit, and is evaluating on an
        older laptop on a scene with 77 nodes and 76 edges
        10,000 times in 0.7s which seems fast enough.
        """
        # use the memoized hash if topology hasn't changed
        hashed = getattr(self, '_hash', None)
        if hashed is not None:
            return hashed
        # fold edge keys, node keys, geometry names and
        # matrix bytes into a single fast hash
        hashed = hash_fast(
            (''.join(str(hash(k)) + v.get('geometry', '')
                     for k, v in self.edge_data.items()) +
             ''.join(str(k) + v.get('geometry', '')
                     for k, v in self.node_data.items())).encode('utf-8') +
            b''.join(v['matrix'].tobytes()
                     for v in self.edge_data.values()
                     if 'matrix' in v))
        self._hash = hashed
        return hashed
def kwargs_to_matrix(
        matrix=None,
        quaternion=None,
        translation=None,
        axis=None,
        angle=None,
        **kwargs):
    """
    Parse multiple keyword arguments into a single
    homogeneous transformation matrix.

    An explicitly passed `matrix` wins outright (including over
    `translation`); otherwise a rotation is built from `quaternion`
    or from `axis` + `angle`, and `translation` is added on top.

    Returns
    ---------
    matrix : (4, 4) float
      Homogeneous transformation matrix.
    """
    # a matrix takes immediate precedence over every other option
    if matrix is not None:
        return np.array(matrix, dtype=np.float64)

    # build the rotational component
    if quaternion is not None:
        result = transformations.quaternion_matrix(quaternion)
    elif axis is not None and angle is not None:
        result = transformations.rotation_matrix(angle, axis)
    else:
        result = np.eye(4)

    # translation can be used in conjunction with any
    # of the methods specifying the rotation
    if translation is not None:
        result[:3, 3] += translation

    return result
| mit | 85afdd219aa750abadf54a692c371346 | 30.799274 | 85 | 0.529774 | 4.680192 | false | false | false | false |
mikedh/trimesh | trimesh/path/exchange/load.py | 1 | 2598 | import os
from .dxf import _dxf_loaders
from .svg_io import svg_to_path
from ..path import Path
from . import misc
from ... import util
def load_path(file_obj, file_type=None, **kwargs):
    """
    Load a file to a Path object.

    Parameters
    -----------
    file_obj : One of the following:
      - Path, Path2D, or Path3D objects
      - open file object (dxf or svg)
      - file name (dxf or svg)
      - shapely.geometry.Polygon
      - shapely.geometry.MultiLineString
      - dict with kwargs for Path constructor
      - (n, 2, (2|3)) float, line segments
    file_type : str
      Required when an open file object is passed.

    Returns
    ---------
    path : Path, Path2D, Path3D object
      Data as a native trimesh Path object
    """
    if isinstance(file_obj, Path):
        # already a native Path object:
        # hand the same object straight back
        return file_obj
    elif util.is_file(file_obj):
        # an open file object: dispatch using the
        # explicitly provided file_type
        kwargs.update(path_loaders[file_type](
            file_obj, file_type=file_type))
    elif util.is_string(file_obj):
        # a string is treated as a file name
        with open(file_obj, 'rb') as handle:
            # derive the loader key from the extension
            file_type = os.path.splitext(file_obj)[-1][1:].lower()
            # run the matching loader on the open handle
            kwargs.update(path_loaders[file_type](
                handle, file_type=file_type))
    elif util.is_instance_named(file_obj, ['Polygon', 'MultiPolygon']):
        # convert shapely polygons to Path2D
        kwargs.update(misc.polygon_to_path(file_obj))
    elif util.is_instance_named(file_obj, 'MultiLineString'):
        # convert shapely LineStrings to Path2D
        kwargs.update(misc.linestrings_to_path(file_obj))
    elif isinstance(file_obj, dict):
        # a dict is interpreted as constructor kwargs
        from ...exchange.load import load_kwargs
        return load_kwargs(file_obj)
    elif util.is_sequence(file_obj):
        # raw line segments in space
        kwargs.update(misc.lines_to_path(file_obj))
    else:
        raise ValueError('Not a supported object type!')

    # build the Path object from the collected kwargs
    from ...exchange.load import load_kwargs
    return load_kwargs(kwargs)
def path_formats():
    """
    Get the supported path formats.

    Returns
    ------------
    loaders : set of str
      Extensions of loadable formats, ie:
      {'svg', 'dxf'}
    """
    return set(path_loaders.keys())
# map loadable file extension -> loader function
path_loaders = {'svg': svg_to_path}
path_loaders.update(_dxf_loaders)
| mit | af6daaa8e8cd5fb38a78277f4827af7b | 29.928571 | 74 | 0.617398 | 3.776163 | false | false | false | false |
mikedh/trimesh | trimesh/viewer/notebook.py | 1 | 3052 | """
notebook.py
-------------
Render trimesh.Scene objects in HTML
and jupyter notebooks using three.js
"""
import os
import base64
# for our template
from .. import util
from .. import resources
def scene_to_html(scene):
    """
    Render a scene as a standalone HTML page with the geometry
    embedded as a base64-encoded GLB blob loaded by three.js.

    Parameters
    --------------
    scene : trimesh.Scene
      Source geometry

    Returns
    --------------
    html : str
      HTML containing embedded geometry
    """
    # the viewer template bundles all of three.js, so it ships
    # compressed inside a ZIP resource archive
    template = util.decompress(
        resources.get('templates/viewer.zip', decode=False),
        file_type='zip')['viewer.html.template'].read().decode('utf-8')
    # touch the camera property so the scene has one before export
    scene.camera
    # export the scene as binary GLTF then base64-encode it
    # so it can be embedded directly in the page text
    encoded = base64.b64encode(
        scene.export(file_type='glb')).decode('utf-8')
    # substitute the placeholder token with the encoded scene
    return template.replace('$B64GLTF', encoded)
def scene_to_notebook(scene, height=500, **kwargs):
    """
    Convert a scene to HTML containing embedded geometry
    and a three.js viewer that will display nicely in
    an IPython/Jupyter notebook.

    Parameters
    -------------
    scene : trimesh.Scene
      Source geometry
    height : int
      Height of the viewer iframe in pixels

    Returns
    -------------
    html : IPython.display.HTML
      Object containing rendered scene
    """
    # keep as soft dependency
    from IPython import display
    # convert scene to a full HTML page
    as_html = scene_to_html(scene=scene)
    # escape the double quotes so the whole document can be used
    # as the value of the iframe `srcdoc` attribute
    # BUG FIX: previously this replaced '"' with '"', a no-op,
    # which broke the srcdoc attribute on any document with quotes
    srcdoc = as_html.replace('"', '&quot;')
    # embed this puppy as the srcdoc attr of an IFframe
    # I tried this a dozen ways and this is the only one that works
    # display.IFrame/display.Javascript really, really don't work
    # div is to avoid IPython's pointless hardcoded warning
    embedded = display.HTML(' '.join([
        '<div><iframe srcdoc="{srcdoc}"',
        'width="100%" height="{height}px"',
        'style="border:none;"></iframe></div>']).format(
            srcdoc=srcdoc,
            height=height))
    return embedded
def in_notebook():
    """
    Check whether we are currently running inside an
    IPython / Jupyter notebook.

    Returns
    -----------
    in_notebook : bool
      True if we appear to be in a notebook
    """
    try:
        # get_ipython is only defined when running under IPython
        ipy = get_ipython()  # NOQA
        # rich HTML output is only wanted in notebooks, never
        # in terminal IPython sessions
        shell_name = str(ipy.__class__).lower()
        in_terminal = 'terminal' in shell_name
        # spyder uses a ZMQ shell, so it can masquerade as a
        # notebook; detect it through the launching executable
        in_spyder = '_' in os.environ and 'spyder' in os.environ['_']
        # a notebook is anything that is neither a terminal
        # session nor a spyder-launched shell
        return (not in_terminal) and (not in_spyder)
    except BaseException:
        # get_ipython does not exist outside IPython at all
        return False
| mit | 1f460602c985bc80879417b8baee8bb0 | 26.495495 | 71 | 0.624181 | 4.118758 | false | false | false | false |
kapsiry/sikteeri | procountor/procountor_api.py | 2 | 12672 | import random
import string
from datetime import datetime, timedelta
from decimal import Decimal
import requests
import logging
logger = logging.getLogger("ProcountorAPI")
class ProcountorAPIException(Exception):
    """Raised when a Procountor API request or token fetch fails."""
class ProcountorBankStatement(object):
    """
    Wrapper for one bank statement row returned by the
    Procountor API.

    Parses the period dates into ``datetime`` objects and
    flattens nested statement events into ``self.events``.
    """

    def __init__(self, row):
        # identifiers and account information
        self.id = row.get("id", None)
        self.accountNumber = row.get("accountNumber", None)
        # statement period; both dates are mandatory ISO strings
        self.startDate = datetime.strptime(row.get("startDate", None), "%Y-%m-%d")
        self.endDate = datetime.strptime(row.get("endDate", None), "%Y-%m-%d")
        self.currency = row.get("currency", None)
        # deposit / withdrawal summary figures
        self.numberOfDeposits = row.get("numberOfDeposits", None)
        self.depositSum = row.get("depositSum", 0)
        self.numberOfWithdrawals = row.get("numberOfWithdrawals", None)
        self.withdrawalSum = row.get("withdrawalSum", 0)
        self.startBalance = row.get("startBalance", 0)
        self.endBalance = row.get("endBalance", 0)
        # flatten events: each container's nested child events
        # first, then the containing event itself
        self.events = [
            ProcountorBankStatementEvent(event)
            for container in row.get("events", [])
            for event in container.get("events", []) + [container]
        ]
class ProcountorBankStatementEvent(object):
    """
    A single event (transaction) on a Procountor bank statement.

    Exposes both attribute access and a dict-style ``[]`` getter
    so instances are interchangeable with the csv bills
    processing rows.
    """

    # aliases used by the csv bills processing code
    MAPPINGS = {
        'transaction': 'archiveCode',
        'amount': 'sum',
        'date': 'payDate',
        'event_type_description': 'explanationDescription',
        'fromto': 'name',
        'reference': 'reference',
    }

    # http://www.finanssiala.fi/maksujenvalitys/dokumentit/ISO20022_Account_Statement_Guide_V1_3.pdf pages 39-40
    EXPLANATIONCODES = {
        700: 'maksuliikennepalvelu',
        701: 'toistuva maksuliikennepalvelu',
        702: 'Laksumaksupalvelu',
        703: 'Maksupäätemaksu',
        704: 'Suoramaksupalvelu',
        705: 'Viitesiirto',
        706: 'Maksupalvelu',
        710: 'Talletus',
        720: 'Nosto',
        721: 'Maksukorttimaksu',
        722: 'Shekki',
        730: 'Pankkimaksu',
        740: 'Korkomaksu',
        750: 'Luottokorkomaksu',
        760: 'Lainamaksu',
    }

    def __init__(self, row):
        self.id = row.get("id", 0)
        # payDate is mandatory, valueDate optional
        self.payDate = datetime.strptime(row.get("payDate", ""), "%Y-%m-%d")
        value_date = row.get("valueDate", None)
        if value_date:
            value_date = datetime.strptime(value_date, "%Y-%m-%d")
        self.valueDate = value_date
        self.sum = row.get("sum", 0)
        self.accountNumber = row.get("accountNumber", None)
        # force the counterparty name to always be a string
        self.name = row.get("name", None) or ""
        self.explanationCode = row.get("explanationCode", 0)
        # human-readable description, falling back to the raw code
        self.explanationDescription = self.EXPLANATIONCODES.get(
            self.explanationCode, str(self.explanationCode))
        self.archiveCode = row.get("archiveCode", "")
        self.message = row.get("message", "")
        self.reference = row.get("reference", "")
        # deposits (code 710) sometimes carry the reference number
        # inside the free-form SEPA message instead
        if not self.reference and self.explanationCode == 710:
            self.reference = self._reference_from_message()
        self.allocated = row.get("allocated", True)
        self.invoiceId = row.get("invoiceId", 0)
        self.productId = row.get("productId", 0)
        self.endToEndId = row.get("endToEndId", 0)
        self.attachments = []

    def _reference_from_message(self):
        """
        Try to extract a numeric reference embedded in a SEPA
        deposit message; return '' if none can be found.
        """
        parts = self.message.split()
        candidate = ""
        if self.message.startswith("SEPA-MAKSU") and len(parts) == 4:
            candidate = ''.join(parts[1:-1])
        elif self.message.startswith("SEPA PIKASIIRTO") and len(parts) == 5:
            candidate = ''.join(parts[2:-1])
        if candidate:
            # only accept the candidate if it is purely numeric
            try:
                int(candidate)
                return candidate
            except ValueError:
                pass
        return ""

    def __getitem__(self, key):
        """
        Dict-style access for compatibility with the csv bills
        processing code; unknown keys resolve to None.
        """
        return getattr(self, self.MAPPINGS.get(key, key), None)
class ProcountorReferencePayment(object):
    """
    Wrapper for one reference payment row returned by the
    Procountor API.

    Exposes both attribute access and a dict-style ``[]`` getter
    so instances are interchangeable with the csv bills
    processing rows.
    """

    # aliases used by the csv bills processing code
    MAPPINGS = {
        'transaction': 'archiveId',
        'amount': 'sum',
        'date': 'paymentDate',
        'fromto': 'name',
        'reference': 'reference',
    }

    def __init__(self, row):
        self.id = row.get("id", 0)
        # date paid by the payer in their own bank (optional)
        payment_date = row.get("paymentDate", None)
        if payment_date:
            payment_date = datetime.strptime(payment_date, "%Y-%m-%d")
        self.paymentDate = payment_date
        # date registered in the counterpart bank (optional)
        value_date = row.get("valueDate", None)
        if value_date:
            value_date = datetime.strptime(value_date, "%Y-%m-%d")
        self.valueDate = value_date
        self.sum = Decimal(row.get("sum", 0))
        self.accountNumber = row.get("accountNumber", None)
        self.name = row.get("name", None)
        # normalize the bank reference: drop spaces & leading zeros
        self.reference = row.get("bankReference", "").replace(' ', '').lstrip('0')
        self.archiveId = row.get("archiveId", "")
        self.allocated = row.get("allocated", True)
        self.invoiceId = row.get("invoiceId", 0)
        self.event_type_description = "Viitesiirto"
        self.message = ""
        self.attachments = []

    def __getitem__(self, key):
        """
        Dict-style access for compatibility with the csv bills
        processing code; unknown keys resolve to None.
        """
        return getattr(self, self.MAPPINGS.get(key, key), None)
class ProcountorAPIClient(object):
    """
    Minimal client for the Procountor REST API.

    Handles OAuth2 client-credentials token refresh and provides
    helpers for fetching reference payments, bank statements,
    ledger receipts and invoices.
    """

    def __init__(self, api, company_id, redirect_uri, client_id,
                 client_secret, api_key):
        """
        Parameters
        ----------
        api : str
            Base URL of the API; trailing slashes are stripped.
        company_id : str or int
            Procountor company identifier.
        redirect_uri : str
            OAuth2 redirect URI registered for this client.
        client_id : str
            OAuth2 client id.
        client_secret : str
            OAuth2 client secret.
        api_key : str
            Procountor API key.
        """
        self.session = requests.Session()
        self.api = api.rstrip("/")
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.company_id = company_id
        self.api_key = api_key
        # cached OAuth2 access token and its expiry time
        self._oauth_access_token = None
        self._oauth_expires = None
        # random state string for the OAuth2 flow
        self.state = "".join(
            [random.choice(string.digits + string.ascii_letters)
             for x in range(16)])

    def _error_handler(self, url, parameters, response):
        """
        Raise ProcountorAPIException for HTTP error responses
        (status >= 400), otherwise return the response unchanged.
        """
        if response.status_code >= 400:
            logger.debug(response.request.body)
            logger.debug(response.request.headers)
            # NOTE: this handler is shared by GET and POST even
            # though the message says GET
            raise ProcountorAPIException(
                "GET %s params %s failed with error (%d) %s" % (
                    url, parameters,
                    response.status_code, response.content))
        return response

    def get(self, path, headers=None, params=None):
        """HTTP GET `path` relative to the API root."""
        url = "%s/%s" % (self.api, path)
        if not params:
            params = {}
        response = self.session.get(
            url, params=params, headers=headers, allow_redirects=False)
        return self._error_handler(url, params, response)

    def post(self, path, body=None, headers=None, params=None):
        """HTTP POST `path` relative to the API root."""
        if not headers:
            headers = {}
        if not params:
            params = {}
        url = "%s/%s" % (self.api, path)
        response = self.session.post(
            url, data=body, params=params, headers=headers,
            allow_redirects=False)
        return self._error_handler(url, params, response)

    def refresh_access_token(self):
        """
        Fetch a fresh OAuth2 access token unless the cached one
        remains valid for at least another 60 seconds, and install
        it on the session as an Authorization header.
        """
        if self._oauth_access_token and \
                self._oauth_expires > (datetime.now() + timedelta(seconds=60)):
            return
        # credentials are sent as query parameters, not in the body
        params = {
            "grant_type": "client_credentials",
            "redirect_uri": self.redirect_uri,
            "api_key": self.api_key,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        res = self.post("oauth/token", params=params, headers=headers)
        if res.status_code != 200:
            # BUG FIX: the message was previously passed logger-style
            # as two arguments, so %d was never interpolated
            raise ProcountorAPIException(
                "Token fetch failed, wrong response status code %d"
                % res.status_code)
        data = res.json()
        self._oauth_access_token = data["access_token"]
        self._oauth_expires = datetime.now() + timedelta(
            seconds=data["expires_in"])
        self.session.headers.update(
            {"Authorization": "Bearer %s" % self._oauth_access_token})

    def get_referencepayments(self, start, end):
        """
        Fetch all reference payments between `start` and `end`
        (inclusive dates), following `previousId` pagination until
        a partial page is returned.

        Returns
        -------
        list of ProcountorReferencePayment
        """
        params = {
            "startDate": start.strftime("%Y-%m-%d"),
            "endDate": end.strftime("%Y-%m-%d"),
            "orderById": "asc",
        }
        out = []
        while True:
            res = self.get("referencepayments", params=params)
            result = res.json()
            meta = result.get("meta")
            out += [ProcountorReferencePayment(row)
                    for row in result.get("results", [])]
            # a full page means more results may follow
            if meta.get("resultCount") == meta.get("pageSize"):
                params["previousId"] = str(out[-1].id)
            else:
                break
        return out

    def get_bankstatements(self, start, end):
        """
        Fetch bank statements between `start` and `end`.

        TODO: only the first page of results is fetched; add
        pagination like get_referencepayments.

        Returns
        -------
        list of ProcountorBankStatement
        """
        params = {
            "startDate": start.strftime("%Y-%m-%d"),
            "endDate": end.strftime("%Y-%m-%d")
        }
        res = self.get("bankstatements", params=params)
        return [ProcountorBankStatement(x)
                for x in res.json().get("results", [])]

    def get_ledgerreceipts(self, start, end):
        """Fetch ledger receipts between `start` and `end` as raw JSON."""
        params = {
            "startDate": start.strftime("%Y-%m-%d"),
            "endDate": end.strftime("%Y-%m-%d")
        }
        res = self.get("ledgerreceipts", params=params)
        return res.json()

    def get_invoices(self, start, end, status="PAID"):
        """Fetch invoices with `status` between `start` and `end` as raw JSON."""
        params = {
            "startDate": start.strftime("%Y-%m-%d"),
            "endDate": end.strftime("%Y-%m-%d"),
            "status": status,
        }
        res = self.get("invoices", params=params)
        return res.json()
| mit | 51f7eed73de4f8b4cac3bd6a3b8b61e6 | 37.277946 | 117 | 0.54562 | 4.014575 | false | false | false | false |
mikedh/trimesh | trimesh/geometry.py | 1 | 14644 | import numpy as np
from . import util
from .constants import log
try:
import scipy.sparse
except BaseException as E:
from . import exceptions
# raise E again if anyone tries to use sparse
scipy = exceptions.ExceptionModule(E)
def plane_transform(origin, normal):
    """
    Find the homogeneous transform that moves a plane, described
    by a point and a normal, onto the XY plane.

    Parameters
    ----------
    origin : (3,) float
      Point that lies on the plane
    normal : (3,) float
      Vector that points along normal of plane

    Returns
    ---------
    transform: (4,4) float
      Transformation matrix to move points onto XY plane
    """
    # rotation taking the plane normal onto +Z
    transform = align_vectors(normal, [0, 0, 1])
    if origin is not None:
        # translate so the rotated origin lands on the XY plane
        origin_h = np.append(origin, 1)
        transform[:3, 3] = -np.dot(transform, origin_h)[:3]
    return transform
def align_vectors(a, b, return_angle=False):
    """
    Find the rotation matrix that transforms one 3D vector
    to another.

    Parameters
    ------------
    a : (3,) float
      Unit vector
    b : (3,) float
      Unit vector
    return_angle : bool
      Also return the angle between the vectors

    Returns
    -------------
    matrix : (4, 4) float
      Homogeneous transform to rotate from `a` to `b`
    angle : float
      If `return_angle` the angle in radians between `a` and `b`
    """
    a = np.array(a, dtype=np.float64)
    b = np.array(b, dtype=np.float64)
    if a.shape != (3,) or b.shape != (3,):
        raise ValueError('vectors must be (3,)!')

    # build an orthonormal basis from each vector via the SVD;
    # the first column of U is parallel to the input vector
    basis_a = np.linalg.svd(a.reshape((-1, 1)))[0]
    basis_b = np.linalg.svd(b.reshape((-1, 1)))[0]
    # force both bases to be right-handed
    if np.linalg.det(basis_a) < 0:
        basis_a[:, -1] *= -1.0
    if np.linalg.det(basis_b) < 0:
        basis_b[:, -1] *= -1.0

    # the change of basis is the rotation taking a to b;
    # embed it in a homogeneous transform
    matrix = np.eye(4)
    matrix[:3, :3] = np.dot(basis_b, basis_a.T)

    if not return_angle:
        return matrix

    # projection of a onto b from the first rows of the SVD bases
    dot = np.dot(basis_a[0], basis_b[0])
    # clip to the valid arccos domain to dodge float error
    angle = np.arccos(np.clip(dot, -1.0, 1.0))
    if dot < -1e-5:
        angle += np.pi
    return matrix, angle
def faces_to_edges(faces, return_index=False):
    """
    Given a list of faces (n,3), return a list of edges (n*3,2)

    Parameters
    -----------
    faces : (n, 3) int
      Vertex indices representing faces
    return_index : bool
      Also return the index of the source face for each edge

    Returns
    -----------
    edges : (n*3, 2) int
      Vertex indices representing edges
    """
    faces = np.asanyarray(faces)
    # pull the three directed edges out of every triangle
    edges = faces[:, [0, 1, 1, 2, 2, 0]].reshape((-1, 2))
    if not return_index:
        return edges
    # the reshape keeps edges grouped by face, so the owning face
    # index is just each face number repeated three times
    face_index = np.tile(np.arange(len(faces)),
                         (3, 1)).T.reshape(-1)
    return edges, face_index
def vector_angle(pairs):
    """
    Find the angles between pairs of unit vectors.

    Parameters
    ----------
    pairs : (n, 2, 3) float
      Unit vector pairs; a single (2, 3) pair is also accepted
      and 2D vectors appear to be allowed by the shape check
      (presumably via util.is_shape's (2|3) option)

    Returns
    ----------
    angles : (n,) float
      Angles between vectors in radians, in [0, pi]
    """
    pairs = np.asanyarray(pairs, dtype=np.float64)
    if len(pairs) == 0:
        # no pairs: empty result
        return np.array([])
    elif util.is_shape(pairs, (2, 3)):
        # a single pair was passed: promote to (1, 2, 3)
        pairs = pairs.reshape((-1, 2, 3))
    elif not util.is_shape(pairs, (-1, 2, (2, 3))):
        raise ValueError('pairs must be (n,2,(2|3))!')
    # do the dot product between vectors
    # (util.diagonal_dot is a row-wise dot product)
    dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1])
    # clip for floating point error
    dots = np.clip(dots, -1.0, 1.0)
    # do cos and remove arbitrary sign
    angles = np.abs(np.arccos(dots))
    return angles
def triangulate_quads(quads, dtype=np.int64):
    """
    Given an array of quad faces return them as triangle faces;
    also handles pure triangles and mixed triangles, quads and
    larger polygons.

    Parameters
    -----------
    quads : (n, 4) int
      Vertex indices of quad faces (or mixed-length face lists)
    dtype : numpy dtype
      Output integer dtype

    Returns
    -----------
    faces : (m, 3) int
      Vertex indices of triangular faces
    """
    quads = np.asanyarray(quads)
    if len(quads) == 0:
        # nothing to triangulate
        return quads.astype(dtype)
    if quads.ndim == 2 and quads.shape[1] == 3:
        # already all triangles: passthrough
        return quads.astype(dtype)
    if quads.ndim == 2 and quads.shape[1] == 4:
        # homogeneous quads: split each into two triangles
        return np.vstack((quads[:, [0, 1, 2]],
                          quads[:, [2, 3, 0]])).astype(dtype)
    # ragged input: mix of triangles, quads and larger polygons
    tri = np.array([face for face in quads if len(face) == 3])
    quad = np.array([face for face in quads if len(face) == 4])
    # fan-triangulate anything with more than four vertices
    poly = [[[face[0], face[i + 1], face[i + 2]]
             for i in range(len(face) - 2)]
            for face in quads if len(face) > 4]
    if len(quad) == 0 and len(poly) == 0:
        # only triangles were found
        return tri.astype(dtype)
    if len(poly) > 0:
        poly = np.vstack(poly)
    if len(quad) > 0:
        quad = np.vstack((quad[:, [0, 1, 2]],
                          quad[:, [2, 3, 0]]))
    # combine triangulated quads and polygons with the triangles
    return util.vstack_empty([tri, quad, poly]).astype(dtype)
def vertex_face_indices(vertex_count,
                        faces,
                        faces_sparse):
    """
    Find the face indices touching every vertex.

    Parameters
    -----------
    vertex_count : int
      The number of vertices faces refer to
    faces : (n, 3) int
      List of vertex indices
    faces_sparse : scipy.sparse.COO
      Vertex-face incidence matrix of shape (vertex_count, n),
      as produced by index_sparse

    Returns
    -----------
    vertex_faces : (vertex_count, max_valence) int
      Face indices for every vertex, padded with -1 in each row
      for all vertices with fewer face indices than the max
      number of face indices.
    """
    # Create 2D array with row for each vertex and
    # length of max number of faces for a vertex
    try:
        counts = np.bincount(
            faces.flatten(), minlength=vertex_count)
    except TypeError:
        # casting failed on 32 bit Windows
        log.warning('casting failed, falling back!')
        # fall back to np.unique (usually ~35x slower than bincount)
        counts = np.unique(faces.flatten(), return_counts=True)[1]
    assert len(counts) == vertex_count
    assert faces.max() < vertex_count
    # start cumulative sum at zero and clip off the last value
    starts = np.append(0, np.cumsum(counts)[:-1])
    # pack incrementing array into final shape
    # row v holds consecutive slot positions beginning at starts[v]
    pack = np.arange(counts.max()) + starts[:, None]
    # pad each row with -1 to pad to the max length
    # (slots past a vertex's own count become -1, valid slots 0)
    padded = -(pack >= (starts + counts)[:, None]).astype(np.int64)
    try:
        # do most of the work with a sparse dot product
        # nonzero()[1] yields face indices grouped by vertex, which
        # matches the row-major order of the zero slots in `padded`
        identity = scipy.sparse.identity(len(faces), dtype=int)
        sorted_faces = faces_sparse.dot(identity).nonzero()[1]
        # this will fail if any face was degenerate
        # TODO
        # figure out how to filter out degenerate faces from sparse
        # result if sorted_faces.size != faces.size
        padded[padded == 0] = sorted_faces
    except BaseException:
        # fall back to a slow loop
        log.warning('vertex_faces falling back to slow loop! ' +
                    'mesh probably has degenerate faces',
                    exc_info=True)
        sort = np.zeros(faces.size, dtype=np.int64)
        flat = faces.flatten()
        for v in range(vertex_count):
            # assign the data in order
            # note the [::-1]: faces are stored newest-first per vertex
            sort[starts[v]:starts[v] + counts[v]] = (np.where(flat == v)[0] // 3)[::-1]
        padded[padded == 0] = sort
    return padded
def mean_vertex_normals(vertex_count,
                        faces,
                        face_normals,
                        sparse=None,
                        **kwargs):
    """
    Find vertex normals as the (unweighted) mean of the normals
    of the faces containing each vertex.

    Parameters
    -----------
    vertex_count : int
      The number of vertices faces refer to
    faces : (n, 3) int
      List of vertex indices
    face_normals : (n, 3) float
      Normal vector for each face
    sparse : scipy sparse matrix or None
      Optional cached vertex-face incidence matrix

    Returns
    -----------
    vertex_normals : (vertex_count, 3) float
      Normals for every vertex; vertices unreferenced by any
      face come back as zero vectors.
    """
    def _sparse_sum():
        # the vertex-face incidence matrix dotted with the face
        # normals gives the summed normal at every vertex;
        # prefer the cached matrix when one was passed
        if sparse is None:
            matrix = index_sparse(vertex_count, faces)
        else:
            matrix = sparse
        return matrix.dot(face_normals)

    def _loop_sum():
        # slow fallback: accumulate each face normal in a loop
        # (~50x slower than the sparse product in tests)
        acc = np.zeros((vertex_count, 3))
        for face, normal in zip(faces, face_normals):
            acc[face] += normal
        return acc

    try:
        summed = _sparse_sum()
    except BaseException:
        log.warning(
            'unable to use sparse matrix, falling back!',
            exc_info=True)
        summed = _loop_sum()

    # unitize; invalid (zero) sums stay zero
    return util.unitize(summed)
def weighted_vertex_normals(vertex_count,
                            faces,
                            face_normals,
                            face_angles,
                            use_loop=False):
    """
    Compute vertex normals from the faces that contain that vertex.

    The contibution of a face's normal to a vertex normal is the
    ratio of the corner-angle in which the vertex is, with respect
    to the sum of all corner-angles surrounding the vertex.

    Grit Thuerrner & Charles A. Wuethrich (1998)
    Computing Vertex Normals from Polygonal Facets,
    Journal of Graphics Tools, 3:1, 43-46

    Parameters
    -----------
    vertex_count : int
      The number of vertices faces refer to
    faces : (n, 3) int
      List of vertex indices
    face_normals : (n, 3) float
      Normal vector for each face
    face_angles : (n, 3) float
      Angles at each vertex in the face
    use_loop : bool
      Force the slow per-vertex loop instead of the sparse path

    Returns
    -----------
    vertex_normals : (vertex_count, 3) float
      Normals for every vertex
      Vertices unreferenced by faces will be zero.
    """
    def summed_sparse():
        # use a sparse matrix of which face contains each vertex to
        # figure out the summed normal at each vertex
        # allow cached sparse matrix to be passed
        # fill the matrix with vertex-corner angles as weights
        # NOTE(review): the argsort reordering appears intended to
        # line the angles up with the COO data ordering produced by
        # index_sparse -- confirm if index_sparse changes
        corner_angles = face_angles[np.repeat(np.arange(len(faces)), 3),
                                    np.argsort(faces, axis=1).ravel()]
        # create a sparse matrix
        matrix = index_sparse(vertex_count, faces).astype(np.float64)
        # assign the corner angles to the sparse matrix data
        matrix.data = corner_angles
        return matrix.dot(face_normals)

    def summed_loop():
        summed = np.zeros((vertex_count, 3), np.float64)
        for vertex_idx in np.arange(vertex_count):
            # loop over all vertices
            # compute normal contributions from surrounding faces
            # obviously slower than with the sparse matrix
            face_idxs, inface_idxs = np.where(faces == vertex_idx)
            surrounding_angles = face_angles[face_idxs, inface_idxs]
            summed[vertex_idx] = np.dot(
                surrounding_angles /
                surrounding_angles.sum(),
                face_normals[face_idxs])
        return summed

    # normals should be unit vectors
    face_ok = (face_normals ** 2).sum(axis=1) > 0.5
    # don't consider faces with invalid normals
    faces = faces[face_ok]
    face_normals = face_normals[face_ok]
    face_angles = face_angles[face_ok]

    if not use_loop:
        try:
            return util.unitize(summed_sparse())
        except BaseException:
            log.warning(
                'unable to use sparse matrix, falling back!',
                exc_info=True)
    # we either crashed or were asked to loop
    return util.unitize(summed_loop())
def index_sparse(columns, indices, data=None):
    """
    Build a sparse incidence matrix recording which vertices are
    contained in which faces; a data vector can be passed which is
    then used instead of booleans.

    Parameters
    ------------
    columns : int
      Number of rows in the result, usually number of vertices
    indices : (m, d) int
      Usually mesh.faces
    data : (m * d,) array or None
      Optional values for the nonzero entries

    Returns
    ---------
    sparse : scipy.sparse.coo_matrix
      Shape (columns, len(indices)); entry (v, f) is set when
      vertex v appears in face f.

    Examples
    ----------
    In [1]: sparse = faces_sparse(len(mesh.vertices), mesh.faces)

    In [2]: sparse.shape
    Out[2]: (12, 20)

    In [3]: mesh.faces.shape
    Out[3]: (20, 3)

    In [4]: sparse.toarray().astype(int).sum(axis=0)
    Out[4]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])
    """
    indices = np.asanyarray(indices)
    columns = int(columns)

    # the row of each entry is the vertex index itself
    row = indices.reshape(-1)
    # the column is the face number each entry came from
    col = np.tile(
        np.arange(len(indices)).reshape((-1, 1)),
        (1, indices.shape[1])).reshape(-1)

    if data is None:
        # default to boolean incidence
        data = np.ones(len(col), dtype=bool)

    # assemble into a COO sparse matrix
    return scipy.sparse.coo_matrix(
        (data, (row, col)),
        shape=(columns, len(indices)),
        dtype=data.dtype)
| mit | 85ec9b531c62f9fb584046a8ff47e56e | 30.62851 | 87 | 0.556474 | 3.621167 | false | false | false | false |
neuroailab/tfutils | tfutils/imagenet_data.py | 1 | 14375 | """
This script contains class method `dataset_func` which will return dataset elements
The data format structure of ImageNet required for `dataset_func` is similar as
data structure generated by following structure:
https://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py
The only difference is that each tfrecords file only contains two attributes:
images: jpeg format of images
labels: int64 of 0-999 labels
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
import sys
import numpy as np
# whether the installed TensorFlow predates the 1.15 tf.data API.
# BUG FIX: the previous lexicographic string compare
# (tf.__version__ < '1.15') misclassified versions such as
# '1.9' (since '1.9' > '1.15' as strings); compare numeric
# (major, minor) tuples instead.
BELOW_TF15 = tuple(
    int(part) for part in tf.__version__.split('.')[:2]) < (1, 15)
def fetch_dataset(filename):
    """
    Open one tfrecord file as a TFRecordDataset using a
    32 MiB per-file read buffer.
    """
    return tf.data.TFRecordDataset(
        filename, buffer_size=32 * 1024 * 1024)
def _at_least_x_are_equal(a, b, x):
    """At least `x` of `a` and `b` `Tensors` are equal."""
    # count element-wise matches and compare to the threshold
    matches = tf.cast(tf.equal(a, b), tf.int32)
    return tf.greater_equal(tf.reduce_sum(matches), x)
def color_normalize(image):
    """
    Scale a uint8 image into [0, 1] and normalize with the
    standard ImageNet per-channel mean and std.
    """
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    scaled = tf.cast(image, tf.float32) / 255
    return (scaled - mean) / std
class ImageNet(object):
    """
    Data provider for the ImageNet dataset.

    Reads tfrecord files whose examples contain two features
    ('images': a JPEG-encoded string, 'labels': an int64 label)
    and builds tf.data pipelines with several crop/preprocessing
    strategies selected by `prep_type`.
    """
    # canonical image counts of the ImageNet train / val splits
    TRAIN_LEN = 1281167
    VAL_LEN = 50000
    def __init__(self,
                 image_dir,
                 prep_type,
                 crop_size=224,
                 smallest_side=256,
                 resize=None,
                 is_train=True,
                 drop_remainder=False,
                 seed=None):
        # directory containing the tfrecord files
        self.image_dir = image_dir
        # Parameters about preprocessing
        # prep_type selects the crop strategy in self.preprocessing:
        # 'resnet', 'inception', 'alex_center' are checked explicitly;
        # anything else falls back to the alexnet-style random crop
        self.prep_type = prep_type
        # final square crop size in pixels
        self.crop_size = crop_size
        # target length of the shorter side before cropping
        self.smallest_side = smallest_side
        # optional extra resize applied after normalization
        self.resize = resize
        self.num_cores = 8
        self.drop_remainder = drop_remainder
        self.is_train = is_train
        # Placeholders to be filled later
        self.on_tpu = None
        self.file_pattern = None
        # Control the RNG
        self.seed = seed
def get_tfr_filenames(self):
"""
Get list of tfrecord filenames
for given folder_name fitting the given file_pattern
"""
assert self.file_pattern, "Please specify file pattern!"
tfrecord_pattern = os.path.join(self.image_dir, self.file_pattern)
datasource = tf.gfile.Glob(tfrecord_pattern)
datasource.sort()
return np.asarray(datasource)
def get_resize_scale(self, height, width):
"""
Get the resize scale so that the shortest side is `smallest_side`
"""
smallest_side = tf.convert_to_tensor(
self.smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(
tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height,
)
return scale
def resize_cast_to_uint8(self, image):
image = tf.cast(
tf.image.resize_bilinear(
[image], [self.crop_size, self.crop_size])[0],
dtype=tf.uint8,
)
image.set_shape([self.crop_size, self.crop_size, 3])
return image
    def central_crop_from_jpg(self, image_string):
        """
        Take a central crop from a JPEG-encoded string: crop a
        window that, after scaling the shorter side to
        `smallest_side`, corresponds to a `crop_size` square,
        then resize the decoded crop to `crop_size`.
        """
        shape = tf.image.extract_jpeg_shape(image_string)
        scale = self.get_resize_scale(shape[0], shape[1])
        # crop window size in original-image coordinates; after
        # the final resize this corresponds to crop_size pixels
        cp_height = tf.cast(self.crop_size / scale, tf.int32)
        cp_width = tf.cast(self.crop_size / scale, tf.int32)
        # center the crop window within the image
        cp_begin_x = tf.cast((shape[0] - cp_height) / 2, tf.int32)
        cp_begin_y = tf.cast((shape[1] - cp_width) / 2, tf.int32)
        bbox = tf.stack([cp_begin_x, cp_begin_y, cp_height, cp_width])
        # decode only the crop window, which avoids decoding
        # the full image
        crop_image = tf.image.decode_and_crop_jpeg(
            image_string, bbox, channels=3)
        image = self.resize_cast_to_uint8(crop_image)
        return image
    def resnet_crop_from_jpg(self, image_str):
        """
        Random crop in Inception style, see GoogLeNet paper, also used by ResNet
        """
        shape = tf.image.extract_jpeg_shape(image_str)
        bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                           dtype=tf.float32, shape=[1, 1, 4])
        # sample a random crop window covering 8%-100% of the image
        # area with aspect ratio in [3/4, 4/3]
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
            shape,
            bounding_boxes=bbox,
            min_object_covered=0.1,
            aspect_ratio_range=(3.0 / 4, 4.0 / 3.0),
            area_range=(0.08, 1.0),
            max_attempts=100,
            use_image_if_no_bounding_boxes=True,
            seed=0 if self.seed is None else self.seed
        )
        # Get the cropped image
        bbox_begin, bbox_size, bbox = sample_distorted_bounding_box
        random_image = tf.image.decode_and_crop_jpeg(
            image_str,
            tf.stack([bbox_begin[0], bbox_begin[1],
                      bbox_size[0], bbox_size[1]]),
            channels=3,
        )
        # if the sampler fell back to (nearly) the whole image the
        # crop is considered "bad" and a fallback crop is used
        bad = _at_least_x_are_equal(shape, tf.shape(random_image), 3)
        # central crop if bad
        # NOTE(review): despite the comment above, the fallback is a
        # randomly-offset min-side square crop, not a central one
        min_size = tf.minimum(shape[0], shape[1])
        offset_height = tf.random_uniform(
            shape=[], minval=0, maxval=shape[0] - min_size + 1,
            dtype=tf.int32, seed=self.seed
        )
        offset_width = tf.random_uniform(
            shape=[], minval=0, maxval=shape[1] - min_size + 1,
            dtype=tf.int32, seed=self.seed
        )
        bad_image = tf.image.decode_and_crop_jpeg(
            image_str,
            tf.stack([offset_height, offset_width, min_size, min_size]),
            channels=3,
        )
        image = tf.cond(bad, lambda: bad_image, lambda: random_image)
        image = self.resize_cast_to_uint8(image)
        return image
    def alexnet_crop_from_jpg(self, image_string):
        """
        Resize the image to make its smallest side to be 256;
        then randomly get a 224 crop
        (256/224 here are smallest_side/crop_size respectively)
        """
        shape = tf.image.extract_jpeg_shape(image_string)
        scale = self.get_resize_scale(shape[0], shape[1])
        # crop window size in original-image coordinates
        cp_height = tf.cast(self.crop_size / scale, tf.int32)
        cp_width = tf.cast(self.crop_size / scale, tf.int32)
        # Randomly sample begin x and y
        x_range = [0, shape[0] - cp_height + 1]
        y_range = [0, shape[1] - cp_width + 1]
        if self.prep_type == "alex_center":
            # Original AlexNet preprocessing uses center 256*256 to crop
            # so restrict the sampling window to the central square
            min_shape = tf.minimum(shape[0], shape[1])
            x_range = [
                tf.cast((shape[0] - min_shape) / 2, tf.int32),
                shape[0]
                - cp_height
                + 1
                - tf.cast((shape[0] - min_shape) / 2, tf.int32),
            ]
            y_range = [
                tf.cast((shape[1] - min_shape) / 2, tf.int32),
                shape[1] - cp_width + 1 -
                tf.cast((shape[1] - min_shape) / 2, tf.int32),
            ]
        cp_begin_x = tf.random_uniform(
            shape=[], minval=x_range[0], maxval=x_range[1],
            dtype=tf.int32, seed=self.seed
        )
        cp_begin_y = tf.random_uniform(
            shape=[], minval=y_range[0], maxval=y_range[1],
            dtype=tf.int32, seed=self.seed
        )
        bbox = tf.stack([cp_begin_x, cp_begin_y, cp_height, cp_width])
        # decode only the sampled window, then resize to crop_size
        crop_image = tf.image.decode_and_crop_jpeg(
            image_string, bbox, channels=3)
        image = self.resize_cast_to_uint8(crop_image)
        return image
    def preprocessing(self, image_string):
        """
        Preprocess one JPEG-encoded image string into a float
        image tensor, with the crop strategy selected by
        `self.prep_type` and train/val behavior by `self.is_train`.
        """
        assert self.is_train is not None, "Must specify is_train"
        def _rand_crop(image_string):
            # training-time random crop: inception-style for
            # 'resnet', alexnet-style for everything else
            if self.prep_type == "resnet":
                image = self.resnet_crop_from_jpg(image_string)
            else:
                image = self.alexnet_crop_from_jpg(image_string)
            return image
        if self.prep_type == "inception":
            # inception preprocessing does its own crop/resize
            if self.resize is None:
                inception_image_size = 299
            else:
                inception_image_size = self.resize
            import tfutils.inception_preprocessing as inception_preprocessing
            image = tf.image.decode_jpeg(image_string, channels=3)
            image = inception_preprocessing.preprocess_image(image,
                                                             is_training=self.is_train,
                                                             image_size=inception_image_size,
                                                             seed=self.seed)
        else:
            if self.is_train:
                image = _rand_crop(image_string)
                image = tf.image.random_flip_left_right(image, seed=self.seed)
            else:
                # deterministic central crop for validation
                image = self.central_crop_from_jpg(image_string)
            image = color_normalize(image)
        # NOTE(review): with prep_type 'inception' and resize set,
        # the image is already inception_image_size here, so this
        # second resize appears redundant -- confirm intent
        if self.resize is not None:
            image = tf.image.resize_images(image,
                                           [self.resize, self.resize],
                                           align_corners=True)
        return image
def data_parser(self, value):
"""
Parse record and preprocessing
"""
# Load the image and preprocess it
keys_to_features = {
"images": tf.FixedLenFeature((), tf.string, ""),
"labels": tf.FixedLenFeature([], tf.int64, -1),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_string = parsed["images"]
image_label = parsed["labels"]
# Do the preprocessing
image = self.preprocessing(image_string)
if self.on_tpu:
return image, image_label
else:
return {"images": image, "labels": image_label}
def process_dataset(self, dataset):
    """
    Common input pipeline: shuffle/interleave the file-name dataset,
    parse and preprocess each record, then batch.

    Expects ``self.is_train``, ``self.q_cap``, ``self.batch_size``,
    ``self.drop_remainder``, ``self.num_cores`` and ``self.seed`` to be set
    (the ``dataset_func*`` entry points do this).
    """
    # Shuffle the file order only when training; repeat indefinitely in
    # both modes (the two branches previously did the same repeat()).
    if self.is_train:
        dataset = dataset.shuffle(self.q_cap, seed=self.seed)
    dataset = dataset.repeat()
    if self.is_train:
        # Read each TFRecord file in parallel. Sloppy (non-deterministic)
        # interleaving is only allowed when no seed was requested.
        if BELOW_TF15:
            use_sloppy = self.seed is None
            dataset = dataset.apply(
                tf.contrib.data.parallel_interleave(
                    fetch_dataset, cycle_length=self.num_cores, sloppy=use_sloppy
                )
            )
        else:
            dataset = dataset.interleave(fetch_dataset,
                                         cycle_length=self.num_cores)
        # Use a slightly different seed for the reshuffle, but still
        # deterministically computed on the seed attribute
        shuffle_seed = None if self.seed is None else self.seed + 1
        # BUGFIX: Dataset.shuffle() returns a new dataset; the previous
        # code discarded the result, so no record-level reshuffle ever
        # took effect.
        dataset = dataset.shuffle(self.q_cap, seed=shuffle_seed)
    else:
        if BELOW_TF15:
            # Read each file, but make it deterministic for validation
            dataset = dataset.apply(
                tf.contrib.data.parallel_interleave(
                    fetch_dataset, cycle_length=self.num_cores, sloppy=False
                )
            )
        else:
            # Taking advantage of the new interface, let's keep this
            # deterministic
            dataset = dataset.interleave(fetch_dataset,
                                         cycle_length=self.num_cores)
    # apply preprocessing to each image
    dataset = dataset.map(
        self.data_parser,
        num_parallel_calls=64)
    dataset = dataset.prefetch(4)
    if BELOW_TF15 and self.drop_remainder:
        dataset = dataset.apply(
            tf.contrib.data.batch_and_drop_remainder(self.batch_size))
    else:
        dataset = dataset.batch(self.batch_size,
                                drop_remainder=self.drop_remainder)
    return dataset
def dataset_func(
        self,
        is_train,
        batch_size,
        q_cap=51200,
        file_pattern="train-*"
):
    """
    Build the dataset, get the elements.

    GPU/CPU entry point: stores the pipeline configuration on the
    instance (TPU mode off), lists the TFRecord files matching
    ``file_pattern`` (shuffled when training, deterministic otherwise),
    runs them through ``process_dataset`` and returns the one-shot
    iterator's next element.
    """
    self.on_tpu = False
    self.is_train = is_train
    self.file_pattern = file_pattern
    self.batch_size = batch_size
    self.q_cap = q_cap
    tfr_list = self.get_tfr_filenames()
    if self.is_train:
        dataset = tf.data.Dataset.list_files(tfr_list,
                                             seed=self.seed)
    else:
        # Keep validation file order deterministic.
        dataset = tf.data.Dataset.list_files(tfr_list,
                                             shuffle=False,
                                             seed=self.seed)
    dataset = self.process_dataset(dataset)
    # Batch the dataset and make iterator
    next_element = dataset.make_one_shot_iterator().get_next()
    return next_element
def dataset_func_tpu(
        self, params):
    """
    Build the dataset, get the elements.

    TPUEstimator input_fn: ``params['batch_size']`` is supplied by the
    estimator. Always drops the remainder batch (TPU requires static
    batch shapes). ``self.is_train`` must already be set.
    """
    self.on_tpu = True
    self.drop_remainder = True
    if self.is_train:
        self.file_pattern = 'train-*'
    else:
        self.file_pattern = 'validation-*'
    self.batch_size = params['batch_size']
    self.q_cap = 1024
    # get file pattern and create dataset
    file_pattern = os.path.join(
        self.image_dir, self.file_pattern)
    if self.is_train:
        dataset = tf.data.Dataset.list_files(file_pattern,
                                             seed=self.seed)
    else:
        dataset = tf.data.Dataset.list_files(file_pattern,
                                             shuffle=False,
                                             seed=self.seed)
    dataset = self.process_dataset(dataset)
    # NOTE(review): process_dataset() already prefetches record-level
    # data; this adds a second, batch-level prefetch buffer.
    dataset = dataset.prefetch(2)
    images, labels = dataset.make_one_shot_iterator().get_next()
    return images, labels
| mit | da6bd0cb567142e914f5201c903479e1 | 34.146699 | 109 | 0.539687 | 3.919029 | false | false | false | false |
braintree/braintree_python | braintree/us_bank_account.py | 1 | 1519 | import braintree
from braintree.resource import Resource
from braintree.configuration import Configuration
from braintree.ach_mandate import AchMandate
from braintree.us_bank_account_verification import UsBankAccountVerification
class UsBankAccount(Resource):
    """Payment-method resource representing a tokenized US bank account."""

    @staticmethod
    def find(token):
        """Look up a US bank account by its payment-method token."""
        return Configuration.gateway().us_bank_account.find(token)

    @staticmethod
    def sale(token, transactionRequest):
        """Create a sale transaction paid by ``token``.

        The request is always submitted for settlement.
        """
        transactionRequest["payment_method_token"] = token
        # Ensure the options sub-dict exists, then force settlement submission.
        transactionRequest.setdefault("options", {})["submit_for_settlement"] = True
        return Configuration.gateway().transaction.sale(transactionRequest)

    @staticmethod
    def signature():
        """Attribute names the gateway returns for this resource."""
        return [
            "routing_number",
            "last_4",
            "account_type",
            "account_holder_name",
            "token",
            "image_url",
            "bank_name",
            "ach_mandate"
        ]

    def __init__(self, gateway, attributes):
        Resource.__init__(self, gateway, attributes)
        # Resource.__init__ populates instance attributes from ``attributes``
        # (hence the reads of self.ach_mandate / self.verifications below);
        # wrap the nested structures in their typed counterparts.
        if attributes.get("ach_mandate") is None:
            self.ach_mandate = None
        else:
            self.ach_mandate = AchMandate(gateway, self.ach_mandate)
        if attributes.get("verifications") is None:
            self.verifications = None
        else:
            self.verifications = [UsBankAccountVerification(gateway, v) for v in self.verifications]
braintree/braintree_python | braintree/util/crypto.py | 1 | 1224 | import hashlib
import hmac
import sys
text_type = str  # Py2-era compatibility alias; on Python 3 text is always str.
class Crypto:
    """HMAC digest helpers and a constant-time string comparison."""

    @staticmethod
    def _as_ascii_bytes(value):
        # str inputs are ASCII-encoded; bytes pass through untouched.
        return value.encode('ascii') if isinstance(value, str) else value

    @staticmethod
    def sha1_hmac_hash(secret_key, content):
        """Hex HMAC-SHA1 of ``content``, keyed by SHA1(secret_key)."""
        key = Crypto._as_ascii_bytes(secret_key)
        message = Crypto._as_ascii_bytes(content)
        return hmac.new(hashlib.sha1(key).digest(), message, hashlib.sha1).hexdigest()

    @staticmethod
    def sha256_hmac_hash(secret_key, content):
        """Hex HMAC-SHA256 of ``content``, keyed by SHA256(secret_key)."""
        key = Crypto._as_ascii_bytes(secret_key)
        message = Crypto._as_ascii_bytes(content)
        return hmac.new(hashlib.sha256(key).digest(), message, hashlib.sha256).hexdigest()

    @staticmethod
    def secure_compare(left, right):
        """Compare two strings in time independent of where they differ.

        Returns False when either side is None or the lengths differ.
        """
        if left is None or right is None:
            return False
        if len(left) != len(right):
            return False
        difference = 0
        for left_char, right_char in zip(left, right):
            difference |= ord(left_char) ^ ord(right_char)
        return difference == 0
| mit | f6cdb3190c14bf0520878766d1845de1 | 31.210526 | 97 | 0.617647 | 3.948387 | false | false | false | false |
braintree/braintree_python | braintree/test/nonces.py | 1 | 5118 | class Nonces(object):
Transactable = "fake-valid-nonce"
Consumed = "fake-consumed-nonce"
PayPalOneTimePayment = "fake-paypal-one-time-nonce"
PayPalFuturePayment = "fake-paypal-future-nonce"
PayPalBillingAgreement = "fake-paypal-billing-agreement-nonce"
ApplePayVisa = "fake-apple-pay-visa-nonce"
ApplePayMasterCard = "fake-apple-pay-mastercard-nonce"
ApplePayAmEx = "fake-apple-pay-amex-nonce"
AbstractTransactable = "fake-abstract-transactable-nonce"
Europe = "fake-europe-bank-account-nonce"
# NEXT_MAJOR_VERSION - rename AndroidPay to GooglePay
AndroidPayCard = "fake-android-pay-nonce"
AndroidPayCardDiscover = "fake-android-pay-discover-nonce"
AndroidPayCardVisa = "fake-android-pay-visa-nonce"
AndroidPayCardMasterCard = "fake-android-pay-mastercard-nonce"
AndroidPayCardAmEx = "fake-android-pay-amex-nonce"
# NEXT_MAJOR_VERSION remove amex express checkout
AmexExpressCheckoutCard = "fake-amex-express-checkout-nonce"
VenmoAccount = "fake-venmo-account-nonce"
VenmoAccountTokenIssuanceError = "fake-token-issuance-error-venmo-account-nonce"
ThreeDSecureVisaFullAuthentication = "fake-three-d-secure-visa-full-authentication-nonce"
ThreeDSecureVisaLookupTimeout = "fake-three-d-secure-visa-lookup-timeout-nonce"
ThreeDSecureVisaFailedSignature = "fake-three-d-secure-visa-failed-signature-nonce"
ThreeDSecureVisaFailedAuthentication = "fake-three-d-secure-visa-failed-authentication-nonce"
ThreeDSecureVisaAttemptsNonParticipating = "fake-three-d-secure-visa-attempts-non-participating-nonce"
ThreeDSecureVisaNoteEnrolled = "fake-three-d-secure-visa-not-enrolled-nonce"
ThreeDSecureVisaUnavailable = "fake-three-d-secure-visa-unavailable-nonce"
ThreeDSecureVisaMPILookupError = "fake-three-d-secure-visa-mpi-lookup-error-nonce"
ThreeDSecureVisaMPIAuthenticateError = "fake-three-d-secure-visa-mpi-authenticate-error-nonce"
ThreeDSecureVisaAuthenticationUnavailable = "fake-three-d-secure-visa-authentication-unavailable-nonce"
ThreeDSecureVisaBypassedAuthentication = "fake-three-d-secure-visa-bypassed-authentication-nonce"
ThreeDSecureTwoVisaSuccessfulFrictionlessAuthentication = "fake-three-d-secure-two-visa-successful-frictionless-authentication-nonce"
ThreeDSecureTwoVisaSuccessfulStepUpAuthentication = "fake-three-d-secure-two-visa-successful-step-up-authentication-nonce"
ThreeDSecureTwoVisaErrorOnLookup = "fake-three-d-secure-two-visa-error-on-lookup-nonce"
ThreeDSecureTwoVisaTimeoutOnLookup = "fake-three-d-secure-two-visa-timeout-on-lookup-nonce"
TransactableVisa = "fake-valid-visa-nonce"
TransactableAmEx = "fake-valid-amex-nonce"
TransactableMasterCard = "fake-valid-mastercard-nonce"
TransactableDiscover = "fake-valid-discover-nonce"
TransactableJCB = "fake-valid-jcb-nonce"
TransactableMaestro = "fake-valid-maestro-nonce"
TransactableDinersClub = "fake-valid-dinersclub-nonce"
TransactablePrepaid = "fake-valid-prepaid-nonce"
TransactableCommercial = "fake-valid-commercial-nonce"
TransactableDurbinRegulated = "fake-valid-durbin-regulated-nonce"
TransactableHealthcare = "fake-valid-healthcare-nonce"
TransactableDebit = "fake-valid-debit-nonce"
TransactablePayroll = "fake-valid-payroll-nonce"
TransactableNoIndicators = "fake-valid-no-indicators-nonce"
TransactableUnknownIndicators = "fake-valid-unknown-indicators-nonce"
TransactableCountryOfIssuanceUSA = "fake-valid-country-of-issuance-usa-nonce"
TransactableCountryOfIssuanceCAD = "fake-valid-country-of-issuance-cad-nonce"
TransactableIssuingBankNetworkOnly = "fake-valid-issuing-bank-network-only-nonce"
ProcessorDeclinedVisa = "fake-processor-declined-visa-nonce"
ProcessorDeclinedMasterCard = "fake-processor-declined-mastercard-nonce"
ProcessorDeclinedAmEx = "fake-processor-declined-amex-nonce"
ProcessorDeclinedDiscover = "fake-processor-declined-discover-nonce"
ProcessorFailureJCB = "fake-processor-failure-jcb-nonce"
LocalPayment = "fake-local-payment-method-nonce"
LuhnInvalid = "fake-luhn-invalid-nonce"
PayPalFuturePaymentRefreshToken = "fake-paypal-future-refresh-token-nonce"
SEPA = "fake-sepa-bank-account-nonce"
GatewayRejectedFraud = "fake-gateway-rejected-fraud-nonce"
GatewayRejectedRiskThreshold = "fake-gateway-rejected-risk-thresholds-nonce"
# NEXT_MAJOR_VERSION remove masterpass
MasterpassAmEx = "fake-masterpass-amex-nonce"
MasterpassDiscover = "fake-masterpass-discover-nonce"
MasterpassMasterCard = "fake-masterpass-mastercard-nonce"
MasterpassVisa = "fake-masterpass-visa-nonce"
VisaCheckoutAmEx = "fake-visa-checkout-amex-nonce"
VisaCheckoutDiscover = "fake-visa-checkout-discover-nonce"
VisaCheckoutMasterCard = "fake-visa-checkout-mastercard-nonce"
VisaCheckoutVisa = "fake-visa-checkout-visa-nonce"
SamsungPayAmex = "tokensam_fake_american_express"
SamsungPayDiscover = "tokensam_fake_american_express"
SamsungPayMasterCard = "tokensam_fake_mastercard"
SamsungPayVisa = "tokensam_fake_visa"
| mit | b898319464e6a079d24947fde61915d9 | 64.615385 | 137 | 0.782141 | 3.15342 | false | false | false | false |
braintree/braintree_python | tests/integration/test_exchange_rate_quote.py | 1 | 4968 | from tests.test_helper import *
from braintree.exchange_rate_quote_request import ExchangeRateQuoteRequest
class TestExchangeRateQuote(unittest.TestCase):
    """Integration tests for the exchange-rate-quote GraphQL endpoint.

    Expected values are fixed responses served by the local integration
    gateway, not live market rates.
    """

    @staticmethod
    def get_gateway():
        # Gateway wired to the local development/integration environment.
        config = Configuration("development", "integration_merchant_id",
                               public_key="integration_public_key",
                               private_key="integration_private_key")
        return BraintreeGateway(config)

    def test_exchange_rate_quote_with_full_graphql(self):
        # Two fully specified quote inputs -> two quotes in the payload.
        attribute1 = {"base_currency":"USD",
                      "quote_currency":"EUR",
                      "base_amount":"12.19",
                      "markup":"12.14"}
        attribute2 = {"base_currency":"EUR",
                      "quote_currency":"CAD",
                      "base_amount":"15.16",
                      "markup":"2.64"}
        request = ExchangeRateQuoteRequest().add_exchange_rate_quote_input(
            attribute1).done().add_exchange_rate_quote_input(attribute2).done()
        result = self.get_gateway().exchange_rate_quote.generate(request)
        self.assertTrue(result.is_success)
        quotes = result.exchange_rate_quote_payload.get_quotes()
        self.assertEqual(2, len(quotes))
        quote1 = quotes[0]
        self.assertEqual("12.19", str(quote1.base_amount.value))
        self.assertEqual("USD", quote1.base_amount.currency_code)
        self.assertEqual("12.16", str(quote1.quote_amount.value))
        self.assertEqual("EUR", quote1.quote_amount.currency_code)
        self.assertEqual("0.997316360864", quote1.exchange_rate)
        self.assertEqual("0.01", quote1.trade_rate)
        self.assertEqual("2021-06-16T02:00:00.000000Z", quote1.expires_at)
        self.assertEqual("2021-06-16T00:00:00.000000Z", quote1.refreshes_at)
        self.assertEqual("ZXhjaGFuZ2VyYXRlcXVvdGVfMDEyM0FCQw", quote1.id)
        quote2 = quotes[1]
        self.assertEqual("15.16", str(quote2.base_amount.value))
        self.assertEqual("EUR", quote2.base_amount.currency_code)
        self.assertEqual("23.30", str(quote2.quote_amount.value))
        self.assertEqual("CAD", quote2.quote_amount.currency_code)
        self.assertEqual("1.536744692129366", quote2.exchange_rate)
        # The second input carried no trade rate in the canned response.
        self.assertIsNone(quote2.trade_rate)
        self.assertEqual("2021-06-16T02:00:00.000000Z", quote2.expires_at)
        self.assertEqual("2021-06-16T00:00:00.000000Z", quote2.refreshes_at)
        self.assertEqual("ZXhjaGFuZ2VyYXRlcXVvdGVfQUJDMDEyMw", quote2.id)

    def test_exchange_rate_quote_with_graphqul_quote_currency_validation_error(self):
        # First input is missing quote_currency -> validation error.
        attribute1 = {"base_currency":"USD",
                      "base_amount":"12.19",
                      "markup":"12.14"}
        attribute2 = {"base_currency":"EUR",
                      "quote_currency":"CAD",
                      "base_amount":"15.16",
                      "markup":"2.64"}
        request = ExchangeRateQuoteRequest().add_exchange_rate_quote_input(
            attribute1).done().add_exchange_rate_quote_input(attribute2).done()
        result = self.get_gateway().exchange_rate_quote.generate(request)
        self.assertFalse(result.is_success)
        self.assertTrue("'quoteCurrency'" in result.message)

    def test_exchange_rate_quote_with_graphql_base_currency_validation_error(self):
        # Second input is missing base_currency -> validation error.
        attribute1 = {"base_currency":"USD",
                      "quote_currency":"EUR",
                      "base_amount":"12.19",
                      "markup":"12.14"}
        attribute2 = {"quote_currency":"CAD",
                      "base_amount":"15.16",
                      "markup":"2.64"}
        request = ExchangeRateQuoteRequest().add_exchange_rate_quote_input(
            attribute1).done().add_exchange_rate_quote_input(attribute2).done()
        result = self.get_gateway().exchange_rate_quote.generate(request)
        self.assertFalse(result.is_success)
        self.assertTrue("'baseCurrency'" in result.message)

    def test_exchange_rate_quote_with_graphql_without_base_amount(self):
        # base_amount is optional; currency-only inputs succeed.
        attribute1 = {"base_currency":"USD",
                      "quote_currency":"EUR"}
        attribute2 = {"base_currency":"EUR",
                      "quote_currency":"CAD"}
        request = ExchangeRateQuoteRequest().add_exchange_rate_quote_input(
            attribute1).done().add_exchange_rate_quote_input(attribute2).done()
        result = self.get_gateway().exchange_rate_quote.generate(request)
        self.assertTrue(result.is_success)

    def test_exchange_rate_quote_with_graphql_without_base_and_quote_currency(self):
        # Currencies are required; omitting both fails on baseCurrency first.
        attribute1 = {"base_amount":"12.19",
                      "markup":"12.14"}
        request = ExchangeRateQuoteRequest().add_exchange_rate_quote_input(
            attribute1).done()
        result = self.get_gateway().exchange_rate_quote.generate(request)
        self.assertFalse(result.is_success)
        self.assertTrue("'baseCurrency'" in result.message)
eggpi/citationhunt | handlers/citationhunt.py | 2 | 5738 | import chdb
import config
from . import database
from utils import *
from .common import *
import collections
import datetime
import urllib.request, urllib.parse, urllib.error
import urllib.parse
now = datetime.datetime.now  # alias: handlers call now() for timestamps
Category = collections.namedtuple('Category', ['id', 'title'])
# Sentinel category meaning "no category filter selected".
CATEGORY_ALL = Category('all', '')
def get_category_by_id(lang_code, cat_id):
    """Resolve a category id to a Category.

    Empty/None/'all' ids normalize to CATEGORY_ALL. Unknown ids yield
    None, which callers treat as invalid (and redirect).
    """
    if cat_id in ('', None) or cat_id == CATEGORY_ALL.id:
        return CATEGORY_ALL
    c = database.query_category_by_id(lang_code, cat_id)
    # None here means the id does not exist in the database.
    return Category(*c) if c is not None else None
def select_random_id(lang_code, category, intersection):
    """Pick a random snippet id, constrained to ``category`` or the custom
    ``intersection`` when one is given; otherwise fully random."""
    ret = None
    if category is not CATEGORY_ALL:
        ret = database.query_snippet_by_category(lang_code, category.id)
    elif intersection:
        ret = database.query_snippet_by_intersection(lang_code, intersection)
    if ret is None:
        # Try to pick one id at random. For small datasets, the probability
        # of getting an empty set in a query is non-negligible, so retry a
        # bunch of times as needed.
        with log_time('select without category'):
            for retry in range(1000):
                ret = database.query_random_snippet(lang_code)
                if ret: break
    assert ret and len(ret) == 1
    return ret[0]
def select_next_id(lang_code, curr_id, category, intersection):
    """Pick the snippet to show after ``curr_id`` under the same filter.

    Returns None when ``curr_id`` does not belong to the given category or
    intersection (the caller redirects in that case).
    """
    if category is CATEGORY_ALL and not intersection:
        # Unfiltered browsing: pick a random id, retrying a few times so we
        # do not hand back the snippet the user is already looking at.
        next_id = curr_id
        for i in range(3): # super paranoid :)
            next_id = select_random_id(lang_code, category, intersection)
            if next_id != curr_id:
                break
        return next_id
    if category is not CATEGORY_ALL:
        ret = database.query_next_id_in_category(
            lang_code, curr_id, category.id)
    else:
        assert intersection
        ret = database.query_next_id_in_intersection(
            lang_code, curr_id, intersection)
    if ret is None:
        # curr_id doesn't belong to the category or intersection
        return None
    assert ret and len(ret) == 1
    return ret[0]
@validate_lang_code
def citation_hunt(lang_code):
    """Main page handler.

    With ?id=: render that snippet (404 on unknown id, redirect when it
    does not match the active category/custom filter). Without ?id=: pick
    a random snippet for the filter and redirect to it. ?cat= and ?custom=
    are mutually exclusive; ?custom= wins.
    """
    id = flask.request.args.get('id')  # note: shadows the id() builtin
    cat = flask.request.args.get('cat')
    inter = flask.request.args.get('custom', '')
    cfg = flask.g._cfg
    strings = flask.g._strings

    lang_dir = cfg.lang_dir
    if flask.current_app.debug:
        # Allow overriding text direction for testing in debug mode.
        lang_dir = flask.request.args.get('dir', lang_dir)

    if inter and cat:
        # A custom intersection takes precedence over a category.
        inter = ''

    cat = get_category_by_id(lang_code, cat)
    if cat is None:
        # Invalid category, try again by id.
        return flask.redirect(
            flask.url_for('citation_hunt', id = id, lang_code = lang_code))

    if id is not None:
        sinfo = database.query_snippet_by_id(lang_code, id)
        if sinfo is None:
            # invalid id
            flask.abort(404)
        snippet, section, aurl, atitle, date = sinfo
        snippet = flask.Markup(snippet)
        next_snippet_id = select_next_id(lang_code, id, cat, inter)
        if next_snippet_id is None:
            # snippet doesn't belong to the category or intersection!
            assert inter or cat is not CATEGORY_ALL
            return flask.redirect(
                flask.url_for('citation_hunt',
                    id = id, lang_code = lang_code))
        article_url_path = urllib.parse.quote(
            e(urllib.parse.urlparse(aurl).path.lstrip('/')))
        # Flag snippets whose source revision is older than the configured
        # threshold so the UI can warn about possibly stale text.
        old_snippet = False
        if date is not None and cfg.old_snippet_threshold is not None:
            old_snippet = (now() - date) > cfg.old_snippet_threshold
        return flask.render_template('index.html',
            snippet_id = id, snippet = snippet,
            section = section, article_url = aurl,
            article_url_path = article_url_path,
            article_title = atitle, current_category = cat,
            current_custom = inter,
            next_snippet_id = next_snippet_id,
            old_snippet = old_snippet,
            config = cfg,
            lang_tag = flask.g._lang_tag,
            lang_dir = lang_dir,
            lang_code = lang_code,
            strings = strings,
            js_strings = strings['js'])

    # No id requested: choose one and redirect, preserving the filter.
    id = select_random_id(lang_code, cat, inter)
    redirect_params = {'id': id, 'lang_code': lang_code}
    if cat is not CATEGORY_ALL:
        redirect_params['cat'] = cat.id
    elif inter:
        redirect_params['custom'] = inter
    return flask.redirect(
        flask.url_for('citation_hunt', **redirect_params))
def do_search(search_function, lang_code):
    """Run ``search_function`` against ?q= and return a JSON response.

    ?max_results= caps the result count (hard ceiling of 400); a missing
    or non-numeric value means "no client-imposed limit".
    """
    try:
        max_results = int(flask.request.args.get('max_results'))
    except (TypeError, ValueError):
        # TypeError: parameter absent (None); ValueError: not an integer.
        # Previously a bare ``except:`` which would also swallow unrelated
        # failures such as KeyboardInterrupt.
        max_results = float('inf')
    q = flask.request.args.get('q')
    if q is None:
        return flask.jsonify(error = 'missing query')
    return flask.jsonify(
        results = search_function(
            lang_code, q, max_results = min(max_results, 400)))
@validate_lang_code
def search_category(lang_code):
    """JSON endpoint: categories matching ?q= for this language."""
    return do_search(
        database.search_category, lang_code)
@validate_lang_code
def search_article_title(lang_code):
    """JSON endpoint: article titles matching ?q= for this language."""
    return do_search(
        database.search_article_title, lang_code)
@validate_lang_code
def fixed(lang_code):
    """Return (as plain text) the number of snippets fixed since ?from_ts=.

    ``from_ts`` is a Unix timestamp; missing, malformed, or out-of-window
    values are clamped to "24 hours ago".
    """
    from_ts = flask.request.args.get('from_ts', None)
    try:
        from_ts = datetime.datetime.fromtimestamp(float(from_ts))
    except (TypeError, ValueError, OverflowError, OSError):
        # TypeError: parameter absent; ValueError: not a float / out of
        # range; OverflowError/OSError: timestamp outside platform limits.
        # Previously a bare ``except:`` with a dead ``pass`` statement.
        from_ts = None
    # note: this local shadows the module-level ``now`` alias on purpose
    now = datetime.datetime.today()
    max_delta = datetime.timedelta(hours = 24)
    if from_ts is None or abs(now - from_ts) > max_delta:
        from_ts = now - max_delta
    return flask.make_response(
        str(database.query_fixed_snippets(lang_code, from_ts)), 200)
| mit | a01c434e5a3a056c63c1f4cef9db3ba5 | 33.987805 | 77 | 0.61694 | 3.713916 | false | false | false | false |
istresearch/scrapy-cluster | kafka-monitor/plugins/action_handler.py | 1 | 1457 | from __future__ import absolute_import
from .base_handler import BaseHandler
import tldextract
import redis
import sys
from redis.exceptions import ConnectionError
class ActionHandler(BaseHandler):
    # JSON schema file used by the kafka monitor to validate incoming requests.
    schema = "action_schema.json"

    def setup(self, settings):
        '''
        Setup redis and tldextract.

        Exits the process if Redis is unreachable, since this plugin is
        essential to functionality.
        '''
        self.extract = tldextract.TLDExtract()
        self.redis_conn = redis.Redis(host=settings['REDIS_HOST'],
                                      port=settings['REDIS_PORT'],
                                      db=settings.get('REDIS_DB'))
        try:
            # Cheap round-trip to verify connectivity up front.
            self.redis_conn.info()
            self.logger.debug("Connected to Redis in ActionHandler")
        except ConnectionError:
            self.logger.error("Failed to connect to Redis in ActionHandler")
            # plugin is essential to functionality
            sys.exit(1)

    def handle(self, dict):
        '''
        Processes a valid action request.

        @param dict: a valid dictionary object
        '''
        # NOTE(review): the parameter name shadows the ``dict`` builtin;
        # kept as-is for plugin-interface stability.
        # format key
        key = "{action}:{spiderid}:{appid}".format(
                action=dict['action'],
                spiderid=dict['spiderid'],
                appid=dict['appid'])
        if "crawlid" in dict:
            # Scope the action to one crawl when a crawlid was supplied.
            key = key + ":" + dict['crawlid']
        self.redis_conn.set(key, dict['uuid'])
        dict['parsed'] = True
        dict['valid'] = True
        self.logger.info('Added action to Redis', extra=dict)
istresearch/scrapy-cluster | utils/scutils/redis_queue.py | 1 | 4196 | from future import standard_library
standard_library.install_aliases()
from builtins import object
try:
import pickle as pickle
except ImportError:
import pickle
class Base(object):
    '''
    Queue/Stack base class: shared serialization and key handling for the
    Redis-backed collections in this module.
    '''

    def __init__(self, server, key, encoding=pickle):
        '''Initialize the redis queue.

        @param server: the redis connection
        @param key: the redis key backing this queue
        @param encoding: serializer module; must expose dumps() and loads().
            Note that if you wish to use any other encoding besides pickle,
            it is assumed you have already imported that module in your code
            before calling this constructor.
        '''
        self.server = server
        self.key = key
        self.encoding = encoding
        if not hasattr(encoding, 'dumps'):
            raise NotImplementedError("encoding does not support dumps()")
        if not hasattr(encoding, 'loads'):
            raise NotImplementedError("encoding does not support loads()")

    def _encode_item(self, item):
        '''
        Serialize ``item`` with the configured encoding.
        @requires: The object be serializable
        '''
        if self.encoding.__name__ == 'pickle':
            # protocol=-1 selects the highest available pickle protocol.
            return self.encoding.dumps(item, protocol=-1)
        return self.encoding.dumps(item)

    def _decode_item(self, encoded_item):
        '''Inverse of _encode_item().'''
        return self.encoding.loads(encoded_item)

    def __len__(self):
        '''Return the length of the queue (subclass responsibility).'''
        raise NotImplementedError

    def push(self, item):
        '''Push an item (subclass responsibility).'''
        raise NotImplementedError

    def pop(self, timeout=0):
        '''Pop an item (subclass responsibility).'''
        raise NotImplementedError

    def clear(self):
        '''Delete the backing key, emptying the queue/stack.'''
        self.server.delete(self.key)
class RedisQueue(Base):
    '''
    First-in-first-out queue backed by a Redis list.
    '''

    def __len__(self):
        '''Number of items currently queued.'''
        return self.server.llen(self.key)

    def push(self, item):
        '''Append ``item`` to the queue (priority is ignored here).'''
        self.server.lpush(self.key, self._encode_item(item))

    def pop(self, timeout=0):
        '''Remove and return the oldest item.

        @param timeout: seconds to block waiting for an item; 0 polls once.
        '''
        if timeout > 0:
            raw = self.server.brpop(self.key, timeout)
            if isinstance(raw, tuple):
                # brpop returns a (key, value) pair.
                raw = raw[1]
        else:
            raw = self.server.rpop(self.key)
        return self._decode_item(raw) if raw else None
class RedisPriorityQueue(Base):
    '''
    Priority queue abstraction using redis' sorted set
    '''

    def __len__(self):
        '''Return the length of the queue'''
        return self.server.zcard(self.key)

    def push(self, item, priority):
        '''
        Push an item
        @param priority: the priority of the item; higher priorities pop first
        '''
        data = self._encode_item(item)
        # Pass the member->score mapping positionally (redis-py >= 3.0 API).
        # The previous ``zadd(self.key, **{data: -priority})`` form is
        # broken on Python 3: pickled members are ``bytes`` and cannot be
        # used as keyword-argument names, and the kwargs form of zadd was
        # removed in redis-py 3.0 anyway.
        # Scores are negated so that higher priorities sort first.
        self.server.zadd(self.key, {data: -priority})

    def pop(self, timeout=0):
        '''
        Pop the highest-priority item.
        ``timeout`` is accepted for interface parity but not supported here.
        '''
        # atomic read-and-remove of the head element via MULTI/EXEC
        pipe = self.server.pipeline()
        pipe.multi()
        pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
        results, count = pipe.execute()
        if results:
            return self._decode_item(results[0])
class RedisStack(Base):
    '''
    Last-in-first-out stack backed by a Redis list.
    '''

    def __len__(self):
        '''Number of items currently on the stack.'''
        return self.server.llen(self.key)

    def push(self, item):
        '''Push ``item`` onto the top of the stack.'''
        self.server.lpush(self.key, self._encode_item(item))

    def pop(self, timeout=0):
        '''Remove and return the most recently pushed item.

        @param timeout: seconds to block waiting for an item; 0 polls once.
        '''
        if timeout > 0:
            raw = self.server.blpop(self.key, timeout)
            if isinstance(raw, tuple):
                # blpop returns a (key, value) pair.
                raw = raw[1]
        else:
            raw = self.server.lpop(self.key)
        return self._decode_item(raw) if raw else None
# Public API of this module; Base is an internal implementation detail.
__all__ = ['RedisQueue', 'RedisPriorityQueue', 'RedisStack']
| mit | d3e3063223de5c7120348c2892ba8b2d | 23.114943 | 76 | 0.545043 | 4.325773 | false | false | false | false |
ydkhatri/mac_apt | plugins/terminalstate.py | 1 | 10842 | '''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
terminalstate.py
---------------
This plugin reads Terminal Saved State information which includes
full text content of terminal window.
'''
import io
import logging
import nska_deserialize as nd
import os
import struct
from Crypto.Cipher import AES
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
# Module-level plugin metadata read by the mac_apt framework.
__Plugin_Name = "TERMINALSTATE" # Cannot have spaces, and must be all caps!
__Plugin_Friendly_Name = "Terminal Saved State"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads Terminal saved state files which includes full text content of terminal windows"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "yogesh@swiftforensics.com"

__Plugin_Modes = "MACOS,ARTIFACTONLY" # Valid values are 'MACOS', 'IOS, 'ARTIFACTONLY'
__Plugin_ArtifactOnly_Usage = 'Provide the folder /Users/<USER>/Library/Saved Application State/com.apple.Terminal.savedState as input'

log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object

#---- Do not change the variable names in above section ----#
class TerminalWindowInfo():
    """Recovered state of a single Terminal window."""

    def __init__(self, title, working_dir, content, user, source):
        self.title = title              # window title
        self.working_dir = working_dir  # last working directory (URL string)
        self.content = content          # full text content of the window
        self.user = user                # account the saved state belongs to
        self.source = source            # path of the data.data file parsed
        # self.file_created_time = ''
        # self.file_modified_time = ''
def PrintAll(terminals, output_params):
    """Write all recovered TerminalWindowInfo rows to the configured outputs."""
    # Column names and types for the output writer.
    terminal_info = [ ('Title',DataType.TEXT),('WorkingDir',DataType.TEXT),('Content',DataType.TEXT),
                      ('User', DataType.TEXT),('Source',DataType.TEXT)
                    ]

    log.info (str(len(terminals)) + " terminal saved state(s) found")
    terminals_list = []
    for t in terminals:
        t_item = [ t.title, t.working_dir, t.content,
                   t.user, t.source
                 ]
        terminals_list.append(t_item)
    WriteList("terminal saved state", "TerminalState", terminals_list, terminal_info, output_params, '')
def get_decoded_plist_data(data):
    """Parse one decrypted NSCR record payload.

    Observed layout (big-endian): 4 unknown bytes, u32 name length, the
    class name bytes (e.g. b'_NSWindow'), b'rchv' magic, u32 plist length,
    then an NSKeyedArchiver plist.

    Returns (name, deserialized_plist) or (name, None) on any parse error.
    """
    data_size = len(data)
    name = ''
    if data_size > 8:
        name_len = struct.unpack('>I', data[4:8])[0]
        name = data[8 : 8 + name_len]
        log.debug('NSName = {}'.format(name))
        rchv = data[8 + name_len : 12 + name_len] # "rchv"
        if rchv != b"rchv":
            log.warning('magic was not "rchv", it was {}'.format(str(rchv)))
        nsa_plist_len = struct.unpack('>I', data[12 + name_len : 16 + name_len])[0]
        nsa_plist = data[16 + name_len : 16 + name_len + nsa_plist_len]
        f = io.BytesIO(nsa_plist)
        try:
            deserialized_plist = nd.deserialize_plist(f)
        except (nd.DeserializeError, nd.biplist.NotBinaryPlistException,
                nd.biplist.InvalidPlistException,plistlib.InvalidFileException,
                nd.ccl_bplist.BplistError, ValueError, TypeError,
                OSError, OverflowError) as ex:
            log.exception("")
            f.close()
            return (name, None)
        f.close()
        return (name, deserialized_plist)
    else:
        log.warning('Plist seems empty!')
    return (name, None)
def get_key_for_window_id(plist, ns_window_id):
    """Return the AES key (NSDataKey) for the window with ``ns_window_id``.

    ``plist`` is the deserialized windows.plist: a list of dicts, one per
    window. Returns None when the window is absent or carries no key
    (the latter case is logged as an error).
    """
    key = None
    for item in plist:
        if item.get('NSWindowID') == ns_window_id:
            key = item.get('NSDataKey')
            # Idiom fix: compare against None with ``is``, not ``==``.
            if key is None:
                log.error("Error fetching key, key was not found for windowID={}!".format(ns_window_id))
            break
    return key
def decrypt(enc_data, key, iv):
    '''Decrypts the data given encrypted data, key and IV (AES-CBC).

    Returns b'' when the key/IV are invalid. No unpadding is performed
    here; the caller's length-prefixed record parser reads only the bytes
    it needs (presumably ignoring any trailing padding — TODO confirm).
    '''
    try:
        cipher = AES.new(key, AES.MODE_CBC, iv)
        dec_data = cipher.decrypt(enc_data)
        return dec_data
    except (KeyError, ValueError) as ex:
        log.exception('Decryption error:')
    return b''
def ParseTerminalPlist_NSWindow(plist):
    '''Returns terminal (Title, Working Dir, Contents) as a tuple.

    ``plist`` is the deserialized _NSWindow archive, expected to be a list
    of dicts. An unexpected dict shape yields ('', '', '') so that callers
    can always unpack three values.
    '''
    title = ''
    working_dir = ''
    contents = ''
    if isinstance(plist, dict):
        # BUGFIX: previously returned bare None here, which crashed the
        # callers' ``title, working_dir, contents = ...`` tuple unpacking.
        return (title, working_dir, contents)
    try:
        for item in plist:
            for k, v in item.items():
                if k == 'NSTitle':
                    title = v
                elif k == 'TTWindowState':
                    window_settings = v.get('Window Settings', None)
                    if not window_settings:
                        continue
                    for w in window_settings:
                        for key, value in w.items():
                            if key in ('Tab Contents', 'Tab Contents v2'):
                                # Concatenate the raw terminal text lines.
                                for content in value:
                                    if isinstance(content, bytes):
                                        contents += content.decode('utf8', 'backslashreplace')
                            elif key in ('Tab Working Directory URL String', 'Tab Working Directory URL'):
                                working_dir = value
    except ValueError as ex:
        log.error("Error reading terminal plist, error was: {}".format(str(ex)))
    return (title, working_dir, contents)
def ProcessFile(windows_plist_file_path, data_file_path, terminals):
    """Standalone-mode helper: read windows.plist and data.data from disk
    and append recovered windows to ``terminals``."""
    success, windows_plist, error = CommonFunctions.ReadPlist(windows_plist_file_path)
    if success:
        with open(data_file_path, 'rb') as f:
            all_data = f.read() # Should be a small file
        Process(windows_plist, all_data, terminals, '', data_file_path)
    else:
        log.error(f"Error reading plist - {windows_plist_file_path}. Error={error}")
def AddUnique(terminal_info, terminals):
    """Append ``terminal_info`` to ``terminals`` unless an identical entry
    (same source, user, working dir, content and title) already exists."""
    for existing in terminals:
        if (existing.source == terminal_info.source and
                existing.user == terminal_info.user and
                existing.working_dir == terminal_info.working_dir and
                existing.content == terminal_info.content and
                existing.title == terminal_info.title):
            return  # duplicate found; leave the list unchanged
    terminals.append(terminal_info)
def Process(windows_plist, all_data, terminals, user, data_source):
    """Walk the NSCR1000 records in data.data, decrypt each one with the
    per-window key from windows.plist, and collect _NSWindow terminal state
    into ``terminals``."""
    # Saved-state records use an all-zero 16-byte IV.
    iv = struct.pack("<IIII", 0, 0, 0, 0)
    if windows_plist:
        pos = 0
        # Parsing data.data
        size_data = len(all_data)
        while (pos + 16) < size_data:
            # Record header: 8-byte magic, u32 window id, u32 total length.
            magic = all_data[pos:pos+8]
            ns_window_id, rec_length = struct.unpack(">II", all_data[pos+8:pos+16])
            pos += 16
            rec_length -= 16  # remaining payload after the header
            if (pos + rec_length) <= size_data:
                enc_data = all_data[pos:pos + rec_length]
                if magic != b"NSCR1000":
                    log.error("Unknown header:" + str(magic))
                key = get_key_for_window_id(windows_plist, ns_window_id)
                if key:
                    dec_data = decrypt(enc_data, key, iv)
                    data_name, new_data = get_decoded_plist_data(dec_data)
                    if new_data and data_name == b'_NSWindow':
                        title, working_dir, contents = ParseTerminalPlist_NSWindow(new_data)
                        # Skip windows with no recoverable information.
                        if not(len(contents) == 0 and len(working_dir) == 0 and len(title) == 0):
                            t = TerminalWindowInfo(title, working_dir, contents, user, data_source)
                            #terminals.append(t)
                            AddUnique(t, terminals)
                else:
                    # NOTE(review): writes to stdout instead of the module
                    # logger — likely should be log.error; kept as-is.
                    print('key not found for window_id={}'.format(ns_window_id))
            pos += rec_length
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin.

    Iterates every user's com.apple.Terminal.savedState folder on the disk
    image, exports the raw files, decodes them, and writes the results.
    '''
    processed_paths = []
    terminals = []
    saved_state_path = '{}/Library/Saved Application State/com.apple.Terminal.savedState'
    for user in mac_info.users:
        if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        # NOTE(review): user_name is assigned here but never read below
        # (the code uses user.user_name) — looks vestigial; confirm intent.
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users use the same root folder, we will list all such users as 'root', as there is no way to tell
        if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
        processed_paths.append(user.home_dir)
        source_path = saved_state_path.format(user.home_dir)
        windows_plist_path = source_path + '/windows.plist'
        data_path = source_path + '/data.data'
        if mac_info.IsValidFolderPath(source_path) and mac_info.IsValidFilePath(windows_plist_path) and mac_info.IsValidFilePath(data_path):
            # Export the raw artifacts before parsing them.
            mac_info.ExportFile(windows_plist_path, __Plugin_Name, user.user_name + "_", False)
            mac_info.ExportFile(data_path, __Plugin_Name, user.user_name + "_", False)
            success, windows_plist, error = mac_info.ReadPlist(windows_plist_path)
            if success:
                try:
                    all_data_file = mac_info.Open(data_path)
                    if (all_data_file):
                        all_data = all_data_file.read()
                        Process(windows_plist, all_data, terminals, user.user_name, data_path)
                    else:
                        log.error('Failed to open data.data file - {}'.format(data_path))
                except (ValueError, OSError):
                    log.exception('')
            else:
                log.error('Failed to open windows.plist: {}'.format(windows_plist_path))

    if len(terminals) > 0:
        PrintAll(terminals, mac_info.output_params)
    else:
        log.info('No Terminal saved state found')
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Main entry point function when used on single artifacts (mac_apt_singleplugin), not on a full disk image'''
    log.info("Module Started as standalone")
    terminals = []
    for input_path in input_files_list:
        log.debug("Input folder passed was: " + input_path)
        if not os.path.isdir(input_path):
            log.error('Input path "{}" is not a folder. Provide the input path to folder com.apple.Terminal.savedState'.format(input_path))
            continue
        # A savedState folder holds both artifacts side by side.
        plist_file = os.path.join(input_path, 'windows.plist')
        data_file = os.path.join(input_path, 'data.data')
        ProcessFile(plist_file, data_file, terminals)
    if terminals:
        PrintAll(terminals, output_params)
    else:
        log.info('No Terminal saved state found')
def Plugin_Start_Ios(ios_info):
    '''Entry point for ios_apt plugin'''
    pass # No-op: Terminal saved-state is a macOS artifact, nothing to parse on iOS
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | 654c001f72acb9d5bf09477098b7fc8f | 41.190661 | 180 | 0.589375 | 3.850142 | false | false | false | false |
hungpham2511/toppra | toppra/utils.py | 1 | 5642 | """
Some utility functions need to generate PathConstraints. Most are
specific to different scenarios.
"""
import logging
import functools
import warnings
import numpy as np
logger = logging.getLogger(__name__)
def deprecated(func):
    """Decorator marking *func* as deprecated.

    Each call to the wrapped function emits a :class:`DeprecationWarning`
    before delegating to the original function unchanged.
    """
    # pylint: disable=C0111
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        message = f"Call to deprecated function {func.__name__} in module {func.__module__}."
        warnings.warn(message, category=DeprecationWarning)
        return func(*args, **kwargs)
    return wrapper
def setup_logging(level="WARN"):
"""Setup basic logging facility to console.
"""
logger_toppra = logging.getLogger("toppra")
handler_basic = logging.StreamHandler()
handler_basic.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)5s [%(filename)s : %(lineno)d] %(message)s")
handler_basic.setFormatter(formatter)
logger_toppra.setLevel(level)
logger_toppra.addHandler(handler_basic)
def compute_jacobian_wrench(robot, link, point):
    """Return the wrench Jacobian of `link` at `point`.

    Stacks the translational Jacobian (evaluated at `point`) on top of the
    angular (axis-angle) Jacobian, so that

        J_wrench.T * wrench = J_trans.T * F + J_rot.T * tau

    gives the induced generalized joint torques.
    """
    link_index = link.GetIndex()
    j_trans = robot.ComputeJacobianTranslation(link_index, point)
    j_rot = robot.ComputeJacobianAxisAngle(link_index)
    return np.vstack((j_trans, j_rot))
def inv_dyn(rave_robot, q, qd, qdd, forceslist=None, returncomponents=True):
    """Inverse dynamics equation.

    Simple wrapper around OpenRAVE's ComputeInverseDynamics function.
    Return the numerical values of the components of the inverse dynamics
    equation

        M(q) qdd + C(q, qd) qd + g(q) = t1 + t2 + t3

    Parameters
    ----------
    rave_robot : OpenRAVE.robot
    q : (_N, ) ndarray or scalar
        Joint position.
    qd : (_N, ) ndarray or scalar
        Joint velocity.
    qdd : (_N, ) ndarray or scalar
        Joint acceleration.
    forceslist : optional
        External forces passed through to ComputeInverseDynamics.
    returncomponents : bool
        If True, return the list [t1, t2, t3]; if False, return t1 + t2 + t3.

    Returns
    -------
    res : (3, ) list, or ndarray
        See the returncomponents parameter.
    """
    # Wrap scalars so a 1-DOF robot can be queried with plain numbers.
    if np.isscalar(q): # Scalar case
        _q = [q]
        _qd = [qd]
        _qdd = [qdd]
    else:
        _q = q
        _qd = qd
        _qdd = qdd
    # Temporary remove kinematic Limits
    # (presumably so SetDOFValues/SetDOFVelocities below do not clamp or
    # reject out-of-limit states -- TODO confirm against OpenRAVE docs)
    vlim = rave_robot.GetDOFVelocityLimits()
    alim = rave_robot.GetDOFAccelerationLimits()
    rave_robot.SetDOFVelocityLimits(100 * vlim)
    rave_robot.SetDOFAccelerationLimits(100 * alim)
    # Do computation
    # NOTE(review): `with rave_robot` appears to save/restore robot state on
    # exit (OpenRAVE robot context manager) -- confirm before refactoring.
    with rave_robot:
        rave_robot.SetDOFValues(_q)
        rave_robot.SetDOFVelocities(_qd)
        res = rave_robot.ComputeInverseDynamics(
            _qdd, forceslist, returncomponents=returncomponents
        )
    # Restore kinematic limits
    rave_robot.SetDOFVelocityLimits(vlim)
    rave_robot.SetDOFAccelerationLimits(alim)
    return res
def smooth_singularities(parametrization_instance, us, xs, vs=None):
    """Smooth jitters due to singularities.

    Solving TOPP for the discrete problem generated from the collocation
    scheme tends to create jitters. This function finds and smooths them.

    Notes
    -----
    (`us_smth`, `xs_smth`) is a *valid* path-parameterization: they satisfy
    the linear continuity condition :math:`x_{i+1} = x_i + 2 delta_i u_i`.
    This function is safe: it will always return a solution.

    Parameters
    ----------
    parametrization_instance: :class:`.qpOASESPPSolver`
    us: array
        Shape (_N, ). Controls.
    xs: array
        Shape (_N+1, ). Squared velocities.
    vs: array, optional
        If not given, `vs_smth` will not be returned.

    Returns
    -------
    us_smth: array
        Shape (_N, ). Smoothed controls.
    xs_smth: array
        Shape (_N+1, ). Smoothed squared velocities.
    vs_smth: array
        Only returned when `vs` is given.
    """
    # Find the indices: a singularity is flagged by a down-up-down sign
    # pattern in consecutive first differences of the controls.
    singular_indices = []
    uds = np.diff(us, n=1)
    for i in range(parametrization_instance.N - 3):
        if uds[i] < 0 < uds[i + 1] and uds[i + 2] < 0:
            logger.debug("Found potential singularity at %d", i)
            singular_indices.append(i)
    logger.debug("Found singularities at %s", singular_indices)
    # Smooth the singularities: linearly interpolate xs (and vs) across a
    # small window [idstart, idend] around each flagged index.
    xs_smth = np.copy(xs)
    us_smth = np.copy(us)
    if vs is not None:
        vs_smth = np.copy(vs)
    for index in singular_indices:
        idstart = max(0, index)
        idend = min(parametrization_instance.N, index + 4)
        xs_smth[range(idstart, idend + 1)] = xs_smth[idstart] + (
            xs_smth[idend] - xs_smth[idstart]
        ) * np.linspace(0, 1, idend + 1 - idstart)
        if vs is not None:
            # NOTE(review): the interpolation increment here uses the *xs*
            # delta (xs_smth[idend] - xs_smth[idstart]) rather than a vs
            # delta -- looks suspicious; confirm intended behavior upstream.
            data = [
                vs_smth[idstart] + (xs_smth[idend] - xs_smth[idstart]) * frac
                for frac in np.linspace(0, 1, idend + 1 - idstart)
            ]
            vs_smth[range(idstart, idend + 1)] = np.array(data)
    # Recompute all controls from the smoothed xs so the continuity
    # condition x_{i+1} = x_i + 2 delta_i u_i holds exactly.
    for i in range(parametrization_instance.N):
        us_smth[i] = (
            (xs_smth[i + 1] - xs_smth[i])
            / 2
            / (parametrization_instance.ss[i + 1] - parametrization_instance.ss[i])
        )
    if vs is not None:
        return us_smth, xs_smth, vs_smth
    return us_smth, xs_smth
| mit | 78b688d12f200ef15ccc41aae64d0e4d | 29.497297 | 91 | 0.620525 | 3.450765 | false | false | false | false |
ydkhatri/mac_apt | plugins/helpers/disk_report.py | 1 | 7200 | '''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
import pytsk3
from plugins.helpers.apfs_reader import ApfsSysDataLinkedVolume
import plugins.helpers.macinfo as macinfo
from plugins.helpers.writer import *
import logging
import textwrap
log = logging.getLogger('MAIN.DISK_REPORT')
class Vol_Info:
    '''Lightweight record describing a single volume/partition for reporting.'''
    def __init__(self, name, size, used, file_sys_type, offset, has_os):
        self.name = name # volume name or partition description
        self.size_bytes = size # total size in bytes
        self.size_used = used # human-readable used-space string (may be '')
        self.file_system = file_sys_type # e.g. 'HFS', 'APFS' or 'Unknown'
        self.offset = offset # partition start offset (bytes from disk start)
        self.size_str = Disk_Info.GetSizeStr(size) # human-readable total size
        self.has_os = has_os # True if macOS is installed on this volume
class Disk_Info:
    '''Collects disk, partition-scheme and per-volume details from the source
    image (via pytsk3 and the mac_apt mac_info object) and writes them out.'''

    @staticmethod
    def GetSizeStr(size_bytes):
        '''Return a human-readable size string (bytes/KB/MB/GB/TB) for size_bytes.'''
        size_str = ''
        if size_bytes < 1024:
            size_str = str(size_bytes) + " bytes"
        elif size_bytes >= 1024 and size_bytes < 1024 * 1024:
            size_str = '{0:.2f} KB'.format(size_bytes / 1024)
        elif size_bytes >= 1024 * 1024 and size_bytes < 1024 * 1024 * 1024:
            size_str = '{0:.2f} MB'.format(size_bytes / (1024 * 1024))
        elif size_bytes >= 1024 * 1024 * 1024 and size_bytes < 1024 * 1024 * 1024 * 1024:
            size_str = '{0:.2f} GB'.format(size_bytes / (1024 * 1024 * 1024))
        else:
            size_str = '{0:.2f} TB'.format(size_bytes / (1024 * 1024 * 1024 * 1024))
        return size_str

    def __init__(self, mac_info, source_image_path, apfs_container_only=False):
        '''Cache image geometry. If apfs_container_only is True the image is a
        bare APFS container (no partition table), so block_size stays 0.'''
        self.mac_info = mac_info
        self.image_path = source_image_path
        self.apfs_block_size = 0
        if mac_info.is_apfs:
            self.apfs_block_size = mac_info.apfs_container.block_size
        self.apfs_container_only = apfs_container_only
        if apfs_container_only:
            self.block_size = 0
        else:
            self.block_size = mac_info.vol_info.info.block_size
        self.img = mac_info.pytsk_image
        self.volumes = [] # list of Vol_Info, filled by ReadVolumesFromPartTable()
        self.total_disk_size_in_bytes = self.img.get_size()
        self.total_MB = self.total_disk_size_in_bytes / (1024 * 1024)
        self.total_GB = self.total_disk_size_in_bytes / (1024 * 1024 * 1024)

    def Write(self):
        '''Log a disk summary, enumerate volumes, and write the Disk_Info report.'''
        log.info('Disk info')
        log.info('Disk Size = {:.2f} GB ({} bytes)'.format(self.total_GB, self.total_disk_size_in_bytes))
        # str(vstype)[12:] strips the 'TSK_VS_TYPE_' enum prefix
        log.info('Part Scheme = {}'.format(str(self.mac_info.vol_info.info.vstype)[12:]))
        log.info('Block size = {} bytes'.format(self.block_size))
        log.info('Num Sectors = {} '.format(self.total_disk_size_in_bytes/self.block_size))
        self.ReadVolumesFromPartTable()
        data_info = [ ('Type',DataType.TEXT),('Scheme_or_FS-Type',DataType.TEXT),('Name',DataType.TEXT),
                      ('Offset',DataType.INTEGER),('Size',DataType.TEXT), ('Size_in_bytes',DataType.INTEGER),
                      ('Size_Used',DataType.TEXT),('macOS_Installed',DataType.TEXT) ]
        info = [ ['Partition', x.file_system, x.name, x.offset, x.size_str, x.size_bytes, x.size_used,
                  '*' if x.has_os else ''] for x in self.volumes]
        # First row describes the whole disk; volume rows follow.
        info.insert(0, ['Disk', str(self.mac_info.vol_info.info.vstype)[12:], '', 0, Disk_Info.GetSizeStr(self.total_disk_size_in_bytes), self.total_disk_size_in_bytes, '', ''])
        WriteList("disk, partition & volume information", "Disk_Info", info, data_info, self.mac_info.output_params,'')

    def IsApfsBootVolume(self, volume):
        '''Checks if this is the boot volume. For Catalina (10.15), it will return True for
           both SYSTEM and DATA volumes (they are presented as one linked volume).
        '''
        if self.mac_info.macos_FS == volume:
            return True
        elif isinstance(self.mac_info.macos_FS, ApfsSysDataLinkedVolume):
            if volume == self.mac_info.macos_FS.sys_vol or volume == self.mac_info.macos_FS.data_vol:
                return True
        return False

    def ReadVolumesFromPartTable(self):
        '''Populate self.volumes from the partition table (or directly from the
        APFS container when apfs_container_only is set).'''
        if self.apfs_container_only:
            size = self.mac_info.apfs_container_size
            for volume in self.mac_info.apfs_container.volumes:
                used_space = Disk_Info.GetSizeStr(volume.container.block_size * volume.num_blocks_used)
                vol = Vol_Info(volume.volume_name, size, used_space, 'APFS', 0, self.IsApfsBootVolume(volume))
                self.volumes.append(vol)
        else:
            for part in self.mac_info.vol_info:
                # Only allocated partitions are of interest
                if (int(part.flags) & pytsk3.TSK_VS_PART_FLAG_ALLOC):
                    partition_start_offset = self.block_size * part.start
                    partition_size_in_sectors = part.len
                    file_system = 'Unknown'
                    part_is_apfs = False
                    used_space = ''
                    try:
                        fs = pytsk3.FS_Info(self.img, offset=partition_start_offset)
                        fs_info = fs.info # TSK_FS_INFO
                        fs_type = str(fs_info.ftype)[12:] # strip 'TSK_FS_TYPE_' prefix
                        if fs_type.find("_") > 0: fs_type = fs_type[0:fs_type.find("_")]
                        file_system = fs_type
                        if file_system == 'HFS' and self.mac_info.macos_partition_start_offset == partition_start_offset: # For macOS partition only
                            hfs_info = self.mac_info.hfs_native.GetVolumeInfo()
                            used_space = Disk_Info.GetSizeStr(hfs_info.block_size * (hfs_info.total_blocks - hfs_info.free_blocks))
                    except Exception as ex:
                        # TSK cannot parse APFS here; an unreadable fs at the
                        # known macOS offset is treated as the APFS container.
                        if self.mac_info.is_apfs and partition_start_offset == self.mac_info.macos_partition_start_offset:
                            part_is_apfs = True
                            for volume in self.mac_info.apfs_container.volumes:
                                used_space = Disk_Info.GetSizeStr(volume.container.block_size * volume.num_blocks_used)
                                vol = Vol_Info(volume.volume_name,
                                               partition_size_in_sectors * self.block_size,
                                               used_space, 'APFS',
                                               partition_start_offset,
                                               self.IsApfsBootVolume(volume))
                                self.volumes.append(vol)
                        elif part.desc.decode('utf-8', 'ignore').upper() in ("EFI SYSTEM PARTITION", "APPLE_PARTITION_MAP"):
                            log.debug(" Skipping {}".format(part.desc.decode('utf-8', 'ignore')))
                        else:
                            log.debug(" Error: Failed to detect/parse file system!")
                    if not part_is_apfs:
                        vol = Vol_Info(part.desc.decode('utf-8', 'ignore'),
                                       partition_size_in_sectors * self.block_size, used_space,
                                       file_system, partition_start_offset, self.mac_info.macos_partition_start_offset==partition_start_offset)
                        self.volumes.append(vol) | mit | 0a9463b42fae2716fc0a97505cc2c1b6 | 51.731343 | 177 | 0.550139 | 3.742204 | false | false | false | false |
ydkhatri/mac_apt | plugins/screentime.py | 1 | 8675 | '''
Copyright (c) 2019 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
from plugins.helpers.common import *
import os
import sqlite3
import logging
__Plugin_Name = "SCREENTIME"
__Plugin_Friendly_Name = "Screen Time Data"
__Plugin_Version = "1.0"
__Plugin_Description = "Parses application Screen Time data"
__Plugin_Author = "Jack Farley"
__Plugin_Author_Email = "jfarley248@gmail.com"
__Plugin_Modes = "IOS,MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Provide Screen Time database found at:' \
'/private/var/folders/XX/XXXXXXXXXXXXXXXXXXX_XXXXXXXXX/0/com.apple.ScreenTimeAgent/Store/'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
class ScreenTime:
    '''Record of one Screen Time usage row (one app/domain on one device).'''
    def __init__(self, app, total_time, start_date, end_date, num_notifics, num_pickups, num_pickups_no_app,
                 device_name, apple_id, full_name, family_type, source):
        # Store every field verbatim; attribute names mirror the report columns.
        fields = ('app', 'total_time', 'start_date', 'end_date', 'num_notifics',
                  'num_pickups', 'num_pickups_no_app', 'device_name', 'apple_id',
                  'full_name', 'family_type', 'source')
        values = (app, total_time, start_date, end_date, num_notifics,
                  num_pickups, num_pickups_no_app, device_name, apple_id,
                  full_name, family_type, source)
        for attr, value in zip(fields, values):
            setattr(self, attr, value)
def PrintAll(screen_time_data, output_params, source_path):
    '''Write all collected ScreenTime records via the framework writer.'''
    screen_time_info = [ ('Application',DataType.TEXT),('Total_Time',DataType.TEXT),('Start_Date',DataType.TEXT),
                     ('End_Date',DataType.TEXT),('Notification_Count',DataType.INTEGER), ('Pickup_Count',DataType.INTEGER),
                     ('Pickups_Without_Usage',DataType.INTEGER),('Device_Name',DataType.TEXT),('Apple_ID',DataType.TEXT),
                     ('Full_Name', DataType.TEXT),
                     ('Family_Member_Type', DataType.TEXT),('Source',DataType.TEXT)
                   ]
    # One report row per record, columns in schema order.
    screen_time_list = [
        [sc.app, sc.total_time, sc.start_date, sc.end_date,
         sc.num_notifics, sc.num_pickups, sc.num_pickups_no_app,
         sc.device_name, sc.apple_id, sc.full_name, sc.family_type, sc.source]
        for sc in screen_time_data
    ]
    WriteList("ScreenTime Info", "ScreenTime", screen_time_list, screen_time_info, output_params, source_path)
def OpenDbFromImage(mac_info, inputPath):
    '''Open a sqlite database that resides inside the disk image.

    Returns tuple of (connection, wrapper_obj); both are None on failure.
    '''
    try:
        wrapper = SqliteWrapper(mac_info)
        db = wrapper.connect(inputPath)
        if db:
            log.debug ("Opened database successfully")
            return db, wrapper
    except sqlite3.Error:
        log.exception ("Failed to open database, is it a valid Screen Time DB?")
    return None, None
def OpenDb(inputPath):
    '''Open a sqlite database from the local filesystem (read-only); returns None on failure.'''
    log.info ("Processing file " + inputPath)
    conn = None
    try:
        conn = CommonFunctions.open_sqlite_db_readonly(inputPath)
        log.debug ("Opened database successfully")
    except sqlite3.Error:
        log.exception ("Failed to open database, is it a valid Screen Time DB?")
    return conn
def findDb(mac_info):
    '''Return paths of per-user ScreenTime local sqlite stores found in the image.'''
    found = []
    for user in mac_info.users:
        if not user.DARWIN_USER_DIR or not user.user_name:
            continue # TODO: revisit this later!
        # DARWIN_USER_DIR can hold several comma-separated folders per user.
        for darwin_dir in user.DARWIN_USER_DIR.split(','):
            candidate = darwin_dir + '/com.apple.ScreenTimeAgent/Store/RMAdminStore-Local.sqlite'
            if mac_info.IsValidFilePath(candidate):
                found.append(candidate)
    return found
def ReadScreenTime(db, screen_time_arr, source):
    '''Query an RMAdminStore sqlite db and append one ScreenTime record per usage row.

    Bug fix: the first result row used to be consumed by a fetchone()
    emptiness probe and never reported (iteration over the cursor resumed at
    the second row). fetchall() now keeps every row.
    '''
    try:
        query = "SELECT " \
            "IFNULL(zut.ZBUNDLEIDENTIFIER, zut.ZDOMAIN) as app, " \
            "time(zut.ZTOTALTIMEINSECONDS, 'unixepoch') as total_time, " \
            "datetime(zub.ZSTARTDATE + 978307200, 'unixepoch') as start_date, " \
            "datetime(zub.ZLASTEVENTDATE + 978307200, 'unixepoch') as end_date, " \
            "zuci.ZNUMBEROFNOTIFICATIONS as num_notifics, " \
            "zuci.ZNUMBEROFPICKUPS as num_pickups, " \
            "zub.ZNUMBEROFPICKUPSWITHOUTAPPLICATIONUSAGE as num_pickups_no_app, " \
            "zcd.ZNAME as device_name, zcu.ZAPPLEID as apple_id, " \
            "zcu.ZGIVENNAME || \" \" || zcu.ZFAMILYNAME as full_name, " \
            "zcu.ZFAMILYMEMBERTYPE as family_type " \
            "FROM ZUSAGETIMEDITEM as zut " \
            "LEFT JOIN ZUSAGECATEGORY as zuc on zuc.Z_PK = zut.ZCATEGORY " \
            "LEFT JOIN ZUSAGEBLOCK as zub on zub.Z_PK = zuc.ZBLOCK " \
            "LEFT JOIN ZUSAGE as zu on zu.Z_PK = zub.ZUSAGE " \
            "LEFT JOIN ZCOREDEVICE as zcd on zcd.Z_PK = zu.ZDEVICE " \
            "LEFT JOIN ZCOREUSER as zcu on zcu.Z_PK = zu.ZUSER " \
            "LEFT JOIN ZUSAGECOUNTEDITEM as zuci on zuci.ZBLOCK = zuc.ZBLOCK AND zuci.ZBUNDLEIDENTIFIER = zut.ZBUNDLEIDENTIFIER " \
            "ORDER BY zub.ZSTARTDATE;"
        db.row_factory = sqlite3.Row
        cursor = db.execute(query)
        rows = cursor.fetchall()
        if not rows:
            log.warning("SQL Query worked, but no results were found in database: " + str(source))
        for row in rows:
            # Counter columns may be NULL when no ZUSAGECOUNTEDITEM row joined.
            num_notifics = row['num_notifics'] if row['num_notifics'] is not None else 0
            num_pickups = row['num_pickups'] if row['num_pickups'] is not None else 0
            num_pickups_no_app = row['num_pickups_no_app'] if row['num_pickups_no_app'] is not None else 0
            sc = ScreenTime(row['app'], row['total_time'], row['start_date'], row['end_date'], num_notifics,
                            num_pickups, num_pickups_no_app, row['device_name'],
                            row['apple_id'], row['full_name'], row['family_type'], source)
            screen_time_arr.append(sc)
    except sqlite3.Error:
        log.exception('Query execution failed. Query was: ' + query)
def ProcessSCDbFromPath(mac_info, screen_time_arr, source_path):
    '''Export the db from the image, open it and harvest its Screen Time rows.'''
    mac_info.ExportFile(source_path, __Plugin_Name)
    db, wrapper = OpenDbFromImage(mac_info, source_path)
    if db is not None:
        ReadScreenTime(db, screen_time_arr, source_path)
        db.close()
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin'''
    records = []
    for db_path in findDb(mac_info):
        ProcessSCDbFromPath(mac_info, records, db_path)
    if records:
        log.info("Screen Time data found!")
        PrintAll(records, mac_info.output_params, '')
    else:
        log.info("No Screen Time artifacts found.")
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Entry point when run against individual RMAdminStore sqlite files.

    Fixes: removed a dead `filename` local (computed but never used) and
    closed the database connection after reading (it used to leak).
    '''
    log.info("Module Started as standalone")
    for input_path in input_files_list:
        log.debug("Input file passed was: " + input_path)
        screen_time_arr = []
        db = OpenDb(input_path)
        if db != None:
            ReadScreenTime(db, screen_time_arr, input_path)
            db.close()
        if screen_time_arr:
            PrintAll(screen_time_arr, output_params, '')
        else:
            log.info("No Screen Time artifacts found.")
def Plugin_Start_Ios(ios_info):
    '''Entry point for ios_apt plugin'''
    # Both the local and the cloud-synced Screen Time stores are checked.
    known_paths = (
        "/private/var/mobile/Library/Application Support/com.apple.remotemanagementd/RMAdminStore-Local.sqlite",
        "/private/var/mobile/Library/Application Support/com.apple.remotemanagementd/RMAdminStore-Cloud.sqlite",
    )
    records = []
    for db_path in known_paths:
        if ios_info.IsValidFilePath(db_path):
            ProcessSCDbFromPath(ios_info, records, db_path)
    if records:
        log.info("Screen Time data found!")
        PrintAll(records, ios_info.output_params, '')
    else:
        log.info("No Screen Time artifacts found.")
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | bd34654f6ff5566049400040ea39e44f | 41.738916 | 134 | 0.620058 | 3.465841 | false | false | false | false |
ydkhatri/mac_apt | plugins/safari.py | 1 | 32974 | '''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
import io
import os
import logging
import nska_deserialize as nd
from plugins.helpers import macinfo
import plugins.helpers.ccl_bplist as ccl_bplist
from enum import IntEnum
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
__Plugin_Name = "SAFARI"
__Plugin_Friendly_Name = "Internet history, downloaded file information, cookies and more from Safari caches"
__Plugin_Version = "2.0"
__Plugin_Description = "Gets internet history, downloaded file information, cookies and more from Safari caches"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "yogesh@swiftforensics.com"
__Plugin_Modes = "IOS,MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = ''
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
''' Mavericks had History.plist, Yosemite has History.db
<Home_DIR>/Library/Preferences/com.apple.safari.plist
RecentSearchStrings[], SuccessfulLaunchTimestamp, DownloadsPath, HomePage, FrequentlyVisitedSitesCache
<Home_DIR>/Library/Safari/ --> Bookmarks.plist, Downloads.plist, History.plist, Form Values (Encrypted!),
UserNotificationPermissions.plist, RecentlyClosedTabs.plist
LastSession.plist <-- SessionVersion, SessionWindows\[xx]\TabStates\[xx]\[TabTitle & TabURL]
TopSites.plist <-- [BannedURLStrings] , DisplayedSitesLastModified, TopSites\[xx][TopSiteTitle & TopSiteURLString]
Extensions\Extensions.plist <-- Installed Extensions\[xx][Archive File Name & Enabled]
ReadingListArchives/<UUID>/Page.webarchive <-- Plist, get WebResourceURL
BrowserState.db
CloudTabs.db
'''
class SafariItemType(IntEnum):
    '''Category tags for Safari artifacts written to the report.'''
    UNKNOWN = 0
    HISTORY = 1
    TOPSITE = 2
    BOOKMARK = 3
    DOWNLOAD = 4
    LASTSESSION = 5
    RECENTCLOSEDTAB = 6
    EXTENSION = 7
    GENERAL = 8             # From com.apple.safari.plist
    HISTORYDOMAINS = 9
    TOPSITE_BANNED = 10
    FREQUENTLY_VISITED = 11 # From com.apple.safari.plist
    CLOUDTAB = 12
    TAB = 13                # From BrowserState
    TABHISTORY = 14         # Tab session history from BrowserState

    def __str__(self):
        # Report the bare member name (e.g. 'HISTORY'), not 'SafariItemType.HISTORY'.
        return self._name_
class SafariItem:
    '''One parsed Safari artifact (history entry, bookmark, tab, download, ...).'''
    def __init__(self, type, url, name, date, other, user, source):
        # `other` is free-form extra detail text; stored as other_info.
        (self.type, self.url, self.name, self.date,
         self.other_info, self.user, self.source) = (type, url, name, date,
                                                     other, user, source)
def PrintAll(safari_items, output_params, source_path):
    '''Write all collected Safari artifacts via the framework writer.'''
    safari_info = [ ('Type',DataType.TEXT),('Name_or_Title',DataType.TEXT),('URL',DataType.TEXT),
                    ('Date', DataType.DATE),('Other_Info', DataType.TEXT),('User', DataType.TEXT),
                    ('Source',DataType.TEXT)
                  ]
    data_list = []
    for item in safari_items:
        # Strip the scheme from local file URLs so the report shows a plain path.
        url = item.url[7:] if item.url.startswith('file://') else item.url
        data_list.append([str(item.type), item.name, url, item.date, item.other_info, item.user, item.source])
    WriteList("safari information", "Safari", data_list, safari_info, output_params, source_path)
def ReadSafariPlist(plist, safari_items, source, user):
    '''Read com.apple.safari.plist.

    Pulls recent searches, frequently visited sites, downloads path, home
    page, last selected extension, last root dir and last successful launch
    time. Each key is optional; missing keys are silently skipped.
    '''
    try:
        searches = plist['RecentSearchStrings'] # Mavericks
        try:
            for search in searches:
                si = SafariItem(SafariItemType.GENERAL, '', search, None, 'RECENT_SEARCH', user, source)
                safari_items.append(si)
        except ValueError as ex:
            log.exception('Error reading RecentSearchStrings from plist')
    except KeyError: # Not found
        pass
    try:
        searches = plist['RecentWebSearches'] # Yosemite
        try:
            for search in searches:
                si = SafariItem(SafariItemType.GENERAL, '', search.get('SearchString',''),
                                search.get('Date', None), 'RECENT_SEARCH', user, source)
                safari_items.append(si)
        except ValueError as ex:
            log.exception('Error reading RecentWebSearches from plist')
    except KeyError: # Not found
        pass
    try:
        freq_sites = plist['FrequentlyVisitedSitesCache'] # seen in El Capitan
        try:
            for site in freq_sites:
                si = SafariItem(SafariItemType.FREQUENTLY_VISITED, site.get('URL', ''), site.get('Title',''),
                                None, 'FrequentlyVisitedSitesCache', user, source)
                safari_items.append(si)
        except ValueError as ex:
            log.exception('Error reading FrequentlyVisitedSitesCache from plist')
    except KeyError: # Not found
        pass
    try:
        download_path = plist['DownloadsPath']
        si = SafariItem(SafariItemType.GENERAL, '', download_path, None, 'DOWNLOADS_PATH', user, source)
        safari_items.append(si)
    except KeyError: # Not found
        pass
    try:
        home = plist['HomePage']
        si = SafariItem(SafariItemType.GENERAL, home, '', None, 'HOME_PAGE', user, source)
        safari_items.append(si)
    except KeyError: # Not found
        pass
    try:
        last_ext_pref_selected = plist['LastExtensionSelectedInPreferences']
        si = SafariItem(SafariItemType.EXTENSION, '', last_ext_pref_selected, None, 'LastExtensionSelectedInPreferences', user, source)
        safari_items.append(si)
    except KeyError: # Not found
        pass
    try:
        last_root_dir = plist['NSNavLastRootDirectory']
        si = SafariItem(SafariItemType.GENERAL, last_root_dir, '', None, 'NSNavLastRootDirectory', user, source)
        safari_items.append(si)
    except KeyError: # Not found
        pass
    try:
        # Stored as a Mac absolute timestamp (seconds since 2001-01-01)
        time = CommonFunctions.ReadMacAbsoluteTime(plist['SuccessfulLaunchTimestamp'])
        si = SafariItem(SafariItemType.GENERAL, '', '', time, 'SuccessfulLaunchTimestamp', user, source)
        safari_items.append(si)
    except KeyError: # Not found
        pass
def ProcessSafariPlist(mac_info, source_path, user, safari_items, read_plist_function):
    '''Export a plist from the image, then parse it with the supplied reader function.'''
    mac_info.ExportFile(source_path, __Plugin_Name, user + "_", False)
    success, plist, error = mac_info.ReadPlist(source_path)
    if not success:
        log.info('Failed to open plist: {}'.format(source_path))
        return
    read_plist_function(plist, safari_items, source_path, user)
def ReadHistoryDb(conn, safari_items, source_path, user):
    '''Pull visit records out of Safari's History.db into safari_items.'''
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.execute("select title, url, load_successful, visit_time as time_utc from "
                            "history_visits left join history_items on history_visits.history_item = history_items.id")
        try:
            for row in cursor:
                try:
                    # visit_time is a Mac absolute timestamp
                    safari_items.append(
                        SafariItem(SafariItemType.HISTORY, row['url'], row['title'],
                                   CommonFunctions.ReadMacAbsoluteTime(row['time_utc']), '', user, source_path))
                except sqlite3.Error:
                    log.exception ("Error while fetching row data")
        except sqlite3.Error:
            log.exception ("Db cursor error while reading file " + source_path)
        conn.close()
    except sqlite3.Error:
        log.exception ("Sqlite error")
def GetItemFromCloudDbPlist(plist, item_name):
    '''Search a list of single-entry dicts for item_name; return its value or None.'''
    for entry in plist:
        if item_name in entry:
            return entry[item_name]
    return None
def ReadCloudTabsDb(conn, safari_items, source_path, user):
    '''Parse CloudTabs.db (iCloud-synced tabs from other devices) into safari_items.'''
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.execute(
            """SELECT device_name, tab_uuid, t.system_fields, title, url, is_showing_reader, is_pinned
                FROM cloud_tabs t LEFT JOIN cloud_tab_devices d on d.device_uuid=t.device_uuid
                ORDER BY device_name""")
        try:
            for row in cursor:
                try:
                    pinned = row['is_pinned']
                    system_fields = row['system_fields']
                    created = ''
                    modified = ''
                    if system_fields:
                        # system_fields is an NSKeyedArchiver blob; presumably
                        # CloudKit record metadata holding the record's
                        # creation/modification times -- TODO confirm.
                        serialized_plist_file_obj = io.BytesIO(system_fields)
                        try:
                            deserialized_plist = nd.deserialize_plist(serialized_plist_file_obj)
                            created = GetItemFromCloudDbPlist(deserialized_plist, 'RecordCtime')
                            modified = GetItemFromCloudDbPlist(deserialized_plist, 'RecordMtime')
                        except (nd.DeserializeError, nd.biplist.NotBinaryPlistException,
                                nd.biplist.InvalidPlistException, plistlib.InvalidFileException,
                                nd.ccl_bplist.BplistError, ValueError, TypeError, OSError, OverflowError) as ex:
                            log.exception('plist deserialization error')
                    # Tab is reported even when system_fields was absent/bad.
                    si = SafariItem(SafariItemType.CLOUDTAB, row['url'], row['title'], created,
                                    f'Modified={modified}' + (' pinned=1' if pinned else ''),
                                    user, source_path)
                    safari_items.append(si)
                except sqlite3.Error as ex:
                    log.exception ("Error while fetching row data")
        except sqlite3.Error as ex:
            log.exception ("Db cursor error while reading file " + source_path)
        conn.close()
    except sqlite3.Error as ex:
        log.exception ("Sqlite error")
def ReadSafariTabsDb(conn, safari_items, source_path, user):
    '''Parse SafariTabs.db: open/recently closed tabs stored as bookmark rows.'''
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.execute(
            """SELECT title, url, local_attributes, date_closed
            FROM bookmarks WHERE url not like '' """)
        try:
            for row in cursor:
                try:
                    local_attributes = row['local_attributes']
                    last_visit_ended = ''
                    last_visit_start = ''
                    if local_attributes:
                        # local_attributes is an embedded plist carrying
                        # LastVisitTime / DateClosed timestamps.
                        plist_file_obj = io.BytesIO(local_attributes)
                        success, plist, error = CommonFunctions.ReadPlist(plist_file_obj)
                        if success:
                            last_visit_start = plist.get('LastVisitTime', '')
                            last_visit_ended = plist.get('DateClosed', '')
                        else:
                            log.error(error)
                    # NOTE(review): the selected date_closed column is unused;
                    # DateClosed from the plist is reported instead.
                    si = SafariItem(SafariItemType.TAB, row['url'], row['title'], last_visit_start,
                                    f'Visit_end={last_visit_ended}',
                                    user, source_path)
                    safari_items.append(si)
                except sqlite3.Error as ex:
                    log.exception ("Error while fetching row data")
        except sqlite3.Error as ex:
            log.exception ("Db cursor error while reading file " + source_path)
        conn.close()
    except sqlite3.Error as ex:
        log.exception ("Sqlite error")
def ReadBrowserStateDb(conn, safari_items, source_path, user):
    '''Parse BrowserState.db: one TAB item per open tab, plus TABHISTORY items
    for that tab's per-session back/forward history.'''
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.execute(
            """SELECT t.id, url, title, session_data, t.uuid
            FROM tabs t LEFT JOIN tab_sessions s on s.tab_uuid=t.uuid""")
        try:
            for row in cursor:
                try:
                    si = SafariItem(SafariItemType.TAB, row['url'], row['title'], '',
                                    f'Tab UUID={row["uuid"]}', user, source_path)
                    safari_items.append(si)
                    plist_data = row['session_data']
                    if plist_data and len(plist_data) > 10:
                        # First 4 bytes appear to be a header preceding the
                        # binary plist -- skipped before parsing (TODO confirm).
                        f = io.BytesIO(plist_data[4:])
                        success, plist, error = CommonFunctions.ReadPlist(f)
                        if success:
                            history = plist.get('SessionHistory', None)
                            if history:
                                #current_session = history.get('SessionHistoryCurrentIndex', 0)
                                entries = history.get('SessionHistoryEntries', [])
                                for index, entry in enumerate(entries):
                                    url = entry.get('SessionHistoryEntryURL', '')
                                    title = entry.get('SessionHistoryEntryTitle', '')
                                    if url == row['url']:
                                        continue # same as current tab, skip it
                                    si = SafariItem(SafariItemType.TABHISTORY, url, title, '',
                                                    f'Tab UUID={row["uuid"]} index={index}', user, source_path)
                                    safari_items.append(si)
                        else:
                            log.error(f'Failed to read plist for tab {row["uuid"]}, {row["id"]}. {error}')
                except sqlite3.Error as ex:
                    log.exception ("Error while fetching row data")
        except sqlite3.Error as ex:
            log.exception ("Db cursor error while reading file " + source_path)
        conn.close()
    except sqlite3.Error as ex:
        log.exception ("Sqlite error")
def ReadExtensionsPlist(plist, safari_items, source_path, user):
    '''Parse installed Safari extensions; handles both the legacy
    Extensions.plist layout and the Safari 14+ plist layout.'''
    if 'Installed Extensions' in plist:
        # Legacy layout: a list of dicts under 'Installed Extensions'.
        for item in plist['Installed Extensions']:
            info = item.get('Enabled', '')
            if info != '':
                info = 'Enabled: ' + str(info)
            apple_signed = item.get('Apple-signed', '')
            if apple_signed != '':
                info = ', '.join([info, 'Apple-signed: ' + str(apple_signed)])
            safari_items.append(SafariItem(SafariItemType.EXTENSION, '', item.get('Archive File Name', ''),
                                           None, info, user, source_path))
        return
    # Safari 14+ layout: top-level dict keyed by extension name.
    try:
        for ext_name, ext in plist.items():
            details = ''
            enabled = ext.get('Enabled', '')
            if enabled != '':
                details += 'Enabled:' + str(enabled)
            for key, val in ext.get('WebsiteAccess', {}).items():
                details += f', {key}:{val}'
            safari_items.append(SafariItem(SafariItemType.EXTENSION, '', ext_name,
                                           None, details, user, source_path))
    except (KeyError, ValueError, TypeError):
        log.error("Error reading extensions plist: " + source_path)
def ReadHistoryPlist(plist, safari_items, source_path, user):
    '''Parse the legacy History.plist into history and history-domain items.'''
    try:
        version = plist['WebHistoryFileVersion']
        if version != 1:
            log.warning('WebHistoryFileVersion is {}, this may not parse properly!'.format(version))
    except KeyError:
        log.error('WebHistoryFileVersion not found')
    try:
        for item in plist['WebHistoryDates']:
            try:
                redirect_urls = ",".join(item.get('redirectURLs', ''))
                extra = '' if (redirect_urls == '') else ('REDIRECT_URLS:' + redirect_urls)
                # The URL lives under the empty-string key in this plist format.
                safari_items.append(SafariItem(SafariItemType.HISTORY, item.get('',''), item.get('title', ''),
                                               CommonFunctions.ReadMacAbsoluteTime(item.get('lastVisitedDate', '')),
                                               extra, user, source_path)) # Skipped visitCount
            except ValueError as ex:
                log.error(str(ex))
    except KeyError:
        log.error('WebHistoryDates not found')
    try:
        for item in plist['WebHistoryDomains.v2']:
            safari_items.append(SafariItem(SafariItemType.HISTORYDOMAINS, '', item.get('', ''), None,
                                           'ITEMCOUNT:' + str(item.get('itemCount', 0)), user, source_path))
    except KeyError:
        log.error('WebHistoryDomains.v2 not found')
def ReadDownloadsPlist(plist, safari_items, source_path, user):
    '''Parse Downloads.plist entries (URL + local file path) into safari_items.'''
    try:
        for entry in plist['DownloadHistory']:
            local_path = entry.get('DownloadEntryPath', '')
            # Skipping bookmark and file sizes
            safari_items.append(SafariItem(SafariItemType.DOWNLOAD, entry.get('DownloadEntryURL', ''),
                                           os.path.basename(local_path), None, local_path, user, source_path))
    except KeyError:
        log.error('DownloadHistory not found')
def ReadBookmark(bm, path, safari_items, source_path, user):
    '''Recursively walk one bookmark dictionary from Bookmarks.plist.

    Folder entries (WebBookmarkTypeList) extend `path` and recurse into
    their children; leaf entries (WebBookmarkTypeLeaf) are appended to
    safari_items as BOOKMARK items. Proxy entries are placeholders and
    carry no data.
    '''
    bm_title = bm.get('Title', '')
    bm_type = bm.get('WebBookmarkType', '')
    if bm_type == 'WebBookmarkTypeList':
        # Extend the folder path; avoid a leading '/' on the very first level.
        path = bm_title if path == '' else path + "/" + bm_title
        children = bm.get('Children')
        if children is None:
            pass  # folder with no 'Children' key, nothing to recurse into
        else:
            for child in children:
                ReadBookmark(child, path, safari_items, source_path, user)
    elif bm_type == 'WebBookmarkTypeProxy':
        pass  # placeholder entry, nothing to collect
    elif bm_type == 'WebBookmarkTypeLeaf':
        bm_url = bm.get('URLString', '')
        bm_title = bm.get('URIDictionary', {}).get('title', '')
        bm_date = None
        # Bugfix: the original test was path.find('com.apple.ReadingList') > 0,
        # which misses the Reading List when it sits at index 0 of the path
        # (e.g. when the root list has an empty title), losing DateAdded.
        if 'com.apple.ReadingList' in path:
            try:
                bm_date = bm['ReadingList']['DateAdded']
            except KeyError:
                pass
        safari_items.append(SafariItem(SafariItemType.BOOKMARK, bm_url, bm_title,
                                       bm_date, path, user, source_path))
    else:
        log.info('Unknown type found in bookmark : {} @ {}'.format(bm_title, path))
def ReadBookmarksPlist(plist, safari_items, source_path, user):
    '''Parse Bookmarks.plist by recursively walking it from its root entry.'''
    version = plist.get('WebBookmarkFileVersion')
    if version is None:
        log.error('WebBookmarkFileVersion not found')
    elif version != 1:
        log.warning('WebBookmarkFileVersion is {}, this may not parse properly!'.format(version))
    ReadBookmark(plist, '', safari_items, source_path, user)
def ReadTopSitesPlist(plist, safari_items, source_path, user):
    '''Parse TopSites.plist, appending TOPSITE and TOPSITE_BANNED items.

    The plist stores a single modification date for the whole list
    (DisplayedSitesLastModified); it is attached to every item emitted.
    '''
    ts_last_mod_date = plist.get('DisplayedSitesLastModified')
    if ts_last_mod_date is None:
        log.error('DisplayedSitesLastModified not found')
    else:
        log.info('Topsites last modified on {}'.format(ts_last_mod_date))

    banned_urls = plist.get('BannedURLStrings')
    if banned_urls is None:
        log.error('BannedURLStrings not found')
    else:
        for url in banned_urls:
            safari_items.append(SafariItem(
                SafariItemType.TOPSITE_BANNED, url, '', ts_last_mod_date,
                'Date represents DisplayedSitesLastModified for all Topsites',
                user, source_path))

    top_sites = plist.get('TopSites')
    if top_sites is None:
        log.error('TopSites not found')
    else:
        for site in top_sites:
            safari_items.append(SafariItem(
                SafariItemType.TOPSITE, site.get('TopSiteURLString', ''),
                site.get('TopSiteTitle', ''), ts_last_mod_date,
                'Date represents DisplayedSitesLastModified for all Topsites',
                user, source_path))
def ReadLastSessionPlist(plist, safari_items, source_path, user):
    '''Parse LastSession.plist, appending one LASTSESSION item per tab.

    The selected tab of each window is flagged in the info field, as is a
    DateClosed value when present.
    '''
    version = plist.get('SessionVersion')
    if version is None:
        log.error('SessionVersion not found')
    elif version != '1.0':
        log.warning('SessionVersion is {}, this may not parse properly!'.format(version))
    try:
        for window in plist['SessionWindows']:
            selected_index = window.get('SelectedTabIndex', None)
            for tab_index, tab in enumerate(window.get('TabStates', [])):
                info = 'SELECTED WINDOW' if tab_index == selected_index else ''
                date_closed = tab.get('DateClosed', '')
                log.debug(date_closed)
                if date_closed:
                    info = (info + ', ' if info else '') + 'TAB_CLOSED_DATE=' + str(date_closed)
                # SessionState (encrypted) and TabIdentifier are skipped.
                safari_items.append(SafariItem(
                    SafariItemType.LASTSESSION, tab.get('TabURL', ''), tab.get('TabTitle', ''),
                    CommonFunctions.ReadMacAbsoluteTime(tab.get('LastVisitTime', '')),
                    info, user, source_path))
    except KeyError as ex:
        log.error('SessionWindows not found or unable to parse. Error was {}'.format(str(ex)))
def ReadRecentlyClosedTabsPlist(plist, safari_items, source_path, user):
    '''Parse RecentlyClosedTabs.plist, appending RECENTCLOSEDTAB items.

    PersistentStateType 0 is a single closed tab; any other type is
    treated as a closed window containing multiple TabStates.
    '''
    try:
        version = plist['ClosedTabOrWindowPersistentStatesVersion']
        if version != '1':
            log.warning('ClosedTabOrWindowPersistentStatesVersion is {}, this may not parse properly!'.format(version))
    except KeyError:
        log.error('ClosedTabOrWindowPersistentStatesVersion not found')
    try:
        for tab in plist['ClosedTabOrWindowPersistentStates']:
            state_type = tab.get('PersistentStateType', None)
            if state_type not in [0, 1]:
                log.warning('Unknown PersistentStateType: {}'.format(state_type))
            state = tab.get('PersistentState', None)
            if not state:
                log.error('Key PersistentState not present!')
                continue
            date_closed = state.get('DateClosed', None)
            private_mode = state.get('IsPrivateWindow', False)
            info = 'PRIVATE MODE' if private_mode else ''
            if state_type == 0:  # single closed tab
                safari_items.append(SafariItem(
                    SafariItemType.RECENTCLOSEDTAB, state.get('TabURL', ''),
                    state.get('TabTitle', ''), date_closed, info, user, source_path))
            else:  # assume 1 or higher: closed window, one item per contained tab
                for ts in state.get('TabStates', []):
                    # each tab may carry its own DateClosed, else inherit the window's
                    date_closed = ts.get('DateClosed', date_closed)
                    # (removed a dead no-op expression `ts.get('TabTitle')` here)
                    safari_items.append(SafariItem(
                        SafariItemType.RECENTCLOSEDTAB, ts.get('TabURL', ''),
                        ts.get('TabTitle', ''), date_closed, info, user, source_path))
    except KeyError as ex:
        log.error('ClosedTabOrWindowPersistentStates not found or unable to parse. Error was {}'.format(str(ex)))
def ProcessSafariFolder(mac_info, folder_path, user, safari_items):
    '''Process every known Safari artifact inside folder_path for one user.

    Tries each known plist with its matching parser, then the sqlite
    databases introduced in later Safari versions.
    '''
    # (artifact relative path, parser function) pairs
    files_list = [ ['History.plist', ReadHistoryPlist] , ['Downloads.plist', ReadDownloadsPlist],
                    ['Bookmarks.plist', ReadBookmarksPlist], ['TopSites.plist', ReadTopSitesPlist],
                    ['LastSession.plist', ReadLastSessionPlist], ['Extensions/Extensions.plist', ReadExtensionsPlist],
                    ['RecentlyClosedTabs.plist', ReadRecentlyClosedTabsPlist] ]
    for item in files_list:
        source_path = folder_path + '/' + item[0]
        if mac_info.IsValidFilePath(source_path):
            ProcessSafariPlist(mac_info, source_path, user, safari_items, item[1])
        else:
            log.debug('Safari File not found : {}'.format(source_path))
    # Yosemite onwards there is History.db
    ReadDbFromImage(mac_info, folder_path + '/History.db', user, safari_items, ReadHistoryDb, 'safari history')
    ReadDbFromImage(mac_info, folder_path + '/CloudTabs.db', user, safari_items, ReadCloudTabsDb, 'safari CloudTabs')
    ReadDbFromImage(mac_info, folder_path + '/SafariTabs.db', user, safari_items, ReadSafariTabsDb, 'safari Tabs')
    ReadDbFromImage(mac_info, folder_path + '/BrowserState.db', user, safari_items, ReadBrowserStateDb, 'safari BrowserState')
def ReadDbFromImage(mac_info, source_path, user, safari_items, processing_func, description):
    '''Export an sqlite database from the image and run processing_func on it.

    Skips silently if the file is absent or empty; logs (with traceback) if
    the database cannot be opened.
    '''
    if mac_info.IsValidFilePath(source_path) and mac_info.GetFileSize(source_path, 0) > 0:
        # Copy the raw file into the plugin's export folder first
        mac_info.ExportFile(source_path, __Plugin_Name, user + "_")
        try:
            sqlite = SqliteWrapper(mac_info)
            conn = sqlite.connect(source_path)
            if conn:
                processing_func(conn, safari_items, source_path, user)
        except (sqlite3.Error, OSError) as ex:
            log.exception ("Failed to open {} database '{}', is it a valid SQLITE DB?".format(description, source_path))
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin.

    Iterates every user on the image, probing both the classic Safari
    locations and the sandboxed (Safari 15+) container paths, and prints
    all collected items at the end.
    '''
    safari_items = []
    # Preference plist lives in one of two places depending on Safari version
    user_safari_plist_paths = ('{}/Library/Preferences/com.apple.safari.plist',\
                               '{}/Library/Containers/com.apple.Safari/Data/Library/Preferences/com.apple.Safari.plist')
    user_safari_path = '{}/Library/Safari'
    user_safari_path_15 = '{}/Library/Containers/com.apple.Safari/Data/Library/Safari' # Safari 15 moved some data here
    user_safari_extensions = ('{}/Library/Containers/com.apple.Safari/Data/Library/Safari/AppExtensions/Extensions.plist',\
                              '{}/Library/Containers/com.apple.Safari/Data/Library/Safari/WebExtensions/Extensions.plist')
    processed_paths = []
    for user in mac_info.users:
        user_name = user.user_name
        if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users use the same root folder, we will list such all users as 'root', as there is no way to tell
        if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
        processed_paths.append(user.home_dir)
        for user_safari_plist_path in user_safari_plist_paths:
            source_path = user_safari_plist_path.format(user.home_dir)
            if mac_info.IsValidFilePath(source_path):
                ProcessSafariPlist(mac_info, source_path, user_name, safari_items, ReadSafariPlist)
            #else:
            #    if not user_name.startswith('_'):
            #        log.debug('File not found: {}'.format(source_path))
        # Classic location (pre Safari 15)
        source_path = user_safari_path.format(user.home_dir)
        if mac_info.IsValidFolderPath(source_path):
            ProcessSafariFolder(mac_info, source_path, user_name, safari_items)
        # Sandboxed container location (Safari 15+)
        source_path = user_safari_path_15.format(user.home_dir)
        if mac_info.IsValidFolderPath(source_path):
            ProcessSafariFolder(mac_info, source_path, user_name, safari_items)
        for ext_path in user_safari_extensions:
            source_path = ext_path.format(user.home_dir)
            if mac_info.IsValidFilePath(source_path):
                ProcessSafariPlist(mac_info, source_path, user_name, safari_items, ReadExtensionsPlist)
    if len(safari_items) > 0:
        PrintAll(safari_items, mac_info.output_params, '')
    else:
        log.info('No safari items were found!')
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Entry point when the plugin is run standalone on individual artifact files.

    Each input path is dispatched to the matching parser by its file name:
    plists go through CommonFunctions.ReadPlist and a name-based dispatch,
    known sqlite databases are opened read-only and parsed.
    '''
    log.info("Module Started as standalone")
    # sqlite artifact name -> parser; replaces four copy-pasted branches
    db_parsers = (('History.db', ReadHistoryDb),
                  ('CloudTabs.db', ReadCloudTabsDb),
                  ('SafariTabs.db', ReadSafariTabsDb),
                  ('BrowserState.db', ReadBrowserStateDb))
    for input_path in input_files_list:
        log.debug("Input file passed was: " + input_path)
        safari_items = []
        if input_path.endswith('.plist'):
            try:
                success, plist, error = CommonFunctions.ReadPlist(input_path)
                if success:
                    if input_path.lower().endswith('com.apple.safari.plist'):
                        ReadSafariPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('History.plist'):
                        ReadHistoryPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('Downloads.plist'):
                        ReadDownloadsPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('Bookmarks.plist'):
                        ReadBookmarksPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('TopSites.plist'):
                        ReadTopSitesPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('LastSession.plist'):
                        ReadLastSessionPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('Extensions.plist') and not input_path.endswith('KnownExtensions.plist'):
                        ReadExtensionsPlist(plist, safari_items, input_path, '')
                    elif input_path.endswith('RecentlyClosedTabs.plist'):
                        ReadRecentlyClosedTabsPlist(plist, safari_items, input_path, '')
                    else:
                        log.error("Unknown plist type encountered: {}".format(os.path.basename(input_path)))
                else:
                    log.error(f'Failed to read plist: {os.path.basename(input_path)} : {error}')
            except ValueError as ex:
                log.exception('Failed to open file: {}'.format(input_path))
        else:
            for db_name, parser in db_parsers:
                if input_path.endswith(db_name):
                    log.info("Processing file " + input_path)
                    try:
                        conn = CommonFunctions.open_sqlite_db_readonly(input_path)
                        log.debug("Opened database successfully")
                        parser(conn, safari_items, input_path, '')
                    except (sqlite3.Error, OSError) as ex:
                        log.exception("Failed to open database, is it a valid SQLITE DB?")
                    break
            else:  # no known database name matched
                log.error('Input file {} is not a recognized name of a Safari artifact!'.format(input_path))
        if len(safari_items) > 0:
            PrintAll(safari_items, output_params, input_path)
        else:
            log.info('No safari items found in {}'.format(input_path))
def Plugin_Start_Ios(ios_info):
    '''Entry point for ios_apt plugin.

    Reads MobileSafari's preference plist from the app sandbox (if the app
    is present) and the shared Safari databases from the mobile user's
    Library folder.
    '''
    safari_items = []
    for app in ios_info.apps:
        if app.bundle_display_name.lower() == "safari":
            log.debug(f'Safari version {app.bundle_version} found at {app.sandbox_path}')
            safari_plist_path = f'{app.sandbox_path}/Library/Preferences/com.apple.mobilesafari.plist'
            if ios_info.IsValidFilePath(safari_plist_path):
                ProcessSafariPlist(ios_info, safari_plist_path, 'mobile', safari_items, ReadSafariPlist)
            break  # only one Safari app expected
    source_path = '/private/var/mobile/Library/Safari'
    if ios_info.IsValidFolderPath(source_path):
        ReadDbFromImage(ios_info, source_path + '/History.db', 'mobile', safari_items, ReadHistoryDb, 'safari History')
        ReadDbFromImage(ios_info, source_path + '/CloudTabs.db', 'mobile', safari_items, ReadCloudTabsDb, 'safari CloudTabs')
        ReadDbFromImage(ios_info, source_path + '/SafariTabs.db', 'mobile', safari_items, ReadSafariTabsDb, 'safari Tabs')
        ReadDbFromImage(ios_info, source_path + '/BrowserState.db', 'mobile', safari_items, ReadBrowserStateDb, 'safari BrowserState')
    if len(safari_items) > 0:
        PrintAll(safari_items, ios_info.output_params, '')
    else:
        log.info('No safari items were found!')
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | e931c2462a8d17c6358c545d4d2c56ea | 48.736048 | 180 | 0.58816 | 3.988147 | false | false | false | false |
hungpham2511/toppra | toppra/algorithm/reachabilitybased/time_optimal_algorithm.py | 1 | 3023 | from .reachability_algorithm import ReachabilityAlgorithm
import logging
import numpy as np
logger = logging.getLogger(__name__)
class TOPPRA(ReachabilityAlgorithm):
    """Time-Optimal Path Parameterization based on Reachability
    Analysis (TOPPRA).

    Examples
    -----------
    >>> instance = algo.TOPPRA([pc_vel, pc_acc], path)
    >>> jnt_traj = instance.compute_trajectory()  # rest-to-rest motion
    >>> instance.problem_data  # intermediate result

    Parameters
    ----------
    constraint_list: List[:class:`~toppra.constraint.Constraint`]
        List of constraints to which the robotic system is subjected to.
    path: :class:`.AbstractGeometricPath`
        Input geometric path.
    gridpoints: Optional[np.ndarray]
        Gridpoints for discretization of the geometric path. The start
        and end points must agree with the geometric path's
        `path_interval`. If omited a gridpoint will be automatically
        selected.
    solver_wrapper: str, optional
        Name of the solver wrapper to use. Possible value are:

        - 'seidel'
        - 'hotqpoases'

        For more details see the solverwrappers documentation.
    parametrizer: str, optional
        Name of the output parametrizer to use.

    Notes
    -----
    In addition to the given constraints, there are additional
    constraints on the solutions enforced by the solver-wrapper.
    Therefore, different parametrizations are returned for different
    solver wrappers. However, the difference should be very small,
    especially for well-conditioned problems.

    See also
    --------
    :class:`toppra.solverwrapper.seidelWrapper`
    :class:`toppra.solverwrapper.hotqpOASESSolverWrapper`
    """

    def _forward_step(self, i, x, K_next):
        """Compute the highest possible path velocity that is controllable.

        Parameters
        ----------
        i: int
            Current stage index
        x: float
            The squared velocity at the current stage.
        K_next: list
            The lower and upper bounds of the set of controllable squared velocities
            in the next stage.

        Returns
        -------
        optim_var: array
            Optimal variable, which has this format (u, x, v).
            If this step fails, `optim_var` contains only nans.
        """
        # Immediate return on invalid inputs: unknown controllable set,
        # out-of-range stage index, or an invalid squared velocity.
        if None in K_next or i < 0 or i > self._N or np.isnan(x) or x is None:
            return np.array([np.nan, np.nan])

        nV = self.solver_wrapper.get_no_vars()
        # Linear objective over (u, x, ...): minimizing -x - 2*delta_i*u
        # pushes the next-stage squared velocity x + 2*delta_i*u as high
        # as possible.
        g_upper = np.zeros(nV)
        g_upper[1] = -1
        g_upper[0] = -2 * self.solver_wrapper.get_deltas()[i]

        # Account for propagating numerical errors
        K_next_max = K_next[1]
        K_next_min = K_next[0]

        optim_var = self.solver_wrapper.solve_stagewise_optim(
            i, None, g_upper, x, x, K_next_min, K_next_max
        )
        return optim_var
| mit | dbca400b757f50203b62a48b95fc8320 | 31.858696 | 84 | 0.631161 | 4.090663 | false | false | false | false |
hungpham2511/toppra | examples/plot_straight_line.py | 1 | 1994 | """
Retime a straight path
===============================
"""
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
import numpy as np
import matplotlib.pyplot as plt
import time
time.sleep(0.1)
################################################################################
# Problem setup: two waypoints in 3 joints, plus per-joint velocity and
# acceleration magnitude limits.
way_pts, vel_limits, accel_limits = np.array([[0, 0, 1], [0.2, 0.3, 0]]), np.array([0.1, 0.2, 0.3]), np.r_[1.0,2,3]
path_scalars = np.linspace(0, 1, len(way_pts))
path = ta.SplineInterpolator(path_scalars, way_pts)

# Plot each joint coordinate along the geometric path before retiming.
ss = np.linspace(0, 1, 100)
qs = path(np.linspace(0, 1, 100))
for i in range(way_pts.shape[1]):
    plt.plot(ss, qs[:, i])
plt.show()

################################################################################
# Create velocity bounds, then velocity constraint object
vlim = np.vstack((-vel_limits, vel_limits)).T
# Create acceleration bounds, then acceleration constraint object
alim = np.vstack((-accel_limits, accel_limits)).T
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
    alim, discretization_scheme=constraint.DiscretizationType.Interpolation)

# Setup a parametrization instance. The keyword arguments are
# optional.
instance = algo.TOPPRA([pc_vel, pc_acc], path, solver_wrapper='seidel')
jnt_traj = instance.compute_trajectory(0, 0)

################################################################################
# Sample and plot the retimed joint velocities.
ts_sample = np.linspace(0, jnt_traj.get_duration(), 100)
qs_sample = jnt_traj.eval(ts_sample)  # sampled joint positions
qds_sample = jnt_traj.evald(ts_sample)  # sampled joint velocities
qdds_sample = jnt_traj.evaldd(ts_sample)  # sampled joint accelerations

for i in range(jnt_traj.dof):
    # plot the i-th joint trajectory
    plt.plot(ts_sample, qds_sample[:, i], c="C{:d}".format(i))
    # plot the i-th joint waypoints
    # plt.plot(data['t_waypts'], way_pts[:, i], 'x', c="C{:d}".format(i))
plt.xlabel("Time (s)")
plt.ylabel("Joint velocity (rad/s^2)")
plt.show()
| mit | e36f69d72c37aac9f422c445b61ca11b | 35.925926 | 115 | 0.611836 | 3.252855 | false | false | false | false |
hungpham2511/toppra | tests/tests/solverwrapper/test_basic_can_linear.py | 1 | 8575 | """A test suite for solverwrappers that implement solve methods for
canonical linear constraints. Wrapppers considered include:
'cvxpy', 'qpOASES', "ecos", 'hotqpOASES', 'seidel'.
"""
import pytest
import numpy as np
import numpy.testing as npt
import toppra
import toppra.constraint as constraint
import cvxpy
from ..testing_flags import FOUND_CXPY, FOUND_MOSEK, FOUND_OPENRAVEPY
toppra.setup_logging(level="INFO")
class RandomSecondOrderLinearConstraint(constraint.linear_constraint.LinearConstraint):
    """A random Second-Order non-identical constraint.

    This constraint is defined solely for testing purposes. It accepts
    a degree of freedom, then generates the coefficients randomly on
    every call to :meth:`compute_constraint_params`.
    """

    def __init__(self, dof, discretization_scheme=constraint.DiscretizationType.Collocation):
        super(RandomSecondOrderLinearConstraint, self).__init__()
        self.dof = dof
        self.set_discretization_type(discretization_scheme)
        self.identical = False  # coefficients differ at every gridpoint
        self._format_string = " Random Second-Order constraint (dof={:d}) \n".format(
            self.dof)

    def compute_constraint_params(self, path, gridpoints):
        # Coefficients a, b, c, F, g are freshly randomized per call;
        # g is shifted by F.c so the constraint is feasible at (u, x) = (0, 0).
        N = gridpoints.shape[0] - 1
        a = np.random.randn(N + 1, self.dof)
        b = np.random.randn(N + 1, self.dof)
        c = np.random.randn(N + 1, self.dof)
        F = np.random.randn(N + 1, self.dof, self.dof)
        g = np.random.rand(N + 1, self.dof)
        for i in range(N + 1):
            g[i] += F[i].dot(c[i])

        if self.discretization_type == constraint.DiscretizationType.Collocation:
            return a, b, c, F, g, None, None
        elif self.discretization_type == constraint.DiscretizationType.Interpolation:
            return constraint.canlinear_colloc_to_interpolate(
                a, b, c, F, g, None, None, gridpoints, identical=False)
        else:
            raise NotImplementedError("Other form of discretization not supported!")
@pytest.fixture(scope='class', params=['vel_accel'])
def basic_init_fixture(request):
    """ A fixture for testing basic capability of the solver wrapper.

    This test case has three constraints: one velocity constraint, one
    acceleration constraint and one randomized second-order constraint.
    Yields (constraint list, path, gridpoints, vlim, alim).
    """
    dof = 6
    np.random.seed(1)  # Use the same randomly generated way pts
    way_pts = np.random.randn(4, dof) * 0.6
    N = 200
    path = toppra.SplineInterpolator(np.linspace(0, 1, 4), way_pts)
    ss = np.linspace(0, 1, N + 1)
    # Velocity Constraint
    vlim_ = np.random.rand(dof) * 10 + 10
    vlim = np.vstack((-vlim_, vlim_)).T
    pc_vel = constraint.JointVelocityConstraint(vlim)
    # Acceleration Constraints
    alim_ = np.random.rand(dof) * 10 + 100
    alim = np.vstack((-alim_, alim_)).T
    pc_acc = constraint.JointAccelerationConstraint(alim)
    # random Second Order Constraint, only use for testing
    pc_rand = RandomSecondOrderLinearConstraint(dof)
    pcs = [pc_vel, pc_acc, pc_rand]
    yield pcs, path, ss, vlim, alim
    print("\n [TearDown] Finish PP Fixture")
@pytest.mark.parametrize("solver_name", ['cvxpy', 'qpOASES', "ecos", 'hotqpOASES', 'seidel'])
@pytest.mark.parametrize("i", [3, 10, 30])
@pytest.mark.parametrize("H", [np.array([[1.5, 0], [0, 1.0]]), np.zeros((2, 2)), None])
@pytest.mark.parametrize("g", [np.array([0.2, -1]), np.array([0.5, 1]), np.array([2.0, 1])])
@pytest.mark.parametrize("x_ineq", [(0.1, 1), (0.2, 0.2), (0.4, 0.3), (np.nan, np.nan)])
@pytest.mark.skipif(not FOUND_CXPY, reason="This test requires cvxpy to validate results.")
def test_basic_correctness(basic_init_fixture, solver_name, i, H, g, x_ineq):
    """Basic test case for solver wrappers.

    The input fixture `basic_init_fixture` has two constraints, one
    velocity and one acceleration. Hence, in this test, I directly
    formulate an optimization with cvxpy and compare the result with
    the result obtained from the solver wrapper.
    """
    constraints, path, path_discretization, vlim, alim = basic_init_fixture
    if solver_name == "cvxpy":
        from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper
        solver = cvxpyWrapper(constraints, path, path_discretization)
    elif solver_name == 'qpOASES':
        from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper
        solver = qpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'hotqpOASES':
        from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper
        solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'ecos' and H is None:
        # ecos and seidel handle only linear objectives (H must be None)
        from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper
        solver = ecosWrapper(constraints, path, path_discretization)
    elif solver_name == 'seidel' and H is None:
        from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper
        solver = seidelWrapper(constraints, path, path_discretization)
    else:
        return True  # Skip all other tests

    xmin, xmax = x_ineq
    xnext_min = 0
    xnext_max = 1

    # Results from solverwrapper to test
    solver.setup_solver()
    # NOTE(review): stages i-2 and i-1 appear to be solved first to exercise
    # warm-started solver state before the stage under test — confirm intent.
    result_ = solver.solve_stagewise_optim(i - 2, H, g, xmin, xmax, xnext_min, xnext_max)
    result_ = solver.solve_stagewise_optim(i - 1, H, g, xmin, xmax, xnext_min, xnext_max)
    solverwrapper_result = solver.solve_stagewise_optim(i, H, g, xmin, xmax, xnext_min, xnext_max)
    solver.close_solver()

    # Results from cvxpy, used as the actual, desired values
    ux = cvxpy.Variable(2)
    u = ux[0]
    x = ux[1]
    _, _, _, _, _, _, xbound = solver.params[0]  # vel constraint
    a, b, c, F, h, ubound, _ = solver.params[1]  # accel constraint
    a2, b2, c2, F2, h2, _, _ = solver.params[2]  # random constraint
    Di = path_discretization[i + 1] - path_discretization[i]
    v = a[i] * u + b[i] * x + c[i]
    v2 = a2[i] * u + b2[i] * x + c2[i]
    cvxpy_constraints = [
        x <= xbound[i, 1],
        x >= xbound[i, 0],
        F * v <= h,
        F2[i] * v2 <= h2[i],
        x + u * 2 * Di <= xnext_max,
        x + u * 2 * Di >= xnext_min,
    ]
    if not np.isnan(xmin):
        cvxpy_constraints.append(x <= xmax)
        cvxpy_constraints.append(x >= xmin)
    if H is not None:
        objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
    else:
        objective = cvxpy.Minimize(g * ux)
    problem = cvxpy.Problem(objective, cvxpy_constraints)
    problem.solve(verbose=True)  # test with the same solver as cvxpywrapper
    if problem.status == "optimal":
        cvxpy_result = np.array(ux.value).flatten()
        solverwrapper_result = np.array(solverwrapper_result).flatten()
        npt.assert_allclose(solverwrapper_result, cvxpy_result, atol=5e-2, rtol=1e-5)  # Very bad accuracy? why?
    else:
        # Infeasible problems must yield all-nan from the wrapper
        assert np.all(np.isnan(solverwrapper_result))
@pytest.mark.parametrize("solver_name", ['cvxpy', 'qpOASES', 'ecos', 'hotqpOASES', 'seidel'])
def test_infeasible_instance(basic_init_fixture, solver_name):
    """If the given parameters are infeasible, the solverwrapper should
    terminate gracefully and return a numpy vector [nan, nan].
    """
    constraints, path, path_discretization, vlim, alim = basic_init_fixture
    if solver_name == "cvxpy":
        from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper
        solver = cvxpyWrapper(constraints, path, path_discretization)
    elif solver_name == 'qpOASES':
        from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper
        solver = qpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'hotqpOASES':
        from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper
        solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'ecos':
        from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper
        solver = ecosWrapper(constraints, path, path_discretization)
    elif solver_name == 'seidel':
        from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper
        solver = seidelWrapper(constraints, path, path_discretization)

    g = np.r_[0, 1].astype(float)

    solver.setup_solver()
    # xmin > xmax: empty bound on the current stage
    result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, np.nan, np.nan)
    assert np.all(np.isnan(result))
    # empty current bound AND empty next-stage bound
    result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, 0, -0.5)
    assert np.all(np.isnan(result))
    # empty next-stage bound only
    result = solver.solve_stagewise_optim(0, None, g, np.nan, np.nan, 0, -0.5)
    assert np.all(np.isnan(result))
    solver.close_solver()
| mit | 213549fde43ca418910a584b64fdb729 | 43.201031 | 112 | 0.668105 | 3.199627 | false | true | false | false |
ydkhatri/mac_apt | plugins/helpers/hfs_alt.py | 1 | 22333 |
'''
Copyright 2011 Jean-Baptiste B'edrune, Jean Sigwald
Using New BSD License:
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#
# This code has since been edited to improve HFS parsing, add lzvn/lzfse support
# and is now a part of the mac_apt framework
#
import os
import mmap
import sys
import struct
import tempfile
import zlib
import pytsk3
import logging
from plugins.helpers.common import CommonFunctions
from plugins.helpers.btree import AttributesTree, CatalogTree, ExtentsOverflowTree
from plugins.helpers.structs import *
log = logging.getLogger('MAIN.HELPERS.HFS_ALT')
lzfse_capable = False
try:
import liblzfse
lzfse_capable = True
except ImportError:
print("liblzfse not found. Won't decompress lzfse/lzvn streams")
def write_file(filename, data):
    '''Write binary data to filename, overwriting any existing file.'''
    # Context manager guarantees the handle is closed even if write() raises,
    # unlike the previous open/write/close sequence.
    with open(filename, "wb") as f:
        f.write(data)
def lzvn_decompress(compressed_stream, compressed_size, uncompressed_size): #TODO: Move to a class!
    '''Adds Prefix and Postfix bytes as required by decompressor,
       then decompresses and returns uncompressed bytes buffer
    '''
    # Wrap the raw stream in a 'bvxn' container: magic + uncompressed size
    # + compressed size, terminated by the 'bvx$' end marker.
    prefix = b'bvxn' + struct.pack('<II', uncompressed_size, compressed_size)
    return liblzfse.decompress(prefix + compressed_stream + b'bvx$')
class HFSFile(object):
    '''A file fork on an HFS+ volume, reassembled from its extent list.

    Extents come first from the fork record itself, then (if the fork has
    more blocks than the record describes) from the extents overflow B-tree.
    '''
    def __init__(self, volume, hfsplusfork, fileID, deleted=False):
        self.volume = volume
        self.blockSize = volume.blockSize
        self.fileID = fileID
        self.totalBlocks = hfsplusfork.totalBlocks
        self.logicalSize = hfsplusfork.logicalSize
        self.extents = []
        self.deleted = deleted
        b = 0  # running count of blocks covered by collected extents
        for extent in hfsplusfork.HFSPlusExtentDescriptor:
            self.extents.append(extent)
            b += extent.blockCount
        # Keep pulling extents from the overflow tree until all blocks are covered
        while b != hfsplusfork.totalBlocks:
            #log.debug("extents overflow {}".format(b))
            k,v = volume.getExtentsOverflowForFile(fileID, b)
            if not v:
                log.debug("extents overflow missing, startblock={}".format(b))
                break
            for extent in v:
                self.extents.append(extent)
                b += extent.blockCount

    def copyOutFile(self, outputfile, truncate=True):
        '''Write the whole fork to outputfile block by block; optionally
        truncate the result to the fork's logical size.'''
        f = open(outputfile, "wb")
        for i in range(self.totalBlocks):
            f.write(self.readBlock(i))
        if truncate:
            f.truncate(self.logicalSize)
        f.close()

    '''def readAllBuffer(self, truncate=True):
        r = b""
        for i in range(self.totalBlocks):
            r += self.readBlock(i)
        if truncate:
            r = r[:self.logicalSize]
        return r
    '''

    def readAllBuffer(self, truncate=True, output_file=None):
        '''Write to output_file if valid, else return a buffer of data.
        Warning: If file size > 200 MiB, b'' is returned, file data is only written to output_file.
        '''
        r = b""
        bs = self.volume.blockSize
        blocks_max = 52428800 // bs # 50MB
        for extent in self.extents:
            if extent.blockCount == 0: continue
            #if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
            #    log.debug("FAIL, block "0x{:x}" not marked as used".format(n))
            if extent.blockCount > blocks_max:
                # Large extent: read in 50MB slices to bound memory usage
                counter = blocks_max  # NOTE(review): appears unused — candidate for removal
                remaining_blocks = extent.blockCount
                start_address = extent.startBlock * bs
                while remaining_blocks > 0:
                    num_blocks_to_read = min(blocks_max, remaining_blocks)
                    size = num_blocks_to_read * bs
                    data = self.volume.read(start_address, size)
                    if output_file:
                        output_file.write(data)
                    elif self.logicalSize < 209715200: # 200MiB
                        r += data
                    remaining_blocks -= num_blocks_to_read
                    start_address += size
            else:
                data = self.volume.read(extent.startBlock * bs, bs * extent.blockCount)
                if output_file:
                    output_file.write(data)
                elif self.logicalSize < 209715200: # 200MiB
                    r += data
        if truncate:
            if output_file:
                output_file.truncate(self.logicalSize)
            elif self.logicalSize < 209715200: # 200MiB
                r = r[:self.logicalSize]
        return r

    def processBlock(self, block, lba):
        # Hook for subclasses (e.g. decryption/decompression); identity here.
        return block

    def readBlock(self, n):
        '''Read logical block n of the fork by locating the extent containing it.

        Raises ValueError if n is beyond the fork or the block is not marked
        in use (for live, non-special files).
        '''
        bs = self.volume.blockSize
        if n*bs > self.logicalSize:
            raise ValueError("BLOCK OUT OF BOUNDS")
        bc = 0  # cumulative block count across extents scanned so far
        for extent in self.extents:
            bc += extent.blockCount
            if n < bc:
                lba = extent.startBlock+(n-(bc-extent.blockCount))
                if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
                    raise ValueError("FAIL, block %x not marked as used" % n)
                return self.processBlock(self.volume.read(lba*bs, bs), lba)
        return b""
class HFSCompressedResourceFork(HFSFile):
    '''Resource fork holding compressed file content for decmpfs compression
    types 4 (64k-chunked zlib), 8 (64k-chunked lzvn) and 12 (64k-chunked
    lzfse).'''

    def __init__(self, volume, hfsplusfork, fileID, compression_type, uncompressed_size):
        super(HFSCompressedResourceFork, self).__init__(volume, hfsplusfork, fileID)
        block0 = self.readBlock(0)
        self.compression_type = compression_type
        self.uncompressed_size = uncompressed_size
        if compression_type in [8, 12]: # 8 is lzvn, 12 is lzfse
            # only tested for 8
            self.header = HFSPlusCmpfLZVNRsrcHead.parse(block0)
        else:
            self.header = HFSPlusCmpfRsrcHead.parse(block0)
            self.blocks = HFSPlusCmpfRsrcBlockHead.parse(block0[self.header.headerSize:])
            log.debug("HFSCompressedResourceFork numBlocks:{}".format(self.blocks.numBlocks))

    #HAX, readblock not implemented
    def readAllBuffer(self, truncate=True, output_file=None):
        '''Warning: If output size > 200 MiB, b'' is returned, file data is only written to output_file.'''
        if self.compression_type in [7, 8, 11, 12] and not lzfse_capable:
            raise ValueError('LZFSE/LZVN compression detected, no decompressor available!')
        if self.logicalSize >= 209715200:
            # Large fork: spool the raw (still compressed) resource fork to a
            # temp file and access it via a zero-copy memory map.
            temp_file = tempfile.SpooledTemporaryFile(209715200)
            super(HFSCompressedResourceFork, self).readAllBuffer(True, temp_file)
            temp_file.seek(0)
            buff = mmap.mmap(temp_file.fileno(), 0) # memory mapped file to access as buffer
        else:
            buff = super(HFSCompressedResourceFork, self).readAllBuffer()
        r = b""
        if self.compression_type in [7, 11]: # lzvn or lzfse
            # Types 7/11 live in the decmpfs xattr, never in the resource
            # fork; reaching here indicates a parsing problem.  (The
            # unreachable decompression code that used to follow this raise
            # has been removed.)
            raise ValueError("Did not expect type " + str(self.compression_type) + " in resource fork")
        elif self.compression_type in [8, 12]: # lzvn or lzfse in 64k chunks
            try:
                # The following is only for lzvn, not encountered lzfse yet!
                full_uncomp = self.uncompressed_size
                chunk_uncomp = 65536
                i = 0
                src_offset = self.header.headerSize
                for offset in self.header.chunkOffsets:
                    compressed_size = offset - src_offset
                    data = buff[src_offset:offset] #input_file.read(compressed_size)
                    src_offset = offset
                    if full_uncomp <= 65536:
                        chunk_uncomp = full_uncomp
                    else:
                        chunk_uncomp = 65536
                        if len(self.header.chunkOffsets) == i + 1: # last chunk
                            chunk_uncomp = full_uncomp - (65536 * i)
                    # A leading 0x06 byte marks a chunk stored uncompressed.
                    if chunk_uncomp < compressed_size and data[0] == 0x06:
                        decompressed = data[1:]
                    else:
                        decompressed = lzvn_decompress(data, compressed_size, chunk_uncomp)
                    if output_file: output_file.write(decompressed)
                    elif self.uncompressed_size < 209715200: r += decompressed
                    i += 1
            except liblzfse.error:
                raise ValueError("Exception from lzfse_lzvn decompressor")
        else:
            # Type 4: zlib-compressed chunks described by the block array.
            base = self.header.headerSize + 4
            for b in self.blocks.HFSPlusCmpfRsrcBlockArray:
                decompressed = zlib.decompress(buff[base+b.offset:base+b.offset+b.size])
                if output_file: output_file.write(decompressed)
                elif self.uncompressed_size < 209715200: r += decompressed
        if self.logicalSize >= 209715200:
            # BUG FIX: this previously called mmap.close(), i.e. close() on
            # the mmap *module* (AttributeError at runtime); close the mmap
            # object instead.
            buff.close()
            temp_file.close()
        return r
class HFSVolume(object):
    def __init__(self, pytsk_image, offset=0):
        '''Parse the HFS+/HFSX volume header at `offset` within the image and
        open the special allocation/extents/catalog/attributes files.
        Raises ValueError if the volume signature is not 'H+' or 'HX'.
        '''
        self.img = pytsk_image
        self.offset = offset
        try:
            data = self.read(0, 0x1000)
            # The volume header lives at offset 0x400; 0x4858 = 'HX' (HFSX),
            # 0x482B = 'H+' (HFS+).
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except AssertionError:
            raise ValueError("Not an HFS+ image")
        #self.is_hfsx = self.header.signature == 0x4858
        self.blockSize = self.header.blockSize
        # Allocation bitmap is read fully up front; isBlockInUse() indexes it.
        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)
        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
def read(self, offset, size):
return self.img.read(self.offset + offset, size)
def volumeID(self):
return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])
    def isBlockInUse(self, block):
        '''Return True if `block` is marked allocated in the volume bitmap
        (one bit per block, most-significant bit first within each byte).'''
        thisByte = self.allocationBitmap[block // 8]
        return (thisByte & (1 << (7 - (block % 8)))) != 0
def unallocatedBlocks(self):
for i in range(self.header.totalBlocks):
if not self.isBlockInUse(i):
yield i, self.read(i*self.blockSize, self.blockSize)
def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
return self.extentsTree.searchExtents(fileID, forkType, startBlock)
def getXattr(self, fileID, name):
return self.xattrTree.searchXattr(fileID, name)
def getFileByPath(self, path):
return self.catalogTree.getRecordFromPath(path)
def getFinderDateAdded(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType == kHFSPlusFileRecord:
return v.data.ExtendedFileInfo.finderDateAdded
elif k and v.recordType == kHFSPlusFolderRecord:
return v.data.ExtendedFolderInfo.finderDateAdded
return 0
def listFolderContents(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if not k or v.recordType != kHFSPlusFolderRecord:
return
for k,v in self.catalogTree.getFolderContents(v.data.folderID):
if v.recordType == kHFSPlusFolderRecord:
print(v.data.folderID, getString(k) + "/")
elif v.recordType == kHFSPlusFileRecord:
print(v.data.fileID, getString(k))
def listFinderData(self, path):
'''Returns finder data'''
finder_data = {}
k,v = self.catalogTree.getRecordFromPath(path)
date_added = 0
if k and v.recordType == kHFSPlusFileRecord:
date_added = v.data.ExtendedFileInfo.finderDateAdded
if v.data.FileInfo.fileType: finder_data['fileType'] = v.data.FileInfo.fileType
if v.data.FileInfo.fileCreator: finder_data['fileCreator'] = v.data.FileInfo.fileCreator
if v.data.FileInfo.finderFlags: finder_data['finderFlags'] = v.data.FileInfo.finderFlags
if v.data.ExtendedFileInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
elif k and v.recordType == kHFSPlusFolderRecord:
date_added = v.data.ExtendedFolderInfo.finderDateAdded
if v.data.FolderInfo.finderFlags: finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
if v.data.ExtendedFolderInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
if date_added: finder_data['DateAdded'] = date_added
return finder_data
    def getCnidForPath(self, path):
        '''Return the catalog node ID (CNID) for `path`: the fileID for a
        file record, or the folderID from a folder thread record.
        Raises ValueError if the path does not exist.  Implicitly returns
        None for any other record type.
        '''
        k,v = self.catalogTree.getRecordFromPath(path)
        if not v:
            raise ValueError("Path not found")
        if k and v.recordType == kHFSPlusFileRecord:
            return v.data.fileID
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return v.data.folderID
def getXattrsByPath(self, path):
file_id = self.getCnidForPath(path)
return self.xattrTree.getAllXattrs(file_id)
def getXattrByPath(self, path, name):
file_id = self.getCnidForPath(path)
return self.getXattr(file_id, name)
''' Compression type in Xattr as per apple:
Source: https://opensource.apple.com/source/copyfile/copyfile-138/copyfile.c.auto.html
case 3: /* zlib-compressed data in xattr */
case 4: /* 64k chunked zlib-compressed data in resource fork */
case 7: /* LZVN-compressed data in xattr */
case 8: /* 64k chunked LZVN-compressed data in resource fork */
case 9: /* uncompressed data in xattr (similar to but not identical to CMP_Type1) */
case 10: /* 64k chunked uncompressed data in resource fork */
case 11: /* LZFSE-compressed data in xattr */
case 12: /* 64k chunked LZFSE-compressed data in resource fork */
/* valid compression type, we want to copy. */
break;
case 5: /* specifies de-dup within the generation store. Don't copy decmpfs xattr. */
copyfile_debug(3, "compression_type <5> on attribute com.apple.decmpfs for src file %s is not copied.",
s->src ? s->src : "(null string)");
continue;
case 6: /* unused */
'''
def readFile(self, path, output_file=None):
'''Reads file specified by 'path' and copies it out into output_file if valid, else returns as string.
Warning: If file is too large, over 200 MiB, then it will return b'', and only write to output_file.
'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
raise ValueError("File not found")
data = b''
assert v.recordType == kHFSPlusFileRecord
xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
if xattr:
decmpfs = HFSPlusDecmpfs.parse(xattr)
log.debug("decmpfs.compression_type={}".format(str(decmpfs.compression_type)))
if decmpfs.compression_type == 1:
data = xattr[16:]
if output_file: output_file.write(data)
elif decmpfs.compression_type == 3:
if decmpfs.uncompressed_size == len(xattr) - 16:
data = xattr[16:]
else:
data = zlib.decompress(xattr[16:])
if output_file: output_file.write(data)
elif decmpfs.compression_type == 4:
f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
data = f.readAllBuffer(True, output_file)
elif decmpfs.compression_type in [7, 11]:
if xattr[16] == 0x06: # perhaps even 0xF?
data = xattr[17:] #tested OK
else: #tested OK
uncompressed_size = struct.unpack('<I', xattr[8:12])[0]
compressed_size = len(xattr) - 16
compressed_stream = xattr[16:]
data = lzvn_decompress(compressed_stream, compressed_size, uncompressed_size)
if output_file: output_file.write(data)
elif decmpfs.compression_type in [8, 12]:
# tested for type 8 , OK
f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
data = f.readAllBuffer(True, output_file)
if output_file: output_file.write(data)
else:
f = HFSFile(self, v.data.dataFork, v.data.fileID)
data = f.readAllBuffer(True, output_file)
return data
    def readJournal(self):
        '''Return the raw journal contents, located via the journal info
        block referenced from the volume header.'''
        jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset,jib.size)
def GetFileMACTimesFromFileRecord(self, v):
times = { 'c_time':None, 'm_time':None, 'cr_time':None, 'a_time':None }
catalog_file = v.data
times['c_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.attributeModDate)
times['m_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.contentModDate)
times['cr_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.createDate)
times['a_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.accessDate)
return times
def GetFileMACTimes(self, file_path):
'''
Returns dictionary {c_time, m_time, cr_time, a_time}
where cr_time = created time and c_time = Last time inode/mft modified
'''
k,v = self.catalogTree.getRecordFromPath(file_path)
if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return self.GetFileMACTimesFromFileRecord(v)
raise ValueError("Path not found or not file/folder!")
def IsValidFilePath(self, path):
'''Check if a file path is valid, does not check for folders!'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
return False
return v.recordType == kHFSPlusFileRecord #TODO: Check for hard links , sym links?
def IsValidFolderPath(self, path):
'''Check if a folder path is valid'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
return False
return v.recordType == kHFSPlusFolderRecord #TODO: Check for hard links , sym links?
def IsSymbolicLink(self, path):
'''Check if a path points to a file/folder or symbolic link'''
mode = self.GetFileMode(path)
if mode:
return (mode & S_IFLNK) == S_IFLNK
return False
    def GetFileSizeFromFileRecord(self, v):
        '''Return the logical (uncompressed) size for a catalog file record,
        preferring the decmpfs xattr's size for compressed files.'''
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            return decmpfs.uncompressed_size #TODO verify for all cases!
        else:
            return v.data.dataFork.logicalSize
def GetFileSize(self, path):
'''For a given file path, gets logical file size'''
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType == kHFSPlusFileRecord:
return self.GetFileSizeFromFileRecord(v)
else:
raise ValueError("Path not found")
def GetUserAndGroupID(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return (v.data.HFSPlusBSDInfo.ownerID, v.data.HFSPlusBSDInfo.groupID)
else:
raise ValueError("Path not found")
def GetFileMode(self, path):
'''Returns the file or folder's fileMode '''
k,v = self.catalogTree.getRecordFromPath(path)
if k and v and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return v.data.HFSPlusBSDInfo.fileMode
else:
raise ValueError("Path not found or not a file/folder") | mit | 9270e420ea4fdde70b2c5bbee01a2f13 | 46.927039 | 755 | 0.62889 | 3.870537 | false | false | false | false |
hungpham2511/toppra | tests/tests/interpolators/test_find_gridpoints.py | 1 | 1640 | import toppra
import toppra.interpolator
import numpy as np
import matplotlib.pyplot as plt
import pytest
@pytest.fixture(params=[[0, 1], [1.5, 2.7]])
def path(request):
start, end = request.param
waypoints = [[0, 0.3, 0.5], [1, 2, 3], [0.0, 0.1, 0.2], [0, 0.5, 0]]
ss = np.linspace(start, end, len(waypoints))
path = toppra.interpolator.SplineInterpolator(ss, waypoints)
yield path, waypoints
def test_basic_usage(path):
    '''Proposed gridpoints span the whole path interval at fine resolution.'''
    path, waypoints = path
    gridpoints_ept = toppra.interpolator.propose_gridpoints(path, 1e-2)
    assert gridpoints_ept[0] == path.path_interval[0]
    assert gridpoints_ept[-1] == path.path_interval[1]
    # The longest segment should be smaller than 0.05. This is to
    # ensure a reasonable response.
    assert np.max(np.diff(gridpoints_ept)) < 0.05

    # # visualize ###############################################################
    # ss_full = np.linspace(path.path_interval[0], path.path_interval[1], 100)
    # for i in range(len(waypoints[0])):
    #     plt.plot(ss_full, path(ss_full)[:, i], '--', c='C%d' % i)
    #     plt.plot(gridpoints_ept, path(gridpoints_ept)[:, i], '-o', c='C%d' % i)
    # plt.show()
def test_number_of_points_(path):
    '''min_nb_points forces a dense grid even with a very loose error bound.'''
    path, waypoints = path
    gridpoints_ept = toppra.interpolator.propose_gridpoints(path, 1.0, min_nb_points=100)  # large bounds
    assert len(gridpoints_ept) > 100
def test_hard_path_difficult_to_approximate_within_iterations(path):
    """With only 2 iterations the approximation cannot converge, so a
    ValueError is expected."""
    path, _ = path
    with pytest.raises(ValueError):
        toppra.interpolator.propose_gridpoints(path, max_iteration=2)
| mit | 93a1d42c3f1c277478ee2f2131cb655f | 35.444444 | 106 | 0.635366 | 3.065421 | false | true | false | false |
ydkhatri/mac_apt | plugins/dockitems.py | 1 | 6877 | '''
Copyright (c) 2018 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
dockitems.py
---------------
Reads the dock plist file for each user.
'''
import logging
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
__Plugin_Name = "DOCKITEMS"
__Plugin_Friendly_Name = "Dock Items"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads the Dock plist for every user"
__Plugin_Author = "Adam Ferrante"
__Plugin_Author_Email = "adam@ferrante.io"
__Plugin_Modes = "MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Provide the plist file located at /Users/<USER>/Library/Preferences/com.apple.dock.plist'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
class DockItem:
    '''One entry parsed from a user's com.apple.dock.plist (persistent or
    recent item).'''
    def __init__(self, file_label, parent_mod_date, file_mod_date, recent_used, file_type, file_data, guid, user, source_path):
        self.file_label = file_label
        if parent_mod_date and (parent_mod_date > 0xFFFFFFFF): # On High Sierra and above..
            parent_mod_date = parent_mod_date & 0xFFFFFFFF # Killing upper 32 bits!
            # Upper 32 bits maybe the finer resolution (microseconds?).
        if file_mod_date and (file_mod_date > 0xFFFFFFFF): # On High Sierra and above..
            file_mod_date = file_mod_date & 0xFFFFFFFF # Killing upper 32 bits!
        # HFS+ timestamps converted to datetime objects.
        self.parent_mod_date = CommonFunctions.ReadMacHFSTime(parent_mod_date)
        self.file_mod_date = CommonFunctions.ReadMacHFSTime(file_mod_date)
        self.recent_used = recent_used
        self.file_type = file_type
        self.file_path = file_data
        self.guid = guid
        self.user = user
        self.path = source_path
def PrintAll(docks, output_params, input_path=''):
    '''Write all collected DockItem entries out via the configured writer.'''
    # Output column schema: (column name, data type).
    # NOTE(review): 'Parent Modified' is typed TEXT while 'File Modified' is
    # DATE -- confirm this asymmetry is intended.
    dock_info = [ ('File Label',DataType.TEXT),
                    ('Parent Modified',DataType.TEXT),('File Modified',DataType.DATE),
                    ('Recently Used',DataType.TEXT),
                    ('File Type',DataType.TEXT),('File Path',DataType.TEXT),
                    ('GUID',DataType.TEXT),
                    ('User',DataType.TEXT),('Source',DataType.TEXT)
                  ]
    log.info (str(len(docks)) + " user dock item(s) found")
    dock_list_final = []
    for item in docks:
        single_dock_item = [item.file_label, item.parent_mod_date, item.file_mod_date,
                            item.recent_used, item.file_type, item.file_path,
                            item.guid,
                            item.user, item.path
                            ]
        dock_list_final.append(single_dock_item)
    WriteList("Dock Information", "Dock Items", dock_list_final, dock_info, output_params, input_path)
def GetPath(file_data):
    '''Extract the filesystem path from a dock tile's file-data dict,
    stripping any leading "file://" URL scheme.  Returns '' when no data
    is available.'''
    if not file_data:
        return ""
    url = file_data.get("_CFURLString", "")
    prefix = "file://"
    # Drop the URL scheme so only the local filesystem path remains.
    return url[len(prefix):] if url.startswith(prefix) else url
def GetDockItemsPlistFromImage(mac_info, plist_path):
    '''Read and parse the dock plist from the mounted image.  Returns the
    plist object on success, or None on failure (the error is logged).'''
    success, plist, error = mac_info.ReadPlist(plist_path)
    if success:
        return plist
    else:
        log.error(error)
    return None
def ParseDockItemsPlist(plist, docks, user_name, plist_path):
    '''Parse a dock plist and append one DockItem per tile to `docks`.

    Scans the persistent and recent item arrays; items in 'recent-apps' are
    flagged as recently used.  Items without 'tile-data' are logged and
    skipped.
    '''
    for key in ['persistent-others', 'persistent-apps', 'recent-apps']:
        # FIX: was `plist.get(key, None) != None` -- comparisons to None
        # should use `is`/`is not` (PEP 8); also avoids a second lookup.
        items = plist.get(key)
        if items is not None:
            try:
                for item in items:
                    tile_data = item.get('tile-data', None)
                    if tile_data:
                        instance = DockItem(tile_data.get('file-label', ''),
                                    tile_data.get('parent-mod-date', None),
                                    tile_data.get('file-mod-date', None),
                                    'Yes' if key=='recent-apps' else '',
                                    tile_data.get('file-type', ''),
                                    GetPath(tile_data.get('file-data', None)),
                                    item.get('GUID', ''),
                                    user_name, plist_path)
                        docks.append(instance)
                    else:
                        log.warning('No tile-data found!! Perhaps a newer format?')
            except ValueError:
                log.exception("Exception while processing {}".format(key))
        else:
            log.debug('Key {} not found!'.format(key))
def Plugin_Start(mac_info):
    '''Main entry point: locate each user's com.apple.dock.plist in the
    image, export it, parse it and write the collected items out.'''
    dock_items_path = '{}/Library/Preferences/com.apple.dock.plist' # PList within each users directory.
    docks = []
    processed_paths = []
    for user in mac_info.users:
        user_name = user.user_name
        if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users use the same root folder, we will list such all users as 'root', as there is no way to tell
        if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
        processed_paths.append(user.home_dir)
        source_path = dock_items_path.format(user.home_dir) # Set a variable to the path of all user dock plist files.
        if mac_info.IsValidFilePath(source_path): # Determine if the above path is valid.
            mac_info.ExportFile(source_path, __Plugin_Name, user_name + "_", False)
            plist = GetDockItemsPlistFromImage(mac_info, source_path)
            if plist:
                ParseDockItemsPlist(plist, docks, user_name, source_path)
    if len(docks) > 0:
        PrintAll(docks, mac_info.output_params, '')
    else:
        log.info('No dock items found')
def ReadDockPlistFile(input_file, docks):
    '''Parse a standalone dock plist file and append its items to `docks`.'''
    success, plist, error = CommonFunctions.ReadPlist(input_file)
    if success:
        ParseDockItemsPlist(plist, docks, '', input_file)
    else:
        log.error ("Could not open plist, error was : " + error)
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Entry point for artifact-only mode: each input file is an exported
    com.apple.dock.plist.'''
    log.info("Module Started as standalone")
    for input_path in input_files_list:
        log.debug("Input file passed was: " + input_path)
        docks = []
        ReadDockPlistFile(input_path, docks)
        if len(docks) > 0:
            PrintAll(docks, output_params, input_path)
        else:
            log.info('No dock items found in {}'.format(input_path))
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | b00170f83420fcd32826bed0a673495c | 41.9875 | 180 | 0.58732 | 3.887507 | false | false | false | false |
hungpham2511/toppra | setup.py | 1 | 4039 | from setuptools import setup, Extension
from distutils.command.install import install
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy as np
import sys
NAME = "toppra"
with open("VERSION", "r", encoding='UTF-8') as file_:
VERSION = file_.read()
DESCR = "toppra: time-optimal parametrization of trajectories for robots subject to constraints."
with open("README.md", "r", encoding='UTF-8') as file_:
LONG_DESCRIPTION = file_.read()
URL = "https://github.com/hungpham2511/toppra"
# setup requirements
if sys.version[0] == '2':
with open("requirements.txt", "r") as f:
REQUIRES = ["scipy==0.18.0", "numpy", "matplotlib",
# only required on python2.7
"pathlib2", "enum34", "strip_hints", "typing"]
DEV_REQUIRES = [line.strip() for line in f if line.strip()]
else:
with open("requirements3.txt", "r") as f:
REQUIRES = ["scipy>0.18", "numpy", "matplotlib"]
DEV_REQUIRES = [line.strip() for line in f if line.strip()]
AUTHOR = "Hung Pham"
EMAIL = "hungpham2511@gmail.com"
LICENSE = "MIT"
SRC_DIR = "toppra"
PACKAGES = ["toppra",
"toppra.constraint",
"toppra.algorithm",
"toppra.algorithm.reachabilitybased",
"toppra.solverwrapper",
"toppra.cpp"]
ext_1 = Extension(SRC_DIR + "._CythonUtils",
[SRC_DIR + "/_CythonUtils.pyx"],
extra_compile_args=['-O1'],
libraries=[],
include_dirs=[np.get_include()])
ext_2 = Extension(SRC_DIR + ".solverwrapper.cy_seidel_solverwrapper",
[SRC_DIR + "/solverwrapper/cy_seidel_solverwrapper.pyx"],
extra_compile_args=['-O1'],
include_dirs=[np.get_include()])
EXTENSIONS = [ext_1, ext_2]
SETUP_REQUIRES = ["numpy", "cython"]
if sys.version[0] == '2' or sys.version[:3] == '3.5':
SETUP_REQUIRES = ["numpy", "cython", "strip_hints"]
# custom install command: strip type-hints before installing toppra
# for python2.7 and pthon3.5
class install2(install):
    """Custom install command: strips PEP 484 type hints from the built
    sources before installing, for Python 2.7 / 3.5 compatibility."""
    def run(self, *args, **kwargs):
        # stripping
        if sys.version[0] == '2' or sys.version[:3] == '3.5':
            from strip_hints import strip_file_to_string
            import glob
            import os.path

            def process_file(f):
                # Rewrite the file in place with its type hints removed.
                print(os.path.abspath(f))
                out = strip_file_to_string(f)
                with open(f, 'w') as fh:
                    fh.write(out)

            for f in glob.glob("%s/*/toppra/*/*.py" % self.build_base):
                process_file(f)
            for f in glob.glob("%s/*/toppra/*.py" % self.build_base):
                process_file(f)
            print(os.path.abspath("."))
            print(os.path.abspath(self.build_base))
        # install new files
        install.run(self, *args, **kwargs)
if __name__ == "__main__":
setup(
# Dependencies installed when running `pip install .`
install_requires=REQUIRES,
setup_requires=["numpy", "cython"],
extras_require={
# Dependencies installed when running `pip install -e .[dev]`
# NOTE: This is deprecated in favour of the simpler workflow
# of installing from requirements3.txt before installing
# this pkg.
'dev': DEV_REQUIRES
},
packages=PACKAGES,
zip_safe=False,
name=NAME,
version=VERSION,
description=DESCR,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
license=LICENSE,
# This is used to build the Cython modules. Will be run
# automatically if not found by pip. Otherwise run
#
# python setup.py build
#
# to trigger manually.
cmdclass={"build_ext": build_ext, "install": install2},
ext_modules=cythonize(EXTENSIONS)
)
| mit | e6b6c8f9107467d8f2915fbcff9e9746 | 33.228814 | 97 | 0.572419 | 3.628931 | false | false | false | false |
ebmdatalab/openprescribing | openprescribing/pipeline/management/commands/fetch_prescribing_data.py | 2 | 1255 | import os
import requests
from django.conf import settings
from django.core.management import BaseCommand
from openprescribing.utils import mkdir_p
class Command(BaseCommand):
    """Download one month of English Prescribing Data (EPD) from the NHSBSA
    Open Data Portal into the pipeline data directory."""

    def add_arguments(self, parser):
        parser.add_argument("year", type=int)
        parser.add_argument("month", type=int)

    def handle(self, year, month, **kwargs):
        # Ask the portal for the EPD dataset metadata (one resource per month).
        rsp = requests.get(
            "https://opendata.nhsbsa.net/api/3/action/package_show?id=english-prescribing-data-epd"
        )
        rsp.raise_for_status()
        resources = rsp.json()["result"]["resources"]
        resource_name = "EPD_{year}{month:02d}".format(year=year, month=month)
        urls = [r["url"] for r in resources if r["name"] == resource_name]
        # FIX: was `assert len(urls) == 1` -- asserts are stripped under
        # `python -O`, so validate explicitly with a management-command error.
        if len(urls) != 1:
            raise CommandError(
                "Expected exactly one resource named {!r}, found: {!r}".format(
                    resource_name, urls
                )
            )
        rsp = requests.get(urls[0], stream=True)
        # FIX: was `assert rsp.ok`; raise_for_status() raises a descriptive
        # HTTPError on failure and is not stripped under -O.
        rsp.raise_for_status()
        dir_path = os.path.join(
            settings.PIPELINE_DATA_BASEDIR,
            "prescribing_v2",
            "{year}_{month:02d}".format(year=year, month=month),
        )
        mkdir_p(dir_path)
        filename = "epd_{year}{month:02d}.csv".format(year=year, month=month)
        # Stream the (large) CSV to disk in 32 KiB chunks.
        with open(os.path.join(dir_path, filename), "wb") as f:
            for block in rsp.iter_content(32 * 1024):
                f.write(block)
| mit | 93fad071cfdd24aa7f11dfa0edac52a5 | 30.375 | 99 | 0.587251 | 3.535211 | false | false | false | false |
ebmdatalab/openprescribing | openprescribing/matrixstore/build/sort_and_merge_gzipped_csv_files.py | 2 | 4179 | import csv
import os
from pipes import quote
import subprocess
class InvalidHeaderError(Exception):
    """Raised when input CSV files disagree on their header line, or when a
    requested sort column is missing from the header."""
    pass
def sort_and_merge_gzipped_csv_files(
    # CSV files to sort (which may or may not be gzipped)
    input_filenames,
    # Output file
    output_filename,
    # Column names to sort by
    sort_columns,
):
    """
    Given a list of CSV files, sort the rows by the supplied column names and
    write the result to `output_filename`

    Input files may be gzipped or not (either will work).  The output is
    always gzipped.

    We shell out to the `sort` command for this as it much more efficient than
    trying to do this in Python and can transparently handle sorting files that
    are many times too large to fit in memory.

    Note that `sort` doesn't really parse CSV, it just splits on commas; so
    this function won't work where the CSV contains commas -- at least, where
    these are to the left of the columns which are being sorted on.
    """
    # Validate headers up front and translate column names to 0-based indices.
    header_line = get_header_line(input_filenames)
    sort_column_indices = get_column_indices(header_line, sort_columns)
    # Construct a shell pipeline to read all input files and sort in the
    # correct order, outputing the header line first
    pipeline = "( {read_files} ) | ( echo {header_line}; {sort_by_columns} )".format(
        read_files=read_files(input_filenames, skip_lines=1),
        header_line=quote(header_line),
        sort_by_columns=sort_by_columns(sort_column_indices),
    )
    pipeline += " | gzip"
    pipeline += " > {}".format(quote(output_filename))
    env = os.environ.copy()
    # For much faster string comparison when sorting
    env["LANG"] = "C"
    subprocess.check_call(pipeline, shell=True, env=env)
def read_files(filenames, skip_lines=None, max_lines=None):
    """
    Return command to read all supplied files (which may or may not be
    gzipped), optionally skipping a number of leading and trailing lines
    """
    # One read command per file, run sequentially in the shell.
    commands = [
        read_file(name, skip_lines=skip_lines, max_lines=max_lines)
        for name in filenames
    ]
    return "; ".join(commands)
def read_file(filename, skip_lines=None, max_lines=None):
    """
    Return command to read a file (which may or may not be gzipped),
    optionally skipping a number of leading and trailing lines
    """
    # The `--force` flag means that non-gzipped files are handled
    # transparently as if the command was just `cat`.
    stages = ["gzip --decompress --force --to-stdout --quiet {}".format(quote(filename))]
    if skip_lines is not None:
        # `tail -n +K` starts output at line K, so skipping N lines means K=N+1.
        stages.append("tail -n +{}".format(int(skip_lines) + 1))
    if max_lines is not None:
        stages.append("head -n {}".format(int(max_lines)))
    return " | ".join(stages)
def sort_by_columns(column_indices):
    """
    Return a `sort` command string configured to sort a CSV file by the supplied column
    indices
    """
    # `sort` keys are 1-based; `--key=N,N` restricts the key to one field.
    keys = " ".join("--key={0},{0}".format(index + 1) for index in column_indices)
    return "sort --field-separator=, {}".format(keys)
def get_header_line(filenames):
    """
    Return the first line of one of the files and check it is consistent
    across all files
    """
    # Read only the first line of each file (decompressing if needed);
    # stderr is discarded because `head` closing the pipe early makes
    # gzip complain.
    pipeline = "({read_files}) 2>/dev/null".format(
        read_files=read_files(filenames, max_lines=1)
    )
    header_lines = (
        subprocess.check_output(pipeline, shell=True).decode("utf8").splitlines()
    )
    # NOTE(review): assumes every input file is non-empty; an empty file
    # would shift `header_lines` and raise IndexError -- confirm inputs.
    header_line = header_lines[0]
    for n, filename in enumerate(filenames):
        other_line = header_lines[n]
        if other_line != header_line:
            raise InvalidHeaderError(
                "Input files do not have identical headers:\n\n"
                "{}: {}\n{}: {}".format(filenames[0], header_line, filename, other_line)
            )
    return header_line
def get_column_indices(header_line, columns):
    """
    Take a CSV header line and a list of columns and return the indices of
    those columns (or raise InvalidHeaderError)
    """
    # Parse the header with the csv module so quoted fields are handled.
    header_fields = next(csv.reader([header_line]))
    indices = []
    try:
        for column in columns:
            indices.append(header_fields.index(column))
    except ValueError as e:
        raise InvalidHeaderError("{} of headers: {}".format(e, header_line))
    return indices
| mit | ab1b792b85f0e5df0d3e794b4674e3de | 34.117647 | 88 | 0.652309 | 3.931326 | false | false | false | false |
html5lib/html5lib-python | html5lib/treebuilders/etree_lxml.py | 42 | 14754 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
import warnings
import re
import sys
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
from . import base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import _ihatexml
import lxml.etree as etree
from six import PY3, binary_type
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
    """Simple container for a doctype's name, public id and system id."""

    def __init__(self, name, publicId, systemId):
        # Stored verbatim; no validation is performed here.
        self.name, self.publicId, self.systemId = name, publicId, systemId
class Document(object):
    """Document wrapper around an lxml ElementTree, tracking extra top-level
    child nodes that lxml cannot hold inside the tree itself."""
    def __init__(self):
        # The underlying lxml ElementTree (assigned once a root exists).
        self._elementTree = None
        # Top-level nodes tracked alongside the lxml tree.
        self._childNodes = []

    def appendChild(self, element):
        # lxml allows only one root element, so additional top-level elements
        # are attached as siblings after the last existing root-level node.
        last = self._elementTree.getroot()
        for last in self._elementTree.getroot().itersiblings():
            pass

        last.addnext(element._element)

    def _getChildNodes(self):
        return self._childNodes

    childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info[0] == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(MutableMapping):
def __init__(self, element):
self._element = element
def _coerceKey(self, key):
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
return name
def __getitem__(self, key):
value = self._element._element.attrib[self._coerceKey(key)]
if not PY3 and isinstance(value, binary_type):
value = value.decode("ascii")
return value
def __setitem__(self, key, value):
self._element._element.attrib[self._coerceKey(key)] = value
def __delitem__(self, key):
del self._element._element.attrib[self._coerceKey(key)]
def __iter__(self):
return iter(self._element._element.attrib)
def __len__(self):
return len(self._element._element.attrib)
def clear(self):
return self._element._element.attrib.clear()
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, value):
attributes = self.attributes
attributes.clear()
attributes.update(value)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def cloneNode(self):
element = type(self)(self.name, self.namespace)
if self._element.attrib:
element._element.attrib.update(self._element.attrib)
return element
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = Comment
# self.fragmentClass = builder.DocumentFragment
base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(list(element))
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name:
warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
self.doctype = None
else:
coercedName = self.infosetFilter.coerceElement(name)
if coercedName != name:
warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(coercedName, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
assert parent is None or parent is self.document
assert self.document._elementTree is None
self.initial_comments.append(data)
def insertCommentMain(self, data, parent=None):
if (parent == self.document and
self.document._elementTree.getroot()[-1].tag == comment_type):
warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
super(TreeBuilder, self).insertComment(data, parent)
def insertRoot(self, token):
# Because of the way libxml2 works, it doesn't seem to be possible to
# alter information like the doctype after the tree has been parsed.
# Therefore we need to use the built-in parser to create our initial
# tree, after which we can add elements like normal
docStr = ""
if self.doctype:
assert self.doctype.name
docStr += "<!DOCTYPE %s" % self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += (' PUBLIC "%s" ' %
(self.infosetFilter.coercePubid(self.doctype.publicId or "")))
if self.doctype.systemId:
sysid = self.doctype.systemId
if sysid.find("'") >= 0 and sysid.find('"') >= 0:
warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
sysid = sysid.replace("'", 'U00027')
if sysid.find("'") >= 0:
docStr += '"%s"' % sysid
else:
docStr += "'%s'" % sysid
else:
docStr += "''"
docStr += ">"
if self.doctype.name != token["name"]:
warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
root = etree.fromstring(docStr)
# Append the initial comments:
for comment_token in self.initial_comments:
comment = self.commentClass(comment_token["data"])
root.addprevious(comment._element)
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
root.tag = etree_tag
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
# Reset to the default insert comment function
self.insertComment = self.insertCommentMain
| mit | be6dbfb52903223ac5b519d2213857a5 | 36.637755 | 121 | 0.559374 | 4.579143 | false | false | false | false |
pytorch/fairseq | fairseq/benchmark/dummy_lm.py | 1 | 2757 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from .dummy_dataset import DummyDataset
from fairseq.data import Dictionary
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class DummyLMConfig(FairseqDataclass):
dict_size: int = 49996
dataset_size: int = 100000
tokens_per_sample: int = field(
default=512, metadata={"help": "max sequence length"}
)
add_bos_token: bool = False
batch_size: Optional[int] = II("dataset.batch_size")
max_tokens: Optional[int] = II("dataset.max_tokens")
max_target_positions: int = II("task.tokens_per_sample")
@register_task("dummy_lm", dataclass=DummyLMConfig)
class DummyLMTask(FairseqTask):
def __init__(self, cfg: DummyLMConfig):
super().__init__(cfg)
# load dictionary
self.dictionary = Dictionary()
for i in range(cfg.dict_size):
self.dictionary.add_symbol("word{}".format(i))
self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8
logger.info("dictionary: {} types".format(len(self.dictionary)))
seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.cfg.batch_size is not None:
bsz = self.cfg.batch_size
else:
bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.cfg.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.cfg.tokens_per_sample,
},
num_items=self.cfg.dataset_size,
item_size=self.cfg.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| mit | 05a54fb37928834c79c0d581b092fe58 | 32.216867 | 84 | 0.606456 | 3.745924 | false | false | false | false |
pytorch/fairseq | tests/test_sequence_scorer.py | 1 | 4150 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import tests.utils as test_utils
import torch
from fairseq.sequence_scorer import SequenceScorer
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
"source": torch.LongTensor([w1, w2, eos]),
"target": torch.LongTensor([w1, w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]
),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample["id"].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]["target"])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| mit | dff053c46d83fc9f6ad7a78a9d46e5ae | 33.583333 | 76 | 0.479759 | 3.540956 | false | true | false | false |
pytorch/fairseq | examples/speech_synthesis/utils.py | 1 | 3357 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from scipy.interpolate import interp1d
import torchaudio
from fairseq.tasks.text_to_speech import (
batch_compute_distortion, compute_rms_dist
)
def batch_mel_spectral_distortion(
y1, y2, sr, normalize_type="path", mel_fn=None
):
"""
https://arxiv.org/pdf/2011.03568.pdf
Same as Mel Cepstral Distortion, but computed on log-mel spectrograms.
"""
if mel_fn is None or mel_fn.sample_rate != sr:
mel_fn = torchaudio.transforms.MelSpectrogram(
sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr),
hop_length=int(0.0125 * sr), f_min=20, n_mels=80,
window_fn=torch.hann_window
).to(y1[0].device)
offset = 1e-6
return batch_compute_distortion(
y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2),
compute_rms_dist, normalize_type
)
# This code is based on
# "https://github.com/bastibe/MAPS-Scripts/blob/master/helper.py"
def _same_t_in_true_and_est(func):
def new_func(true_t, true_f, est_t, est_f):
assert type(true_t) is np.ndarray
assert type(true_f) is np.ndarray
assert type(est_t) is np.ndarray
assert type(est_f) is np.ndarray
interpolated_f = interp1d(
est_t, est_f, bounds_error=False, kind='nearest', fill_value=0
)(true_t)
return func(true_t, true_f, true_t, interpolated_f)
return new_func
@_same_t_in_true_and_est
def gross_pitch_error(true_t, true_f, est_t, est_f):
"""The relative frequency in percent of pitch estimates that are
outside a threshold around the true pitch. Only frames that are
considered pitched by both the ground truth and the estimator (if
applicable) are considered.
"""
correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(gross_pitch_error_frames) / np.sum(correct_frames)
def _gross_pitch_error_frames(true_t, true_f, est_t, est_f, eps=1e-8):
voiced_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
true_f_p_eps = [x + eps for x in true_f]
pitch_error_frames = np.abs(est_f / true_f_p_eps - 1) > 0.2
return voiced_frames & pitch_error_frames
def _true_voiced_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) & (true_f != 0)
def _voicing_decision_error_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) != (true_f != 0)
@_same_t_in_true_and_est
def f0_frame_error(true_t, true_f, est_t, est_f):
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return (np.sum(gross_pitch_error_frames) +
np.sum(voicing_decision_error_frames)) / (len(true_t))
@_same_t_in_true_and_est
def voicing_decision_error(true_t, true_f, est_t, est_f):
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(voicing_decision_error_frames) / (len(true_t))
| mit | ff1914efc5354b73f935fe0bffdff812 | 32.237624 | 78 | 0.645815 | 2.837701 | false | false | false | false |
pytorch/fairseq | examples/MMPT/mmpt/utils/shardedtensor.py | 1 | 1410 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import numpy as np
class ShardedTensor(object):
def __init__(self, data, starts):
self.data = data
self.starts = starts
assert self.starts[0] == 0
assert self.starts[-1] == len(self.data)
assert (self.starts[1:] >= self.starts[:-1]).all()
assert (self.starts > -1).all()
@staticmethod
def from_list(xs):
starts = np.full((len(xs) + 1,), -1, dtype=np.long)
data = np.concatenate(xs, axis=0)
starts[0] = 0
for i, x in enumerate(xs):
starts[i + 1] = starts[i] + x.shape[0]
assert (starts > -1).all()
return ShardedTensor(data, starts)
def __getitem__(self, i):
return self.data[self.starts[i] : self.starts[i + 1]]
def __len__(self):
return len(self.starts) - 1
def lengths(self):
return self.starts[1:] - self.starts[:-1]
def save(self, path):
np.save(path + "_starts", self.starts)
np.save(path + "_data", self.data)
@staticmethod
def load(path, mmap_mode=None):
starts = np.load(path + "_starts.npy", mmap_mode)
data = np.load(path + "_data.npy", mmap_mode)
return ShardedTensor(data, starts)
| mit | f240e4fc1b4ebe06f60d2856d011221f | 29.652174 | 65 | 0.584397 | 3.439024 | false | false | false | false |
pytorch/fairseq | fairseq/modules/quantization/pq/utils.py | 1 | 13493 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import torch
import numpy as np
import torch.distributed as dist
import torch.nn as nn
from .modules import PQConv2d, PQEmbedding, PQLinear
from .pq import PQ
def quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=0,
n_iter=15,
eps=1e-6,
max_tentatives=100,
remove_weights=False,
verbose=True,
state_dict=None,
):
"""
Quantize a model in-place by stages. All the targeted
layers are replaced by their quantized counterpart,
and the model is ready for the finetuning of the
centroids in a standard training loop (no modifications
required). Note that we do not quantize biases.
Args:
- model: a nn.Module
- size_tracker: useful for tracking quatization statistics
- layers_to_quantize: a list containing regexps for
filtering the layers to quantize at each stage according
to their name (as in model.named_parameters())
- block_sizes_config: dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
- n_centroids_config: dict like
{
'Conv2d': ('kernel_size', {'*': 256}),
'Linear': ('in_features', {'*': 256})
}
For instance, all conv2d layers are quantized with 256 centroids
- step: the layers to quantize inplace corresponding
to layers_to_quantize[step]
"""
quantized_layers = get_layers(
model, layers_to_quantize[step], remove_weights=remove_weights
)
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
verbose = verbose and is_master_process
# get block size and centroids
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(
f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids"
)
# quantize layer
weight = module.weight.data.clone()
is_bias = "bias" in [x[0] for x in module.named_parameters()]
bias = module.bias.data.clone() if is_bias else None
quantizer = PQ(
weight,
block_size,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
# quantization performed on all GPUs with same seed
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
# If n_iter = 0 and state_dict is provided, then
# we initialize random assignments and centroids to
# random values of the appropriate dimensions
# because the quantized model parameters will
# overwritten by the state_dict later on.
if n_iter == 0 and state_dict:
# Initialize random centroids of the correct size
centroids = torch.rand(centroids.size())
centroids.cuda()
# Get counts and assignment keys from layer in loaded checkpoint.
counts_key = layer + "." + "counts"
assignment_key = layer + "." + "assignments"
# Get number of different bins to include.
counts = list(state_dict[counts_key].shape)[0]
print(layer)
print(state_dict[counts_key])
print(counts)
# Initialize random assignments of the correct size
# with an appropriate number of bins.
num_assignments = list(state_dict[assignment_key].shape)[0]
num_extra = num_assignments - counts
print(num_assignments)
print(num_extra)
assignments_bins = torch.arange(counts)
assignments_rand = torch.randint(0, counts - 1, (num_extra,))
assignments = torch.cat((assignments_bins, assignments_rand), 0)
# assignments = assignments.type(torch.IntTensor)
assignments.cuda()
print("assignments")
print(assignments)
# broadcast results to make sure weights are up-to-date
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
# instantiate the quantized counterpart
if isinstance(module, nn.Linear):
out_features, in_features = map(
lambda k: module.__dict__[k], ["out_features", "in_features"]
)
quantized_module = PQLinear(
centroids, assignments, bias, in_features, out_features
)
elif isinstance(module, nn.Embedding):
num_embeddings, embedding_dim = map(
lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
)
quantized_module = PQEmbedding(
centroids, assignments, num_embeddings, embedding_dim
)
elif isinstance(module, nn.Conv2d):
out_channels, in_channels, kernel_size = map(
lambda k: module.__dict__[k],
["out_channels", "in_channels", "kernel_size"],
)
stride, padding, dilation, groups, padding_mode = map(
lambda k: module.__dict__[k],
["stride", "padding", "dilation", "groups", "padding_mode"],
)
quantized_module = PQConv2d(
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
)
else:
raise ValueError(f"Module {module} not yet supported for quantization")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# update statistics
size_tracker.update(weight, block_size, n_centroids)
# return name of quantized layers
return quantized_layers
def get_layers(model, filter_regexp, remove_weights=False):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
down_layers\\.[123456]\\.(conv[12]|identity\\.conv))
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
# remove .weight in all other names (or .weight_orig is spectral norm)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
# remove weights indicates whether the weights extension should be removed, in addition to
# weight_orig and weight extension on names
if remove_weights:
all_layers = map(lambda x: x.replace(".weights", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers))
def get_param(module, layer_name, param_config):
"""
Given a quantization configuration, get the right parameter
for the module to be quantized.
Args:
- module: a nn.Module
- layer_name: the name of the layer
- param_config: a dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
Remarks:
- if 'fuzzy_name' is passed as a parameter, layers whose layer_name
include 'fuzzy_name' will be assigned the given parameter.
In the following example, conv.expand layers will have a block
size of 9 while conv.reduce will have a block size of 4 and all
other layers will have a block size of 2.
{
'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
}
"""
layer_type = module.__class__.__name__
if layer_type not in param_config:
raise KeyError(f"Layer type {layer_type} not in config for layer {module}")
feature, params = param_config[module.__class__.__name__]
if feature != "fuzzy_name":
feature_value = str(getattr(module, feature))
if feature_value not in params:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"{feature}={feature_value} not in config for layer {module}"
)
else:
feature_values = [name for name in params if name in layer_name]
if len(feature_values) == 0:
if "*" in params:
feature_value = "*"
else:
raise KeyError(f"name={layer_name} not in config for {module}")
else:
feature_value = feature_values[0]
return params[feature_value]
class SizeTracker(object):
"""
Class to keep track of the compressed network size with iPQ.
Args:
- model: a nn.Module
Remarks:
- The compressed size is the sum of three components
for each layer in the network:
(1) Storing the centroids given by iPQ in fp16
(2) Storing the assignments of the blocks in int8
(3) Storing all non-compressed elements such as biases
- This cost in only valid if we use 256 centroids (then
indexing can indeed by done with int8).
"""
def __init__(self, model):
self.model = model
self.size_non_compressed_model = self.compute_size()
self.size_non_quantized = self.size_non_compressed_model
self.size_index = 0
self.size_centroids = 0
self.n_quantized_layers = 0
def compute_size(self):
"""
Computes the size of the model (in MB).
"""
res = 0
for _, p in self.model.named_parameters():
res += p.numel()
return res * 4 / 1024 / 1024
def update(self, W, block_size, n_centroids):
"""
Updates the running statistics when quantizing a new layer.
"""
# bits per weights
bits_per_weight = np.log2(n_centroids) / block_size
self.n_quantized_layers += 1
# size of indexing the subvectors of size block_size (in MB)
size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
self.size_index += size_index_layer
# size of the centroids stored in float16 (in MB)
size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
self.size_centroids += size_centroids_layer
# size of non-compressed layers, e.g. LayerNorms or biases (in MB)
size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
self.size_non_quantized -= size_uncompressed_layer
def __repr__(self):
size_compressed = (
self.size_index + self.size_centroids + self.size_non_quantized
)
compression_ratio = self.size_non_compressed_model / size_compressed # NOQA
return (
f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
f"After quantizing {self.n_quantized_layers} layers, size "
f"(indexing + centroids + other): {self.size_index:.2f} MB + "
f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
)
def attrsetter(*items):
def resolve_attr(obj, attr):
attrs = attr.split(".")
head = attrs[:-1]
tail = attrs[-1]
for name in head:
obj = getattr(obj, name)
return obj, tail
def g(obj, val):
for attr in items:
resolved_obj, resolved_attr = resolve_attr(obj, attr)
setattr(resolved_obj, resolved_attr, val)
return g
| mit | 29731f637b83185323ff97530532a55e | 34.885638 | 100 | 0.584674 | 4.128825 | false | false | false | false |
pytorch/fairseq | examples/MMPT/mmpt/tasks/retritask.py | 1 | 8413 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import pickle
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from ..processors import (
ShardedHow2MetaProcessor,
ShardedVideoProcessor,
ShardedTextProcessor,
VariedLenAligner,
)
from ..datasets import MMDataset
from .task import Task
from ..modules import vectorpool
from ..evaluators.predictor import Predictor
from ..utils import set_seed, get_local_rank, get_world_size
class RetriTask(Task):
"""abstract class for task with retrival."""
def reshape_subsample(self, sample):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return tensor
def build_dataloader(self):
"""called by `get_batch_iterator` in fairseqmmtask. """
# TODO: hard-code dataloader for retri for now and configurable in .yaml.
# reuse the `train.lst`.
self.config.dataset.split = "train"
meta_processor = ShardedHow2MetaProcessor(self.config.dataset)
video_processor = ShardedVideoProcessor(self.config.dataset)
text_processor = ShardedTextProcessor(self.config.dataset)
aligner = VariedLenAligner(self.config.dataset)
aligner.subsampling = self.config.dataset.clip_per_video
self.retri_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
retri_sampler = DistributedSampler(self.retri_data)
infer_scale = 16
batch_size = self.config.dataset.num_video_per_batch \
* infer_scale
self.retri_dataloader = DataLoader(
self.retri_data,
collate_fn=self.retri_data.collater,
batch_size=batch_size,
shuffle=False,
sampler=retri_sampler,
num_workers=self.config.fairseq.dataset.num_workers
)
return self.retri_dataloader
def retrive_candidates(self, epoch, dataloader=None):
if get_local_rank() == 0:
print("running retrieval model.")
out_dir = os.path.join(
self.config.fairseq.checkpoint.save_dir, "retri")
os.makedirs(out_dir, exist_ok=True)
if not os.path.isfile(
os.path.join(
out_dir, "batched_e" + str(epoch) + "_videos0.pkl")
):
if dataloader is None:
dataloader = self.retri_dataloader
self.model.eval()
self.model.is_train = False
assert self.retri_data.meta_processor.data == \
self.train_data.meta_processor.data # video_ids not mutated.
self._retri_predict(epoch, dataloader)
self.model.train()
self.model.is_train = True
torch.distributed.barrier()
output = self._retri_sync(epoch, out_dir)
torch.distributed.barrier()
self.train_data.meta_processor.set_candidates(output)
return output
class VideoRetriTask(RetriTask):
    """RetriTask on video level."""
    def reshape_subsample(self, sample):
        """Flatten per-video clip tensors when multiple clips are sampled.

        Only applies when ``config.dataset.clip_per_video`` is present and
        greater than 1; otherwise the batch is returned untouched.
        """
        if (
            hasattr(self.config.dataset, "clip_per_video")
            and self.config.dataset.clip_per_video is not None
            and self.config.dataset.clip_per_video > 1
        ):
            for key in sample:
                if torch.is_tensor(sample[key]):
                    sample[key] = self.flat_subsample(sample[key])
        return sample
    def flat_subsample(self, tensor):
        """Drop a leading singleton dim, then defer to the base Task logic."""
        if tensor.size(0) == 1:
            tensor = tensor.squeeze(0)
        return Task.flat_subsample(self, tensor)
    def _retri_predict(self, epoch, dataloader):
        """Embed all videos, then retrieve candidate neighbours per video.

        Re-seeding with `epoch` before each phase keeps the clip sampling
        identical between the embedding and retrieval passes.
        """
        set_seed(epoch)
        # save for retrival.
        predictor = VideoPredictor(self.config)
        predictor.predict_loop(
            self.model, dataloader)
        set_seed(epoch)  # get the same text clips.
        # retrival.
        retri_predictor = VideoRetriPredictor(
            self.config)
        retri_predictor.predict_loop(
            self.model, predictor.vecpool.retriver, epoch)
        # Free the (large) predictor state eagerly before training resumes.
        del predictor
        del retri_predictor
    def _retri_sync(self, epoch, out_dir):
        """Merge the per-rank candidate pickles into one list.

        Every rank reads all shards, so each rank ends up with the same
        merged result without any explicit broadcast.
        """
        # gpu do the same merge.
        batched_videos = []
        for local_rank in range(get_world_size()):
            fn = os.path.join(
                out_dir,
                "batched_e" + str(epoch) + "_videos" + str(local_rank) + ".pkl")
            with open(fn, "rb") as fr:
                batched_videos.extend(pickle.load(fr))
        print(
            "[INFO] batched_videos",
            len(batched_videos), len(batched_videos[0]))
        return batched_videos
class VideoPredictor(Predictor):
    """Predictor that pools per-video embedding vectors into a vector pool."""
    def __init__(self, config):
        # NOTE(review): Predictor.__init__ is not called here — confirm the
        # base class has no required initialization.
        vectorpool_cls = getattr(vectorpool, config.vectorpool_cls)
        self.vecpool = vectorpool_cls(config)
    def predict_loop(
        self,
        model,
        dataloader,
        early_stop=-1,
    ):
        """Run inference over `dataloader`, feeding each batch into vecpool.

        Args:
            model: the model to run (caller is expected to set eval mode).
            dataloader: batches to embed.
            early_stop: stop after this many batches; -1 means no limit.

        Returns:
            The finalized retriever held by the vector pool.
        """
        with torch.no_grad():
            if get_local_rank() == 0:
                dataloader = tqdm(dataloader)
            for batch_idx, batch in enumerate(dataloader):
                if batch_idx == early_stop:
                    break
                self(batch, model)
            return self.finalize()
    def __call__(self, sample, model, **kwargs):
        """Embed one batch and add the model outputs to the vector pool.

        Tensors with >= 2 dims are flattened from (batch, clips, ...) to
        (batch * clips, ...) before the forward pass; `subsample` records the
        original clips-per-video count so the pool can regroup them.
        """
        param = next(model.parameters())
        dtype = param.dtype
        device = param.device
        # assumes sample["vfeats"] is (batch, clips, ...) so dim 1 is the
        # number of clips per video — TODO confirm against the dataset.
        subsample = sample["vfeats"].size(1)
        sample = self.to_ctx(sample, device, dtype)
        for key in sample:
            if torch.is_tensor(sample[key]):
                size = sample[key].size()
                if len(size) >= 2:
                    batch_size = size[0] * size[1]
                    expanded_size = (
                        (batch_size,) + size[2:] if len(size) > 2
                        else (batch_size,)
                    )
                    sample[key] = sample[key].view(expanded_size)
        outputs = model(**sample)
        sample.update(outputs)
        self.vecpool(sample, subsample)
    def finalize(self):
        """Finish retriever index training (if still pending) and return it."""
        print("[INFO]", self.vecpool)
        if not self.vecpool.retriver.db.is_trained:
            self.vecpool.retriver.finalize_training()
        return self.vecpool.retriver
class VideoRetriPredictor(Predictor):
    """
    Online Retrieval Predictor for Clips (used by RetriTask).
    TODO: merge this with VisPredictor?
    """
    def __init__(self, config):
        # Per-rank output pickles are written under <save_dir>/retri.
        self.pred_dir = os.path.join(
            config.fairseq.checkpoint.save_dir,
            "retri")
        self.num_cands = config.num_cands
        self.num_video_per_batch = config.dataset.num_video_per_batch
    def predict_loop(
        self,
        model,
        retriver,
        epoch,
        early_stop=-1
    ):
        """Group videos into retrieval-based batches and pickle the result.

        Args:
            model: unused here; kept for Predictor interface parity.
            retriver: index exposing videoid_to_vectoridx / search_by_video_ids.
            epoch: used to name this rank's output file.
            early_stop: stop after this many center videos; -1 for no limit.

        Returns:
            List of candidate-video-id lists, one per sampled center video.
        """
        # a fake loop that only try to recover video vector
        # from video_id.
        batched_videos = []
        # obtain available video_ids.
        video_ids = list(retriver.videoid_to_vectoridx.keys())
        # Sample N // k center videos so the k-sized candidate batches cover
        # roughly the whole dataset once per epoch.
        dataloader = random.sample(
            video_ids,
            len(video_ids) // self.num_video_per_batch
        )
        if get_local_rank() == 0:
            dataloader = tqdm(dataloader)
        for batch_idx, batch in enumerate(dataloader):
            # batch is one video id.
            if batch_idx == early_stop:
                break
            video_ids = retriver.search_by_video_ids(
                [batch], self.num_cands)[0]
            if len(video_ids) > self.num_video_per_batch:
                # we moved the center to make cluster robust.
                video_ids = random.sample(video_ids, self.num_video_per_batch)
            batched_videos.append(video_ids)
        return self.finalize(batched_videos, epoch)
    def finalize(self, batched_videos, epoch):
        """Pickle this rank's batches to disk and return them."""
        fn = os.path.join(
            self.pred_dir,
            "batched_e" + str(epoch) + "_videos" + str(get_local_rank()) + ".pkl")
        with open(fn, "wb") as fw:
            pickle.dump(batched_videos, fw, pickle.HIGHEST_PROTOCOL)
        return batched_videos
| mit | 50ecdec595bc228c32be58024e8cf554 | 32.252964 | 82 | 0.582907 | 3.838047 | false | true | false | false |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the WorkImage/WorkImageType models and adjust WorkCreator
    ordering fields. Auto-generated; do not edit the operations by hand."""
    dependencies = [
        ('icekit_plugins_image', '0008_auto_20160920_2114'),
        ('gk_collections_work_creator', '0003_auto_20161026_1606'),
    ]
    operations = [
        migrations.CreateModel(
            name='WorkImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('show_title', models.BooleanField(default=False)),
                ('show_caption', models.BooleanField(default=True)),
                ('title_override', models.CharField(blank=True, max_length=512)),
                ('caption_override', models.TextField(blank=True)),
                ('order', models.PositiveIntegerField(default=0, help_text=b'Which order to show this image in the set of images.')),
                ('image', models.ForeignKey(to='icekit_plugins_image.Image', help_text='An image from the image library.')),
            ],
            options={
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='WorkImageType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(max_length=255)),
            ],
            options={
                'verbose_name': 'Image type',
            },
        ),
        migrations.AlterModelOptions(
            name='workcreator',
            options={'verbose_name': 'Work-Creator relation', 'ordering': ('order', '-is_primary')},
        ),
        migrations.AlterField(
            model_name='workcreator',
            name='is_primary',
            field=models.BooleanField(verbose_name=b'Primary?', default=True),
        ),
        migrations.AlterField(
            model_name='workcreator',
            name='order',
            field=models.PositiveIntegerField(default=0, help_text=b'Which order to show this creator in the list of creators.'),
        ),
        migrations.AddField(
            model_name='workimage',
            name='type',
            field=models.ForeignKey(blank=True, null=True, to='gk_collections_work_creator.WorkImageType'),
        ),
        migrations.AddField(
            model_name='workimage',
            name='work',
            field=models.ForeignKey(to='gk_collections_work_creator.WorkBase'),
        ),
    ]
| mit | 8d2aa09354806dc07800fa9932275735 | 39.107692 | 133 | 0.559264 | 4.426146 | false | false | false | false |
from glamkit_collections.contrib.work_creator.models import WorkBase
from django.db import models
class Artwork(WorkBase):
    """A collection work with medium and physical-dimension metadata.

    Extends WorkBase with display strings for medium/dimensions and the raw
    numeric measurements used for scale drawings.
    """
    medium_display = models.CharField(
        blank=True,
        max_length=255,
        help_text='A display field for information concerning the '
                  'material/media & support of the object'
    )
    # how big is it
    dimensions_is_two_dimensional = models.BooleanField(
        blank=True,
        default=False,
        help_text="A flag for rapid categorization of the object as "
                  "essentially two-dimensional or three-dimensional. "
                  "Used when generating the Preview scale drawing."
    )
    dimensions_display = models.CharField(
        blank=True,
        max_length=255,
        help_text='A display field that contains the dimensions of the object '
                  '- the Display Height, Width, and Depth.'
    )
    dimensions_extent = models.CharField(
        blank=True,
        max_length=255,
        help_text='A field to record the extent of the object represented by '
                  'the dimensions in the object record, '
                  'e.g., "image (w/o frame)," "overall (incl. pedestal)."'
    )
    # NOTE(review): field names say "cm" but the width help_text says
    # "metres" — confirm which unit is actually stored before relying on it.
    dimensions_width_cm = models.FloatField(
        blank=True,
        null=True,
        help_text='The measurement of the object\'s width, in metres'
    )
    dimensions_height_cm = models.FloatField(
        blank=True,
        null=True, help_text="ditto height"
    )
    dimensions_depth_cm = models.FloatField(
        blank=True,
        null=True, help_text="ditto depth"
    )
    dimensions_weight_kg = models.FloatField(
        blank=True,
        null=True,
        # Fixed copy-paste bug: help_text previously described the "width"
        # in kilograms. (help_text changes need a no-op migration.)
        help_text="The measurement of the object's weight, in kilograms"
    )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
    """Initial schema for glamkit_collections: Country and
    GeographicLocation. Auto-generated; do not edit the operations by hand."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(max_length=255)),
                ('iso_country', django_countries.fields.CountryField(max_length=2, blank=True)),
                # NOTE(review): each 'continent' choice is a (tuple, tuple)
                # pair rather than the usual (value, label) — this looks like
                # a generation artifact, but rewriting a historical migration
                # would desync migration state, so it is left as-is.
                ('continent', models.CharField(choices=[((b'AS', b'Asia'), (b'AS', b'Asia')), ((b'AF', b'Africa'), (b'AF', b'Africa')), ((b'NA', b'North America'), (b'NA', b'North America')), ((b'SA', b'South America'), (b'SA', b'South America')), ((b'EU', b'Europe'), (b'EU', b'Europe')), ((b'AN', b'Antarctica'), (b'AN', b'Antarctica')), ((b'OC', b'Oceania'), (b'OC', b'Oceania'))], null=True, blank=True, max_length=31)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GeographicLocation',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('state_province', models.CharField(verbose_name=b'State or province', max_length=255, blank=True)),
                ('city', models.CharField(max_length=255, blank=True)),
                ('neighborhood', models.CharField(max_length=255, blank=True)),
                ('colloquial_historical', models.CharField(max_length=255, help_text=b'The colloquial or historical name of the place, e.g., "East Bay"', blank=True)),
                ('country', models.ForeignKey(null=True, to='glamkit_collections.Country', blank=True)),
            ],
            options={
                'ordering': ('colloquial_historical', 'country', 'state_province', 'city', 'neighborhood'),
            },
        ),
    ]
| mit | 766a374f06f51e653d3c4d3269b2af09 | 49.95122 | 424 | 0.564385 | 3.697345 | false | false | false | false |
# Copyright 2013 BrewPi
# This file is part of BrewPi.
# BrewPi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BrewPi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BrewPi. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import json
def getPinList(boardType, shieldType):
    """Return the pin map for a BrewPi controller board/shield combination.

    Each entry is a dict with keys:
        'val'  - numeric pin id as understood by the controller firmware,
        'text' - label shown in the web UI (padding preserved for alignment),
        'type' - the pin's role (act/door/onewire/beep/rotary/spi/i2c/serial/
                 display/free).

    Bug fix: several entries previously used the key ' text' (with a leading
    space) instead of 'text', leaving those pins without a label key at all.

    Args:
        boardType: controller family, e.g. "uno", "leonardo", "core",
            "photon", "esp8266", "esp32".
        shieldType: shield revision, e.g. "revA", "revC", "I2C", "diy",
            "V1"/"V2". Ignored for ESP-based boards.

    Returns:
        A list of pin dicts, or an empty dict for unknown combinations
        (the historical sentinel that callers expect).
    """
    if boardType == "leonardo" and shieldType == "revC":
        pinList = [{'val': 6, 'text': ' 6 (Act 1)', 'type': 'act'},
                   {'val': 5, 'text': ' 5 (Act 2)', 'type': 'act'},
                   {'val': 2, 'text': ' 2 (Act 3)', 'type': 'act'},
                   {'val': 23, 'text': 'A5 (Act 4)', 'type': 'act'},
                   {'val': 4, 'text': ' 4 (Door)', 'type': 'door'},
                   {'val': 22, 'text': 'A4 (OneWire)', 'type': 'onewire'},
                   {'val': 3, 'text': ' 3', 'type': 'beep'},
                   {'val': 7, 'text': ' 7', 'type': 'rotary'},
                   {'val': 8, 'text': ' 8', 'type': 'rotary'},
                   {'val': 9, 'text': ' 9', 'type': 'rotary'},
                   {'val': 10, 'text': '10', 'type': 'spi'},
                   {'val': 0, 'text': ' 0', 'type': 'free'},
                   {'val': 1, 'text': ' 1', 'type': 'free'},
                   {'val': 11, 'text': '11', 'type': 'free'},
                   {'val': 12, 'text': '12', 'type': 'free'},
                   {'val': 13, 'text': '13', 'type': 'free'},
                   {'val': 18, 'text': 'A0', 'type': 'free'},
                   {'val': 19, 'text': 'A1', 'type': 'free'},
                   {'val': 20, 'text': 'A2', 'type': 'free'},
                   {'val': 21, 'text': 'A3', 'type': 'free'}]
    elif boardType == "uno" and shieldType == "revC":
        pinList = [{'val': 0, 'text': ' 0', 'type': 'serial'},
                   {'val': 1, 'text': ' 1', 'type': 'serial'},
                   {'val': 2, 'text': ' 2 (Act 3)', 'type': 'act'},
                   {'val': 3, 'text': ' 3', 'type': 'beep'},
                   {'val': 4, 'text': ' 4 (Door)', 'type': 'door'},
                   {'val': 5, 'text': ' 5 (Act 2)', 'type': 'act'},
                   {'val': 6, 'text': ' 6 (Act 1)', 'type': 'act'},
                   {'val': 7, 'text': ' 7', 'type': 'rotary'},
                   {'val': 8, 'text': ' 8', 'type': 'rotary'},
                   {'val': 9, 'text': ' 9', 'type': 'rotary'},
                   {'val': 10, 'text': '10', 'type': 'spi'},
                   {'val': 11, 'text': '11', 'type': 'spi'},
                   {'val': 12, 'text': '12', 'type': 'spi'},
                   {'val': 13, 'text': '13', 'type': 'spi'},
                   {'val': 14, 'text': 'A0', 'type': 'free'},
                   {'val': 15, 'text': 'A1', 'type': 'free'},
                   {'val': 16, 'text': 'A2', 'type': 'free'},
                   {'val': 17, 'text': 'A3', 'type': 'free'},
                   {'val': 18, 'text': 'A4 (OneWire)', 'type': 'onewire'},
                   {'val': 19, 'text': 'A5 (Act 4)', 'type': 'act'}]
    elif boardType == "uno" and shieldType == "I2C":
        pinList = [{'val': 0, 'text': ' 0', 'type': 'serial'},
                   {'val': 1, 'text': ' 1', 'type': 'serial'},
                   {'val': 2, 'text': ' 2 (Act 3)', 'type': 'act'},
                   {'val': 3, 'text': ' 3 (Alarm)', 'type': 'beep'},
                   {'val': 4, 'text': ' 4 (Door)', 'type': 'door'},
                   {'val': 5, 'text': ' 5 (Act 1)', 'type': 'act'},
                   {'val': 6, 'text': ' 6 (Act 2)', 'type': 'act'},
                   {'val': 7, 'text': ' 7', 'type': 'rotary'},
                   {'val': 8, 'text': ' 8', 'type': 'rotary'},
                   {'val': 9, 'text': ' 9', 'type': 'rotary'},
                   {'val': 10, 'text': '10 (Act 4)', 'type': 'act'},
                   {'val': 11, 'text': '11', 'type': 'free'},
                   {'val': 12, 'text': '12', 'type': 'free'},
                   {'val': 13, 'text': '13', 'type': 'free'},
                   {'val': 14, 'text': 'A0 (OneWire)', 'type': 'onewire'},
                   {'val': 15, 'text': 'A1 (OneWire)', 'type': 'free'},
                   {'val': 16, 'text': 'A2 (OneWire)', 'type': 'free'},
                   {'val': 17, 'text': 'A3 (Act 4)', 'type': 'act'},
                   {'val': 18, 'text': 'A4 (SDA)', 'type': 'i2c'},
                   {'val': 19, 'text': 'A5 (SCL)', 'type': 'i2c'}]
    elif boardType == "leonardo" and shieldType == "revA":
        pinList = [{'val': 6, 'text': ' 6 (Cool)', 'type': 'act'},
                   {'val': 5, 'text': ' 5 (Heat)', 'type': 'act'},
                   {'val': 4, 'text': ' 4 (Door)', 'type': 'door'},
                   {'val': 22, 'text': 'A4 (OneWire)', 'type': 'onewire'},
                   {'val': 23, 'text': 'A5 (OneWire1)', 'type': 'onewire'},
                   {'val': 3, 'text': ' 3', 'type': 'beep'},
                   {'val': 7, 'text': ' 7', 'type': 'rotary'},
                   {'val': 8, 'text': ' 8', 'type': 'rotary'},
                   {'val': 9, 'text': ' 9', 'type': 'rotary'},
                   {'val': 10, 'text': '10', 'type': 'spi'},
                   {'val': 0, 'text': ' 0', 'type': 'free'},
                   {'val': 1, 'text': ' 1', 'type': 'free'},
                   {'val': 2, 'text': ' 2', 'type': 'free'},
                   {'val': 11, 'text': '11', 'type': 'free'},
                   {'val': 12, 'text': '12', 'type': 'free'},
                   {'val': 13, 'text': '13', 'type': 'free'},
                   {'val': 18, 'text': 'A0', 'type': 'free'},
                   {'val': 19, 'text': 'A1', 'type': 'free'},
                   {'val': 20, 'text': 'A2', 'type': 'free'},
                   {'val': 21, 'text': 'A3', 'type': 'free'}]
    elif boardType == "uno" and shieldType == "revA":
        pinList = [{'val': 6, 'text': ' 6 (Cool)', 'type': 'act'},
                   {'val': 5, 'text': ' 5 (Heat)', 'type': 'act'},
                   {'val': 4, 'text': ' 4 (Door)', 'type': 'door'},
                   {'val': 18, 'text': 'A4 (OneWire)', 'type': 'onewire'},
                   {'val': 19, 'text': 'A5 (OneWire1)', 'type': 'onewire'},
                   {'val': 3, 'text': ' 3', 'type': 'beep'},
                   {'val': 7, 'text': ' 7', 'type': 'rotary'},
                   {'val': 8, 'text': ' 8', 'type': 'rotary'},
                   {'val': 9, 'text': ' 9', 'type': 'rotary'},
                   {'val': 10, 'text': '10', 'type': 'spi'},
                   {'val': 11, 'text': '11', 'type': 'spi'},
                   {'val': 12, 'text': '12', 'type': 'spi'},
                   {'val': 13, 'text': '13', 'type': 'spi'},
                   {'val': 0, 'text': ' 0', 'type': 'serial'},
                   {'val': 1, 'text': ' 1', 'type': 'serial'},
                   {'val': 2, 'text': ' 2', 'type': 'free'},
                   {'val': 14, 'text': 'A0', 'type': 'free'},
                   {'val': 15, 'text': 'A1', 'type': 'free'},
                   {'val': 16, 'text': 'A2', 'type': 'free'},
                   {'val': 17, 'text': 'A3', 'type': 'free'}]
    elif boardType == "leonardo" and shieldType == "diy":
        pinList = [{'val': 12, 'text': ' 12 (Cool)', 'type': 'act'},
                   {'val': 13, 'text': ' 13 (Heat)', 'type': 'act'},
                   {'val': 23, 'text': ' A5 (Door)', 'type': 'door'},
                   {'val': 10, 'text': '10 (OneWire)', 'type': 'onewire'},
                   {'val': 11, 'text': '11 (OneWire1)', 'type': 'onewire'},
                   {'val': 0, 'text': ' 0', 'type': 'rotary'},
                   {'val': 1, 'text': ' 1', 'type': 'rotary'},
                   {'val': 2, 'text': ' 2', 'type': 'rotary'},
                   {'val': 3, 'text': ' 3', 'type': 'display'},
                   {'val': 4, 'text': '4', 'type': 'display'},
                   {'val': 5, 'text': '5', 'type': 'display'},
                   {'val': 6, 'text': '6', 'type': 'display'},
                   {'val': 7, 'text': '7', 'type': 'display'},
                   {'val': 8, 'text': '8', 'type': 'display'},
                   {'val': 9, 'text': '9', 'type': 'display'},
                   {'val': 18, 'text': 'A0', 'type': 'free'},
                   {'val': 19, 'text': 'A1', 'type': 'free'},
                   {'val': 20, 'text': 'A2', 'type': 'free'},
                   {'val': 21, 'text': 'A3', 'type': 'free'},
                   {'val': 22, 'text': 'A4', 'type': 'free'}]
    elif (boardType == "core" or boardType == "photon") \
            and (shieldType == "V1" or shieldType == "V2"):
        pinList = [{'val': 17, 'text': 'Output 0 (A7)', 'type': 'act'},
                   {'val': 16, 'text': 'Output 1 (A6)', 'type': 'act'},
                   {'val': 11, 'text': 'Output 2 (A1)', 'type': 'act'},
                   {'val': 10, 'text': 'Output 3 (A0)', 'type': 'act'},
                   {'val': 0, 'text': 'OneWire', 'type': 'onewire'}]
    elif (boardType == "esp8266"):  # Note - Excluding shield definition for now
        pinList = [{'val': 16, 'text': ' D0 (Heat)', 'type': 'act'},
                   {'val': 14, 'text': ' D5 (Cool)', 'type': 'act'},
                   {'val': 13, 'text': ' D7 (Door)', 'type': 'door'},
                   {'val': 12, 'text': 'D6 (OneWire)', 'type': 'onewire'},
                   {'val': 0, 'text': 'D3 (Buzzer)', 'type': 'beep'},]
    elif (boardType == "esp32"):  # Note - Excluding shield definition for now
        # NOTE(review): 'val' 13 appears twice below (Door labelled "34" and
        # OneWire labelled "13") — confirm the intended Door GPIO.
        pinList = [{'val': 25, 'text': ' 25 (Heat)', 'type': 'act'},
                   {'val': 26, 'text': ' 26 (Cool)', 'type': 'act'},
                   {'val': 13, 'text': ' 34 (Door)', 'type': 'door'},
                   {'val': 13, 'text': '13 (OneWire)', 'type': 'onewire'}, ]
                   # {'val': 0, 'text': 'D3 (Buzzer)', 'type': 'beep'}, ]
    else:
        print('Unknown controller or board type')
        pinList = {}
    return pinList
def getPinListJson(boardType, shieldType):
    """Return the pin list for a board/shield as a JSON string.

    Returns:
        The JSON-encoded pin list, or 0 (historical sentinel) when the pin
        list cannot be serialized.
    """
    try:
        pinList = getPinList(boardType, shieldType)
        # json.dumps raises TypeError/ValueError on unserializable input.
        # The previous handler caught json.JSONDecodeError, which only
        # applies to *parsing* and could never fire here; ValueError is its
        # parent class, so this remains backward-compatible.
        return json.dumps(pinList)
    except (TypeError, ValueError):
        print("Cannot process pin list JSON")
        return 0
def pinListTest():
    """Print the JSON pin list for every known board/shield combination."""
    known_combinations = (
        ("leonardo", "revA"),
        ("leonardo", "revC"),
        ("uno", "revA"),
        ("uno", "revC"),
        ("uno", "I2C"),
        ("core", "V1"),
        ("core", "V2"),
        ("photon", "V1"),
        ("photon", "V2"),
    )
    for board, shield in known_combinations:
        print(getPinListJson(board, shield))
# Allow running this module directly as a smoke test of the JSON output.
if __name__ == "__main__":
    pinListTest()
| mit | 60cecad4e42760b0ab1bb4d0b9479a0c | 56.041451 | 80 | 0.39068 | 3.277464 | false | false | false | false |
from __future__ import unicode_literals
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
import os.path
import requests
import logging
from . import fhash
from constance import config
try:
from fermentrack_django import settings
except:
from fermentrack_com import \
settings # This file is a direct copy of what I'm using for fermentrack.com. Simplifying keeping things in sync.
logger = logging.getLogger(__name__)

# Base URL of the fermentrack.com API that serves the firmware catalogue.
FERMENTRACK_COM_URL = "https://www.fermentrack.com"
# Version of the firmware-metadata schema this code understands; returned by
# get_model_version(), while the remote value is fetched by
# check_model_version().
MODEL_VERSION = 3
def check_model_version():
    """Fetch the firmware-metadata model version from fermentrack.com.

    Returns:
        The decoded JSON payload on success, or False when the request fails
        or the response body is not valid JSON.
    """
    try:
        url = FERMENTRACK_COM_URL + "/api/model_version/"
        # A timeout prevents this blocking forever; requests has no default.
        response = requests.get(url, timeout=10)
        data = response.json()
    except (requests.exceptions.RequestException, ValueError):
        # Network/timeout failure or non-JSON body. The previous bare
        # `except:` also hid programming errors; those now propagate.
        return False
    return data
def get_model_version():
    """Return the firmware-metadata model version this local code supports."""
    return MODEL_VERSION
class DeviceFamily(models.Model):
    """A family of flashable controllers (e.g. Arduino, ESP8266) and the
    tooling used to flash/detect them."""
    class Meta:
        verbose_name = "Device Family"
        verbose_name_plural = "Device Families"
    # Flash tool identifiers (value stored in the DB).
    FLASH_ARDUINO = "avrdude"
    FLASH_ESP = "esptool"
    FLASH_CHOICES = (
        (FLASH_ARDUINO, "Avrdude (Arduino)"),
        (FLASH_ESP, "Esptool (ESP8266)")
    )
    # Serial-detection family identifiers.
    DETECT_ARDUINO = "arduino"
    DETECT_ESP8266 = "esp8266"
    DETECT_PARTICLE = "particle"
    DETECT_ESP32 = "esp32"
    DETECT_CHOICES = (
        (DETECT_ARDUINO, "Arduino"),
        (DETECT_ESP8266, "ESP8266"),
        (DETECT_PARTICLE, "Particle (Spark/Core)"),
        (DETECT_ESP32, "ESP32"),
    )
    name = models.CharField(max_length=30, blank=False, null=False, help_text="The name of the device family")
    flash_method = models.CharField(max_length=30, choices=FLASH_CHOICES, default=FLASH_ARDUINO)
    detection_family = models.CharField(max_length=30, choices=DETECT_CHOICES, default=DETECT_ARDUINO)
    def __str__(self):
        return self.name
    @staticmethod
    def load_from_website():
        """Replace the local DeviceFamily table with the list served by
        fermentrack.com.

        Returns True when the table was refreshed, False when the request
        failed or returned no rows.
        """
        try:
            url = FERMENTRACK_COM_URL + "/api/firmware_family_list/"
            response = requests.get(url)
            data = response.json()
        # NOTE(review): bare except also hides programming errors; treated
        # as deliberate best-effort behavior and left unchanged here.
        except:
            return False
        if len(data) > 0:
            # If we got data, clear out the cache of DeviceFamilies
            DeviceFamily.objects.all().delete()
            # Then loop through the data we received and recreate it again
            for row in data:
                try:
                    # This gets wrapped in a try/except as I don't want this failing if the local copy of Fermentrack
                    # is slightly behind what is available at Fermentrack.com (eg - if there are new device families)
                    newDevice = DeviceFamily(name=row['name'], flash_method=row['flash_method'], id=row['id'],
                                             detection_family=row['detection_family'])
                    newDevice.save()
                except:
                    pass
            return True  # DeviceFamily table is updated
        return False  # We didn't get data back from Fermentrack.com, or there was an error
    def file_suffix(self):
        """Return the firmware file extension for this family's flash tool,
        or None for an unrecognized flash_method."""
        # file_suffix is used to determine the local filename for the firmware file
        if self.flash_method == self.FLASH_ARDUINO:
            return ".hex"
        elif self.flash_method == self.FLASH_ESP:
            return ".bin"
        else:
            return None
class Firmware(models.Model):
    """A flashable firmware image, plus the optional partition/SPIFFS/
    bootloader/otadata images used by ESP32 devices.

    Rows mirror the catalogue served by fermentrack.com; field definitions
    are unchanged (migration-sensitive).
    """
    class Meta:
        verbose_name = "Firmware"
        verbose_name_plural = "Firmware"  # I don't care if this is ambiguous, it bothers me.
    WEIGHT_CHOICES = (
        (1, "1 (Highest)"),
        (2, "2"),
        (3, "3"),
        (4, "4"),
        (5, "5"),
        (6, "6"),
        (7, "7"),
        (8, "8"),
        (9, "9 (Lowest)"),
    )
    name = models.CharField(max_length=128, blank=False, null=False, help_text="The name of the firmware")
    family = models.ForeignKey('DeviceFamily', on_delete=models.CASCADE)
    version = models.CharField(max_length=20, default="0.0", help_text="The major version number")
    revision = models.CharField(max_length=20, default="", help_text="The minor revision number", blank=True)
    variant = models.CharField(max_length=80, default="", blank=True,
                               help_text="The firmware 'variant' (if applicable)")
    is_fermentrack_supported = models.BooleanField(default=False,
                                                   help_text="Is this firmware officially supported by Fermentrack?")
    in_error = models.BooleanField(default=False, help_text="Is there an error with this firmware that should "
                                                            "prevent it from being downloaded?")
    description = models.TextField(default="", blank=True, null=False, help_text="The description of the firmware")
    variant_description = models.TextField(default="", blank=True, null=False,
                                           help_text="The description of the variant")
    post_install_instructions = models.TextField(default="", blank=True, null=False,
                                                 help_text="Instructions to be displayed to the user after installation")
    download_url = models.CharField(max_length=255, default="", blank=True, null=False,
                                    help_text="The URL at which the firmware can be downloaded")
    download_url_partitions = models.CharField(max_length=255, default="", blank=True, null=False,
                                               help_text="The URL at which the partitions binary can be downloaded (ESP32 only, optional)")
    download_url_spiffs = models.CharField(max_length=255, default="", blank=True, null=False,
                                           help_text="The URL at which the SPIFFS binary can be downloaded (optional)")
    download_url_bootloader = models.CharField(max_length=255, default="", blank=True, null=False,
                                               help_text="The URL at which the bootloader binary can be downloaded (ESP32 only, optional)")
    download_url_otadata = models.CharField(max_length=255, default="", blank=True, null=False,
                                            help_text="The URL at which the OTA Dta binary can be downloaded (ESP32 only, optional)")
    spiffs_address = models.CharField(max_length=12, default="", blank=True, null=False,
                                      help_text="The flash address the SPIFFS data should be flashed to")
    otadata_address = models.CharField(max_length=12, default="", blank=True, null=False,
                                       help_text="The flash address the SPIFFS data should be flashed to (ESP32 only)")
    weight = models.IntegerField(default=5, help_text="Weight for sorting (Lower weights rise to the top)",
                                 choices=WEIGHT_CHOICES)
    checksum = models.CharField(max_length=64, help_text="SHA256 checksum of the file (for checking validity)",
                                default="", blank=True)
    checksum_partitions = models.CharField(max_length=64, help_text="SHA256 checksum of the partitions file (for checking validity)",
                                           default="", blank=True)
    checksum_spiffs = models.CharField(max_length=64, help_text="SHA256 checksum of the SPIFFS file (for checking validity)",
                                       default="", blank=True)
    checksum_bootloader = models.CharField(max_length=64, help_text="SHA256 checksum of the bootloader file (for checking validity)",
                                           default="", blank=True)
    checksum_otadata = models.CharField(max_length=64, help_text="SHA256 checksum of the otadata file (for checking validity)",
                                        default="", blank=True)
    project = models.ForeignKey('Project', on_delete=models.SET_NULL, default=None, null=True)
    def __str__(self):
        return self.name + " - " + self.version + " - " + self.revision + " - " + self.variant
    @staticmethod
    def load_from_website():
        """Replace the local Firmware table with the catalogue served by
        fermentrack.com.

        Returns True when the table was refreshed, False when the request
        failed or returned no rows.
        """
        try:
            url = FERMENTRACK_COM_URL + "/api/firmware_list/all/"
            # Timeout prevents hanging forever; the previous bare `except:`
            # also hid programming errors, so catch only request/JSON issues.
            response = requests.get(url, timeout=10)
            data = response.json()
        except (requests.exceptions.RequestException, ValueError):
            return False
        if len(data) > 0:
            # If we got data, clear out the cache of Firmware
            Firmware.objects.all().delete()
            # Then loop through the data we received and recreate it again
            for row in data:
                newFirmware = Firmware(
                    name=row['name'], version=row['version'], revision=row['revision'], family_id=row['family_id'],
                    variant=row['variant'], is_fermentrack_supported=row['is_fermentrack_supported'],
                    in_error=row['in_error'], description=row['description'],
                    variant_description=row['variant_description'], download_url=row['download_url'], weight=row['weight'],
                    download_url_partitions=row['download_url_partitions'],
                    download_url_spiffs=row['download_url_spiffs'], checksum=row['checksum'],
                    checksum_partitions=row['checksum_partitions'], checksum_spiffs=row['checksum_spiffs'],
                    spiffs_address=row['spiffs_address'], project_id=row['project_id'],
                    download_url_bootloader=row['download_url_bootloader'],
                    checksum_bootloader=row['checksum_bootloader'],
                    download_url_otadata=row['download_url_otadata'],
                    otadata_address=row['otadata_address'], checksum_otadata=row['checksum_otadata'],
                )
                newFirmware.save()
            return True  # Firmware table is updated
        return False  # We didn't get data back from Fermentrack.com, or there was an error
    def local_filename(self, bintype):
        """Build the local file name for one of this firmware's binaries.

        Args:
            bintype: which binary ("firmware", "partitions", "spiffs",
                "bootloader", or "otadata").
        """
        def stripslashes(string):
            # Keep path separators out of the generated file name.
            return string.replace('\\', '').replace('/', '')
        fname_base = stripslashes(self.family.name) + " - " + stripslashes(self.name) + " - "
        fname_base += "v" + stripslashes(self.version) + "r" + stripslashes(self.revision)
        if len(self.variant) > 0:
            fname_base += " -- " + stripslashes(self.variant)
        fname_base += " - " + stripslashes(bintype)  # For SPIFFS, Partition, etc.
        fname_base += self.family.file_suffix()
        return fname_base
    @classmethod
    def local_filepath(cls):
        """Return the directory where downloaded firmware files are stored."""
        return settings.ROOT_DIR / "firmware_flash" / "firmware"
    def full_filepath(self, bintype):
        """Return the full local path for the requested binary type."""
        return self.local_filepath() / self.local_filename(bintype)
    @classmethod
    def download_file(cls, full_path, url, checksum, check_checksum, force_download):
        """Ensure a valid copy of `url` exists at `full_path`.

        An existing file is reused when its SHA256 matches `checksum`
        (unless `force_download`); otherwise a fresh copy is downloaded.

        Returns:
            True when a file (checksum-valid, if requested) is in place,
            False otherwise.
        """
        if os.path.isfile(full_path):
            if force_download:  # If we're just going to force the download anyways, just kill the file
                os.remove(full_path)
            elif checksum == fhash.hash_of_file(full_path):  # If the file already exists check the checksum
                # The file is valid - return the path
                return True
            else:
                # The checksum check failed - Kill the file
                os.remove(full_path)
        if len(url) < 12:  # If we don't have a URL, we can't download anything
            return False
        # So either we don't have a downloaded copy (or it's invalid). Let's download a new one.
        # Stream in 64 KiB chunks: the previous iter_content() call had no
        # chunk_size and therefore iterated ONE BYTE at a time. The context
        # manager releases the connection once the download completes.
        with requests.get(url, stream=True, timeout=30) as r:
            with open(full_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=65536):
                    f.write(chunk)
        # Now, let's check that the file is valid (but only if check_checksum is true)
        if check_checksum:
            if os.path.isfile(full_path):
                # If the file already exists check the checksum (and delete if it fails)
                if checksum != fhash.hash_of_file(full_path):
                    os.remove(full_path)
                    return False
            else:
                return False
        # The file is valid (or we aren't checking checksums). Return the path.
        return True
    def download_to_file(self, check_checksum=True, force_download=False):
        """Download every binary this firmware needs (main image plus any
        ESP32 partition/SPIFFS/bootloader/otadata images).

        Returns True only when all applicable downloads succeed.
        """
        # If this is a multi-part firmware (ESP32, with partitions or SPIFFS) then download the additional parts.
        if len(self.download_url_partitions) > 12:
            if not self.download_file(self.full_filepath("partitions"), self.download_url_partitions,
                                      self.checksum_partitions, check_checksum, force_download):
                return False
        if len(self.download_url_spiffs) > 12 and len(self.spiffs_address) > 2:
            if not self.download_file(self.full_filepath("spiffs"), self.download_url_spiffs,
                                      self.checksum_spiffs, check_checksum, force_download):
                return False
        if len(self.download_url_bootloader) > 12:
            if not self.download_file(self.full_filepath("bootloader"), self.download_url_bootloader,
                                      self.checksum_bootloader, check_checksum, force_download):
                return False
        if len(self.download_url_otadata) > 12 and len(self.otadata_address) > 2:
            if not self.download_file(self.full_filepath("otadata"), self.download_url_otadata,
                                      self.checksum_otadata, check_checksum, force_download):
                return False
        # Always download the main firmware
        return self.download_file(self.full_filepath("firmware"), self.download_url, self.checksum, check_checksum, force_download)
class Board(models.Model):
    """A specific controller board within a DeviceFamily, including the
    extra options passed to the flash tool."""
    class Meta:
        verbose_name = "Board"
        verbose_name_plural = "Boards"
    WEIGHT_CHOICES = (
        (1, "1 (Highest)"),
        (2, "2"),
        (3, "3"),
        (4, "4"),
        (5, "5"),
        (6, "6"),
        (7, "7"),
        (8, "8"),
        (9, "9 (Lowest)"),
    )
    name = models.CharField(max_length=128, blank=False, null=False, help_text="The name of the board")
    family = models.ForeignKey('DeviceFamily', on_delete=models.CASCADE)
    description = models.TextField(default="", blank=True, null=False, help_text="The description of the board")
    weight = models.IntegerField(default=5, help_text="Weight for sorting (Lower weights rise to the top)",
                                 choices=WEIGHT_CHOICES)
    flash_options_json = models.TextField(default="", blank=True, null=False,
                                          help_text="A JSON list containing options to pass to subprocess")
    def __str__(self):
        return self.name + " - " + str(self.family)
    @staticmethod
    def load_from_website():
        """Replace the local Board table with the list served by
        fermentrack.com.

        Returns True when the table was refreshed, False when the request
        failed or returned no rows.
        """
        try:
            url = FERMENTRACK_COM_URL + "/api/board_list/all/"
            response = requests.get(url)
            data = response.json()
        # NOTE(review): bare except also hides programming errors; treated
        # as deliberate best-effort behavior and left unchanged here.
        except:
            return False
        if len(data) > 0:
            # If we got data, clear out the cache of Firmware
            Board.objects.all().delete()
            # Then loop through the data we received and recreate it again
            for row in data:
                try:
                    # This gets wrapped in a try/except as I don't want this failing if the local copy of Fermentrack
                    # is slightly behind what is available at Fermentrack.com (eg - if there are new device families)
                    newBoard = Board(
                        name=row['name'], family_id=row['family_id'], description=row['description'], weight=row['weight'],
                        flash_options_json=row['flash_options_json'], id=row['id'],
                    )
                    newBoard.save()
                except:
                    pass
            return True  # Board table is updated
        return False  # We didn't get data back from Fermentrack.com, or there was an error
class FlashRequest(models.Model):
    """A queued request to flash a firmware image onto a board via a serial
    port, tracking the outcome of the flash tool run."""
    STATUS_QUEUED = 'queued'
    STATUS_RUNNING = 'running'
    STATUS_FINISHED = 'finished'
    STATUS_FAILED = 'failed'
    STATUS_CHOICES = (
        (STATUS_QUEUED, 'Queued'),
        (STATUS_RUNNING, 'Running'),
        (STATUS_FINISHED, 'Finished'),
        (STATUS_FAILED, 'Failed'),
    )
    # huey_task_id = models.CharField(max_length=64, help_text="Task ID used within Huey for tracking status")
    # NOTE(review): STATUS_CHOICES is defined but not passed as choices= on
    # the status field — confirm whether that is intentional.
    status = models.CharField(max_length=32, default=STATUS_QUEUED)
    firmware_to_flash = models.ForeignKey('Firmware', on_delete=models.CASCADE, help_text="Firmware to flash")
    board_type = models.ForeignKey('Board', on_delete=models.CASCADE, help_text="Board type being flashed")
    serial_port = models.CharField(max_length=255, help_text="Path to the serial device used with the flash tool")
    result_text = models.CharField(max_length=255, default=None, blank=True, null=True,
                                   help_text="String explaining the result status")
    flash_output = models.TextField(null=True, blank=True, default=None, help_text="Output from the flash tool")
    created = models.DateTimeField(help_text="The date this flash request was created", auto_now_add=True)
    def fail(self, result_text, flash_output=""):
        """ FlashRequest.fail is just a fast way to set the status & result text and save the object """
        self.result_text = result_text
        self.flash_output = flash_output
        self.status = self.STATUS_FAILED
        self.save()
        return True
    def succeed(self, result_text, flash_output=""):
        """ FlashRequest.succeed is just a fast way to set the status & result text and save the object """
        self.result_text = result_text
        self.flash_output = flash_output
        self.status = self.STATUS_FINISHED
        self.save()
        return True
class Project(models.Model):
    """The upstream project a firmware belongs to (links for docs/support)."""
    class Meta:
        verbose_name = "Project"
        verbose_name_plural = "Projects"
    WEIGHT_CHOICES = (
        (1, "1 (Highest)"),
        (2, "2"),
        (3, "3"),
        (4, "4"),
        (5, "5"),
        (6, "6"),
        (7, "7"),
        (8, "8"),
        (9, "9 (Lowest)"),
    )
    name = models.CharField(max_length=128, blank=False, null=False,
                            help_text="The name of the project the firmware is associated with")
    description = models.TextField(default="", blank=True, null=False, help_text="The description of the project")
    project_url = models.CharField(max_length=255, default="", blank=True, null=False,
                                   help_text="The URL for the project associated with the firmware")
    documentation_url = models.CharField(max_length=255, default="", blank=True, null=False,
                                         help_text="The URL for documentation/help on the firmware (if any)")
    support_url = models.CharField(max_length=255, default="", blank=True, null=False,
                                   help_text="The URL for support (if any, generally a forum thread)")
    weight = models.IntegerField(default=5, help_text="Weight for sorting (Lower weights rise to the top)",
                                 choices=WEIGHT_CHOICES)
    show_in_standalone_flasher = models.BooleanField(default=False, help_text="Should this show standalone flash app?")
    def __str__(self):
        return self.name
    @staticmethod
    def load_from_website():
        """Replace the local Project table with the list served by
        fermentrack.com.

        Returns True when the table was refreshed, False when the request
        failed or returned no rows.
        """
        try:
            url = FERMENTRACK_COM_URL + "/api/project_list/all/"
            response = requests.get(url)
            data = response.json()
        # NOTE(review): bare except also hides programming errors; treated
        # as deliberate best-effort behavior and left unchanged here.
        except:
            return False
        if len(data) > 0:
            # If we got data, clear out the cache of Firmware
            Project.objects.all().delete()
            # Then loop through the data we received and recreate it again
            for row in data:
                newProject = Project(
                    name=row['name'], project_url=row['project_url'], documentation_url=row['documentation_url'], weight=row['weight'],
                    support_url=row['support_url'], id=row['id'], description=row['description']
                )
                newProject.save()
            return True  # Project table is updated
        return False  # We didn't get data back from Fermentrack.com, or there was an error
| mit | 26692e6053ab1e45047ccfd4bf1ef4d1 | 43.010846 | 139 | 0.590369 | 4.107085 | false | false | false | false |
thorrak/fermentrack | app/udev_integration.py | 1 | 1892 | from __future__ import print_function
try:
import pyudev
pyudev_available = True
except:
pyudev_available = False
import sys
def get_platform():
    """Translate sys.platform into a human-readable OS name.

    Unrecognized platforms are returned unchanged.
    """
    platform_names = {
        'linux': 'Linux',
        'linux1': 'Linux',
        'linux2': 'Linux',
        'darwin': 'OS X',
        'win32': 'Windows',
    }
    return platform_names.get(sys.platform, sys.platform)
def valid_platform_for_udev():
    """True only when running on Linux AND the pyudev module imported successfully."""
    return get_platform() == "Linux" and pyudev_available
# get_serial_from_node() takes a "node" (/dev/TTYUSB0) and returns the serial number of the device (Silicon_Labs_CP2104_USB_to_UART_Bridge_Controller_011E8348)
def get_serial_from_node(device_node):
    """Return the udev ID_SERIAL for the tty device at `device_node`, or None.

    Returns None both when no matching node is found and when pyudev is
    unusable (e.g. on a non-Linux platform).
    """
    try:
        context = pyudev.Context()
        for device in context.list_devices(subsystem="tty"):
            if device.device_node == device_node:
                return device.get("ID_SERIAL")
    except Exception:
        # Narrowed from a bare except: we weren't able to use pyudev (possibly
        # because of an invalid operating system) - treat it as "not found"
        pass
    return None
# get_node_from_serial() takes a udev serial number, and returns the associated node (if found)
def get_node_from_serial(device_serial):
    """Return the device node (e.g. /dev/ttyUSB0) whose udev ID_SERIAL equals `device_serial`, or None."""
    try:
        context = pyudev.Context()
        for device in context.list_devices(subsystem="tty"):
            if device.get("ID_SERIAL", "") == device_serial:
                return device.device_node
    except Exception:
        # Narrowed from a bare except: we weren't able to use pyudev (possibly
        # because of an invalid operating system) - treat it as "not found"
        pass
    return None
# The following was used for testing during development
if __name__ == "__main__":
    # Manual smoke test: resolve /dev/ttyUSB0 to its udev serial and back again.
    print(get_platform())
    context = pyudev.Context()  # NOTE(review): unused below - presumably left over from testing
    serial_from_node = get_serial_from_node("/dev/ttyUSB0")
    node_from_serial = get_node_from_serial(serial_from_node)
    print(u'{} ({})'.format(serial_from_node, node_from_serial))
| mit | 5ac91bccd535c7a82329c28dcfe13215 | 27.238806 | 159 | 0.632135 | 3.60381 | false | false | false | false |
thorrak/fermentrack | app/decorators.py | 1 | 3846 | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
import urllib.parse
from constance import config # For the explicitly user-configurable stuff
from django.contrib.auth.decorators import user_passes_test
# There is really nothing that would prevent me from hijacking user_passes_test from the Django decorators here.
def constance_check(test_func, next_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    A decorator factory for views that gate on constance settings (used by
    site_is_configured and gravity_support_enabled below).

    `test_func` must be a callable taking NO arguments that returns True when
    the check passes; when it fails, the request is redirected to `next_url`
    (falling back to settings.CONSTANCE_SETUP_URL) with the original path
    carried in `redirect_field_name`.
    """
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(request, *args, **kwargs):
            if test_func():
                # If the test function we were passed returns true, just return the view
                return view_func(request, *args, **kwargs)
            # Otherwise, build the redirect
            path = request.build_absolute_uri()
            resolved_setup_url = resolve_url(next_url or settings.CONSTANCE_SETUP_URL)
            # If the setup url is the same scheme and net location then just
            # use the path as the "next" url.
            setup_scheme, setup_netloc = urllib.parse.urlparse(resolved_setup_url)[:2]
            current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
            if ((not setup_scheme or setup_scheme == current_scheme) and
                    (not setup_netloc or setup_netloc == current_netloc)):
                path = request.get_full_path()
            # TODO - Change this to redirect, not redirect to login
            # (redirect_to_login is reused here purely for its "next=" handling)
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(path, resolved_setup_url, redirect_field_name)
        return _wrapped_view
    return decorator
def site_is_configured(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator for views that checks that initial site configuration has been
    completed (the USER_HAS_COMPLETED_CONFIGURATION constance flag),
    redirecting to the setup page if it hasn't.

    (The original docstring was copy-pasted from Django's login decorator and
    incorrectly described a login check.)
    """
    def check_constance_is_configured():
        return config.USER_HAS_COMPLETED_CONFIGURATION
    actual_decorator = constance_check(
        check_constance_is_configured,
        next_url=login_url,
        redirect_field_name=redirect_field_name
    )
    # Support both @site_is_configured and @site_is_configured(...) usage
    if function:
        return actual_decorator(function)
    return actual_decorator
def login_if_required_for_dashboard(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Require login for dashboard views only when the REQUIRE_LOGIN_FOR_DASHBOARD
    constance flag is enabled; otherwise every visitor passes.
    """
    def _dashboard_access_test(user):
        # Anonymous access is fine unless the admin has required login
        return user.is_authenticated if config.REQUIRE_LOGIN_FOR_DASHBOARD else True

    decorator = user_passes_test(
        _dashboard_access_test,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # Support both bare-decorator and parameterized usage
    return decorator(function) if function else decorator
def gravity_support_enabled(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator for views that checks that gravity-sensor support is enabled
    (the GRAVITY_SUPPORT_ENABLED constance flag), redirecting to the setup
    page if it isn't.

    (The original docstring was copy-pasted from Django's login decorator and
    incorrectly described a login check.)
    """
    def check_gravity_support_enabled():
        return config.GRAVITY_SUPPORT_ENABLED
    actual_decorator = constance_check(
        check_gravity_support_enabled,
        next_url=login_url,
        redirect_field_name=redirect_field_name
    )
    # Support both bare-decorator and parameterized usage
    if function:
        return actual_decorator(function)
    return actual_decorator
| mit | 019f5235479b92cca398ce5fc05a9d1a | 38.244898 | 112 | 0.682007 | 4.157838 | false | true | false | false |
thorrak/fermentrack | external_push/migrations/0006_Grainfather_Support.py | 1 | 1953 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2019-12-23 13:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the GrainfatherPushTarget model
    # used to push gravity-sensor data to Grainfather's logging endpoint.
    # Do not hand-edit the field definitions below.

    dependencies = [
        ('gravity', '0004_BrewersFriend_Support'),
        ('external_push', '0005_ThingSpeak_Support'),
    ]

    operations = [
        migrations.CreateModel(
            name='GrainfatherPushTarget',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('active', 'Active'), ('disabled', 'Disabled'), ('error', 'Error')], default='active', help_text='Status of this push target', max_length=24)),
                ('push_frequency', models.IntegerField(choices=[(901, '15 minutes'), (1801, '30 minutes'), (3601, '1 hour')], default=900, help_text='How often to push data to the target')),
                ('logging_url', models.CharField(default='', help_text='Grainfather Logging URL', max_length=256)),
                ('gf_name', models.CharField(default='', help_text='Grainfather brew id (number)', max_length=256)),
                ('error_text', models.TextField(blank=True, default='', help_text='The error (if any) encountered on the last push attempt', null=True)),
                ('last_triggered', models.DateTimeField(auto_now_add=True, help_text='The last time we pushed data to this target')),
                ('gravity_sensor_to_push', models.ForeignKey(help_text='Gravity Sensor to push (create one push target per sensor to push)', on_delete=django.db.models.deletion.CASCADE, related_name='grainfather_push_target', to='gravity.GravitySensor')),
            ],
            options={
                'verbose_name': 'Grainfather Push Target',
                'verbose_name_plural': 'Grainfather Push Targets',
            },
        ),
    ]
| mit | 1972c2be8389fd78434bd97a18933b54 | 56.441176 | 255 | 0.62468 | 3.953441 | false | false | false | false |
thorrak/fermentrack | app/api/clog.py | 1 | 4679 | import os
from django.http import HttpResponse
from django.conf import settings
from app.models import BrewPiDevice
from pathlib import Path
def get_filepath_to_log(device_type, logfile="", device_id=None) -> "Path | None":
    """Return the path to the log file for the given device/log type.

    get_filepath_to_log is broken out so that we can use it in help/other
    templates to display which log file is being loaded.

    Raises ValueError when device_type is "brewpi" but no matching
    BrewPiDevice could be loaded; returns None for an unknown device_type.
    (Annotation fixed: the original `-> Path or None` evaluated to just Path.)
    """
    if device_type == "brewpi":
        try:
            device = BrewPiDevice.objects.get(id=device_id)
            log_filename = 'dev-{}-{}.log'.format(str(device.circus_parameter()).lower(), logfile)
        except Exception:
            # Narrowed from a bare except: any lookup failure means
            # "unable to load the device"
            raise ValueError("No brewpi device with id {}".format(device_id))
    elif device_type == "spawner":
        log_filename = 'fermentrack-processmgr.log'
    elif device_type == "fermentrack":
        log_filename = 'fermentrack-stderr.log'
    elif device_type == "ispindel":
        log_filename = 'ispindel_raw_output.log'
    elif device_type == "huey":
        log_filename = f'huey-{logfile}.log'  # Logfile is stderr or stdout
    elif device_type == "upgrade":
        log_filename = 'upgrade.log'
    elif device_type == "circusd":
        log_filename = 'circusd.log'
    else:
        return None

    # Once we've determined the filename from logfile and device_type, build
    # the full path under the app's log directory
    logfile_path = settings.ROOT_DIR / 'log' / log_filename
    return logfile_path
def get_device_log_combined(req, return_type, device_type, logfile, device_id=None, lines=100):
    """Read the log files created by circus for spawned controllers.

    Returns an HttpResponse containing the last `lines` lines of the requested
    log, either as plain text or as a crude JSON transformation; all error
    paths return an HttpResponse with status 500.
    """
    # TODO - THIS IS A HACK. This needs to be fixed properly, but that will require some refactoring
    if device_type == "upgrade":
        lines = 1000

    # Although the urlpattern checks if the logfile type is valid, this gets used in the filename we're reading so
    # recheck it here just to be safe.
    valid_logfile_types = ['stdout', 'stderr']
    if logfile not in valid_logfile_types:
        # Bug fix: the original formatted device_id into this message even
        # though it reports an invalid *file type*
        return HttpResponse("File type {} not a valid log file to read".format(logfile), status=500)

    # Device_type determines the other part of the logfile to read. Valid options are:
    # brewpi - A BrewPiDevice object
    # gravity - A specific gravity sensor object
    # spawner - the circus spawner (not the daemon)
    # fermentrack - Fermentrack itself
    # ispindel - iSpindel raw log
    # upgrade - The log of the upgrade process (from Git)
    # huey - The Huey (task manager) logs
    # circusd - The log for Circusd itself
    valid_device_types = ['brewpi', 'gravity', 'spawner', 'fermentrack', 'ispindel', 'upgrade', 'huey', 'circusd']
    if device_type not in valid_device_types:
        # TODO - Log this
        return HttpResponse("Cannot read log files for devices of type {} ".format(device_type), status=500)

    # Load the full path to the logfile, then open it and load the file itself
    logfile_path = get_filepath_to_log(device_type, logfile, device_id)
    try:
        # Bug fix: use a context manager so the fd is closed even if tail() raises
        with open(logfile_path) as logfile_fd:
            ret = tail(logfile_fd, int(lines))
    except IOError as e:
        # Generally if we hit this the log file doesn't exist
        return HttpResponse("Error opening {} logfile: {}".format(logfile_path, str(e)), status=500)

    # Now that we have the log loaded, format the output to match what the user wants (either text or json)
    if return_type == "text":
        return HttpResponse(ret, content_type="text/plain")
    elif return_type == "json":
        new_ret = []
        # TODO: This is probably too hacky, but only matters if we end up using it :)
        for line in ret:
            new_ret.append("{" + line.split(" {")[1])
        return HttpResponse(new_ret, content_type="application/json")
    else:
        return HttpResponse("Invalid log output type: {} ".format(return_type), status=500)
def tail(f, lines=1, _buffer=4098):
    """Return (approximately) the last `lines` lines of the open file `f`.

    Seeks backwards from the end of the file in `_buffer`-sized steps until
    enough lines have been read; if the file is smaller than the window, the
    whole file is read. Returns at most `lines` entries.
    """
    collected = []
    # Number of buffer-sized blocks back from EOF to start reading at
    blocks_back = 1
    # Keep widening the window until we have at least `lines` lines
    while len(collected) < lines:
        try:
            f.seek(-blocks_back * _buffer, os.SEEK_END)
        except IOError:
            # Either file is too small, or too many lines requested -
            # fall back to reading the whole file
            f.seek(0)
            collected = f.readlines()
            break
        collected = f.readlines()
        if len(collected) > lines:
            # We found enough lines, get out
            break
        blocks_back += 1
    return collected[-lines:]
| mit | bbb5671ee940bc8e020d10dfd244f1b0 | 41.153153 | 117 | 0.645223 | 3.825838 | false | false | false | false |
thorrak/fermentrack | gravity/tilt/TiltHydrometer.py | 1 | 11713 | import datetime
from typing import List, Dict, TYPE_CHECKING
from collections import deque
from gravity.models import TiltConfiguration, GravityLogPoint, GravitySensor
# from asgiref.sync import sync_to_async
class TiltHydrometer(object):
    """Tracks the in-memory state of a single Tilt hydrometer, keyed by its color.

    Readings arrive via process_decoded_values(), are smoothed over a rolling
    window, and are periodically persisted via save_value_to_fermentrack().
    """
    # These are all the UUIDs currently available as Tilt colors
    tilt_colors = {
        'Red': "a495bb10-c5b1-4b44-b512-1370f02d74de",
        'Green': "a495bb20-c5b1-4b44-b512-1370f02d74de",
        'Black': "a495bb30-c5b1-4b44-b512-1370f02d74de",
        'Purple': "a495bb40-c5b1-4b44-b512-1370f02d74de",
        'Orange': "a495bb50-c5b1-4b44-b512-1370f02d74de",
        'Blue': "a495bb60-c5b1-4b44-b512-1370f02d74de",
        'Yellow': "a495bb70-c5b1-4b44-b512-1370f02d74de",
        'Pink': "a495bb80-c5b1-4b44-b512-1370f02d74de",
    }  # type: Dict[str, str]

    # Reverse lookup tables (UUID -> color); created lazily at first use in color_lookup
    color_lookup_table = {}  # type: Dict[str, str]
    color_lookup_table_no_dash = {}  # type: Dict[str, str]
    def __init__(self, color: str):
        """Set up empty smoothing buffers and attach the Fermentrack config for `color` (if one exists)."""
        self.color = color  # type: str
        # The smoothing_window is set in the TiltConfiguration object - just defaulting it here for now
        self.smoothing_window = 60  # type: int
        self.gravity_list = deque(maxlen=self.smoothing_window)  # type: deque[float]
        self.temp_list = deque(maxlen=self.smoothing_window)  # type: deque[int]
        # Start with the "last received" timestamp already expired so the cache reads as stale
        self.last_value_received = datetime.datetime.now() - self._cache_expiry_seconds()  # type: datetime.datetime
        self.last_saved_value = datetime.datetime.now()  # type: datetime.datetime

        self.gravity = 0.0  # type: float
        self.raw_gravity = 0.0  # type: float
        # Note - temp is always in fahrenheit
        self.temp = 0  # type: int
        self.raw_temp = 0  # type: int
        self.rssi = 0  # type: int

        # v3 and newer Tilts use the tx_pwr field to send the battery life
        self.sends_battery = False  # type: bool
        self.weeks_on_battery = 0  # type: int
        self.firmware_version = 0

        # Tilt Pros are determined when we receive a gravity reading > 5000
        self.tilt_pro = False  # type: bool

        self.obj = None  # type: TiltConfiguration

        # Let's load the object from Fermentrack as part of the initialization
        self.load_obj_from_fermentrack()
        if self.obj is not None:
            self.temp_format = self.obj.sensor.temp_format
        else:
            self.temp_format = GravitySensor.TEMP_FAHRENHEIT  # Defaulting to Fahrenheit as that's what the Tilt sends
def __str__(self):
return self.color
def _cache_expiry_seconds(self) -> datetime.timedelta:
# Assume we get 1 out of every 4 readings
return datetime.timedelta(seconds=(self.smoothing_window * 1.2 * 4))
    def _cache_expired(self) -> bool:
        """True when cached readings should be discarded (too old, or the temp format changed)."""
        if self.obj is not None:
            # The other condition we want to explicitly clear the cache is if the temp format has changed between what
            # was loaded from the sensor object & what we previously had cached when the object was loaded
            if self.temp_format != self.obj.sensor.temp_format:
                # Clear the cached temp/gravity values &
                self.temp_format = self.obj.sensor.temp_format  # Cache the new temp format
                return True
        # Stale when we've gone longer than the expiry window without a reading
        return self.last_value_received <= datetime.datetime.now() - self._cache_expiry_seconds()
    def _add_to_list(self, gravity, temp):
        """Append one gravity/temp reading to the smoothing buffers, clearing them first if stale."""
        # This adds a gravity/temp value to the list for smoothing/averaging
        if self._cache_expired():
            # The cache expired (we lost contact with the Tilt for too long). Clear the lists.
            self.gravity_list.clear()
            self.temp_list.clear()

        # Thankfully, deque enforces queue length, so all we need to do is add the value
        self.last_value_received = datetime.datetime.now()
        self.gravity_list.append(gravity)
        self.temp_list.append(temp)
def should_save(self) -> bool:
if self.obj is None:
return False
return self.last_saved_value <= datetime.datetime.now() - datetime.timedelta(seconds=(self.obj.polling_frequency))
# def process_ibeacon_info(self, ibeacon_info: IBeaconAdvertisement, rssi):
# self.raw_gravity = ibeacon_info.minor / 1000
# if self.obj is None:
# # If there is no TiltConfiguration object set, just use the raw gravity the Tilt provided
# self.gravity = self.raw_gravity
# else:
# # Otherwise, apply the calibration
# self.gravity = self.obj.apply_gravity_calibration(self.raw_gravity)
#
# # Temps are always provided in degrees fahrenheit - Convert to Celsius if required
# # Note - convert_temp_to_sensor returns as a tuple (with units) - we only want the degrees not the units
# self.raw_temp, _ = self.obj.sensor.convert_temp_to_sensor_format(ibeacon_info.major,
# GravitySensor.TEMP_FAHRENHEIT)
# self.temp = self.raw_temp
# self.rssi = rssi
# self._add_to_list(self.gravity, self.temp)
    def process_decoded_values(self, sensor_gravity: int, sensor_temp: int, rssi: int, tx_pwr: int):
        """Ingest one decoded Tilt beacon (raw integer fields) into this object's state."""
        if sensor_temp == 999:
            # For the latest Tilts, this is now actually a special code indicating that the gravity is the version info.
            # Regardless of whether or not we end up doing anything with that information, we definitely do not want to
            # add it to the list
            self.firmware_version = sensor_gravity
            return

        if sensor_gravity >= 5000:
            # Tilt Pro support (Pro sends gravity x10000 and temp x10)
            self.tilt_pro = True
            self.raw_gravity = sensor_gravity / 10000
            usable_temp = sensor_temp / 10
        else:
            # Tilt "Classic" support (gravity x1000, temp in whole degrees F)
            self.tilt_pro = False
            self.raw_gravity = sensor_gravity / 1000
            usable_temp = sensor_temp

        # v3 Tilts send battery age in weeks using the tx_pwr field, but they have a hack in place to maintain
        # compatibility with iPhones where they alternate sending "197" (unsigned) or "-59" (signed) with the actual
        # number of weeks since the battery was changed. If we see the 197 (-59) then we'll set "sends_battery" to true
        # and then update the weeks_on_battery the next time we see a beacon
        if tx_pwr == 197:
            self.sends_battery = True
        elif self.sends_battery:
            self.weeks_on_battery = tx_pwr

        if self.obj is None:
            # If there is no TiltConfiguration object set, just use the raw gravity the Tilt provided
            self.gravity = self.raw_gravity
            self.raw_temp = usable_temp
        else:
            # Otherwise, apply the calibration
            self.gravity = self.obj.apply_gravity_calibration(self.raw_gravity)

            # Temps are always provided in degrees fahrenheit - Convert to Celsius if required
            # Note - convert_temp_to_sensor returns as a tuple (with units) - we only want the degrees not the units
            self.raw_temp, _ = self.obj.sensor.convert_temp_to_sensor_format(usable_temp,
                                                                             GravitySensor.TEMP_FAHRENHEIT)

        self.temp = self.raw_temp
        self.rssi = rssi
        self._add_to_list(self.gravity, self.temp)
def smoothed_gravity(self):
# Return the average gravity in gravity_list
if len(self.gravity_list) <= 0:
return None
grav_total = 0
for grav in self.gravity_list:
grav_total += grav
return round(grav_total / len(self.gravity_list), 3) # Average it out & round
def smoothed_temp(self):
# Return the average temp in temp_list
if len(self.temp_list) <= 0:
return None
temp_total = 0
for temp in self.temp_list:
temp_total += temp
return round(temp_total / len(self.temp_list), 3) # Average it out & round
@classmethod
def color_lookup(cls, color):
if len(cls.color_lookup_table) <= 0:
cls.color_lookup_table = {cls.tilt_colors[x]: x for x in cls.tilt_colors}
if len(cls.color_lookup_table_no_dash) <= 0:
cls.color_lookup_table_no_dash = {cls.tilt_colors[x].replace("-",""): x for x in cls.tilt_colors}
if color in cls.color_lookup_table:
return cls.color_lookup_table[color]
elif color in cls.color_lookup_table_no_dash:
return cls.color_lookup_table_no_dash[color]
else:
return None
def print_data(self):
print("{} Tilt: {} ({}) / {} F".format(self.color, self.smoothed_gravity(), self.gravity, self.temp))
# @sync_to_async
def load_obj_from_fermentrack(self, obj: TiltConfiguration = None):
if obj is None:
# If we weren't handed the object itself, try to load it
try:
obj = TiltConfiguration.objects.get(color=self.color,
connection_type=TiltConfiguration.CONNECTION_BLUETOOTH)
except:
# TODO - Rewrite this slightly
self.obj = None
return False
# If the smoothing window changed, just recreate the deque objects
if obj.smoothing_window_vals != self.smoothing_window:
self.smoothing_window = obj.smoothing_window_vals
self.gravity_list = deque(maxlen=self.smoothing_window)
self.temp_list = deque(maxlen=self.smoothing_window)
self.obj = obj
# @sync_to_async
def save_value_to_fermentrack(self, verbose=False):
if self.obj is None:
# If we don't have a TiltConfiguration object loaded, we can't save the data point
if verbose:
print("{} Tilt: No object loaded for this color".format(self.color))
return False
if self._cache_expired():
if verbose:
print("{} Tilt: Cache is expired/No data available to save".format(self.color))
return False
if self.smoothed_gravity() is None or self.smoothed_temp() is None:
if verbose:
print("{} Tilt: No data available to save".format(self.color))
return False
# TODO - Test that temp_format actually works as intended here
new_point = GravityLogPoint(
gravity=self.smoothed_gravity(),
gravity_latest=self.gravity,
temp=self.smoothed_temp(),
temp_latest=self.temp,
temp_format=self.obj.sensor.temp_format,
temp_is_estimate=False,
associated_device=self.obj.sensor,
)
if self.obj.sensor.active_log is not None:
new_point.associated_log = self.obj.sensor.active_log
new_point.save()
# Also, set/save the RSSI/Raw Temp/Raw Gravity so we can load it for debugging
self.obj.rssi = self.rssi
self.obj.raw_gravity = self.raw_gravity
self.obj.raw_temp = self.raw_temp
self.obj.tilt_pro = self.tilt_pro
self.obj.sends_battery = self.sends_battery
self.obj.weeks_on_battery = self.weeks_on_battery
self.obj.firmware_version = self.firmware_version
self.obj.save_extras_to_redis()
self.last_saved_value = datetime.datetime.now()
if verbose:
print("{} Tilt: Logging {}".format(self.color, self.smoothed_gravity()))
else:
if verbose:
print("No data received.") | mit | 5262c17012e6807dea6cb5b3fa5d3d5a | 42.546468 | 122 | 0.61086 | 3.659169 | false | false | false | false |
pycassa/pycassa | ez_setup.py | 73 | 8596 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# Version of setuptools to bootstrap, and the PyPI mirror to fetch it from
DEFAULT_VERSION = "0.9.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(tarball, install_args=()):
    """Unpack the setuptools tarball into a temp dir and run `setup.py install`.

    Returns 2 (a shell-style exit code) when the install fails; the temp
    directory is removed and the cwd restored in all cases.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top-level dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack `tarball` in a temp dir and build a setuptools egg into `to_dir`.

    Raises IOError if the expected egg file is not present afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top-level dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download setuptools (if its egg isn't already present), build the egg, and import from it."""
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # Make the freshly-built egg importable and remember where it came from
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= `version` is importable, bootstrapping a download if needed."""
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Record whether setuptools was already imported BEFORE we touch it -
    # if so, we can't safely replace it in this process
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all - bootstrap it
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        # sys.exc_info()[1] instead of `as e` keeps py2/py3 source compatibility
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            # An older version is importable but not yet imported - drop it
            # from sys.modules so the downloaded egg can take its place
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    NOTE(review): `delay` is accepted but never used in this body.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # py3 first, falling back to the py2 module name
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    (Backport of TarFile.extractall for very old Pythons; `self` is the
    TarFile instance.)
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # py2.3-only path: `cmp` and list.sort(cmp_func) do not exist on py3,
        # but this branch is unreachable there
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories
    # (deepest-first thanks to the reverse sort, so parents stay writable).
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options (positional arguments are ignored).
    """
    option_parser = optparse.OptionParser()
    option_parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    option_parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    options, _positional = option_parser.parse_args()
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    options = _parse_args()
    tarball = download_setuptools(download_base=options.download_base)
    return _install(tarball, _build_install_args(options))


if __name__ == '__main__':
    # Exit with _install's return code (None/0 on success, 2 on failure)
    sys.exit(main())
| mit | f875f3d1c93a33701fad72b117b0cff7 | 32.317829 | 78 | 0.616799 | 4.031895 | false | false | false | false |
pycassa/pycassa | pycassa/logging/pool_stats_logger.py | 1 | 3708 | import pycassa_logger
import logging
import threading
import functools
def sync(lock_name):
    """Method decorator factory: hold the lock stored at `self.<lock_name>` for the call.

    The wrapped method's return value is passed through unchanged and the
    lock is always released, even when the method raises.
    """
    def wrapper(f):
        @functools.wraps(f)
        def wrapped(self, *args, **kwargs):
            lock = getattr(self, lock_name)
            # `with` is equivalent to the original acquire/try/finally-release
            with lock:
                return f(self, *args, **kwargs)
        return wrapped
    return wrapper
class StatsLogger(object):
    """
    Basic stats logger that increment counts. You can plot these as `COUNTER` or
    `DERIVED` (RRD) or apply derivative (graphite) except for ``opened``, which tracks
    the currently opened connections.

    Usage::

        >>> pool = ConnectionPool(...)
        >>> stats_logger = StatsLogger()
        >>> pool.add_listener(stats_logger)
        >>>
        >>> # use the pool for a while...
        >>> import pprint
        >>> pprint.pprint(stats_logger.stats)
        {'at_max': 0,
         'checked_in': 401,
         'checked_out': 403,
         'created': {'failure': 0, 'success': 0},
         'disposed': {'failure': 0, 'success': 0},
         'failed': 1,
         'list': 0,
         'opened': {'current': 2, 'max': 2},
         'recycled': 0}

    Get your stats as ``stats_logger.stats`` and push them to your metrics
    system.
    """
    def __init__(self):
        # some callbacks are already locked by pool_lock, it's just simpler to have a global here for all operations
        self.lock = threading.Lock()
        self.reset()

    @sync('lock')
    def reset(self):
        """ Reset all counters to 0 """
        self._stats = {
            'created': {
                'success': 0,
                'failure': 0,
            },
            'checked_out': 0,
            'checked_in': 0,
            'opened': {
                'current': 0,
                'max': 0
            },
            'disposed': {
                'success': 0,
                'failure': 0
            },
            'recycled': 0,
            'failed': 0,
            'list': 0,
            'at_max': 0
        }

    def name_changed(self, new_logger):
        # Pool listener callback: keep a reference to the replacement logger
        self.logger = new_logger

    @sync('lock')
    def connection_created(self, dic):
        # Events at INFO or below count as successes; anything noisier is a failure
        level = pycassa_logger.levels[dic.get('level', 'info')]
        if level <= logging.INFO:
            self._stats['created']['success'] += 1
        else:
            self._stats['created']['failure'] += 1

    @sync('lock')
    def connection_checked_out(self, dic):
        self._stats['checked_out'] += 1
        self._update_opened(1)

    @sync('lock')
    def connection_checked_in(self, dic):
        self._stats['checked_in'] += 1
        self._update_opened(-1)

    def _update_opened(self, value):
        # Caller must hold self.lock; tracks current and high-water open counts
        self._stats['opened']['current'] += value
        if self._stats['opened']['current'] > self._stats['opened']['max']:
            self._stats['opened']['max'] = self._stats['opened']['current']

    @sync('lock')
    def connection_disposed(self, dic):
        # Same INFO-threshold convention as connection_created
        level = pycassa_logger.levels[dic.get('level', 'info')]
        if level <= logging.INFO:
            self._stats['disposed']['success'] += 1
        else:
            self._stats['disposed']['failure'] += 1

    @sync('lock')
    def connection_recycled(self, dic):
        self._stats['recycled'] += 1

    @sync('lock')
    def connection_failed(self, dic):
        self._stats['failed'] += 1

    @sync('lock')
    def obtained_server_list(self, dic):
        self._stats['list'] += 1

    @sync('lock')
    def pool_disposed(self, dic):
        # Intentionally not counted
        pass

    @sync('lock')
    def pool_at_max(self, dic):
        self._stats['at_max'] += 1

    @property
    def stats(self):
        # Exposes the live dict (not a copy) - callers should treat it as read-only
        return self._stats
| mit | db1fb511ba9117c8cf3f68ba87eaec19 | 25.869565 | 115 | 0.507012 | 3.961538 | false | false | false | false |
pycassa/pycassa | pycassa/types.py | 1 | 8980 | """
Data type definitions that are used when converting data to and from
the binary format that the data will be stored in.
In addition to the default classes included here, you may also define
custom types by creating a new class that extends :class:`~.CassandraType`.
For example, IntString, which stores an arbitrary integer as a string, may
be defined as follows:
.. code-block:: python
>>> class IntString(pycassa.types.CassandraType):
...
... @staticmethod
... def pack(intval):
... return str(intval)
...
... @staticmethod
... def unpack(strval):
... return int(strval)
"""
import calendar
from datetime import datetime
import pycassa.marshal as marshal
# Public API of this module (controls ``from pycassa.types import *``).
# NOTE(review): Int32Type is defined below but is not listed here, so it is
# excluded from star-imports -- confirm whether that omission is intentional.
__all__ = ('CassandraType', 'BytesType', 'LongType', 'IntegerType',
           'AsciiType', 'UTF8Type', 'TimeUUIDType', 'LexicalUUIDType',
           'CounterColumnType', 'DoubleType', 'FloatType', 'DecimalType',
           'BooleanType', 'DateType', 'OldPycassaDateType',
           'IntermediateDateType', 'CompositeType',
           'UUIDType', 'DynamicCompositeType', 'TimestampType')
class CassandraType(object):
    """
    A data type that Cassandra is aware of and knows how to validate and
    sort.  All other classes in this module subclass this one.

    If `reversed` is true and this is used as a column comparator, the
    columns will be sorted in reverse order.

    The `default` parameter only applies to use of this with
    ColumnFamilyMap, where `default` is used if a row does not contain a
    column corresponding to this item.

    Subclasses may supply their own ``pack``/``unpack``; otherwise packers
    are looked up from :mod:`pycassa.marshal` by class name.
    """

    def __init__(self, reversed=False, default=None):
        self.reversed = reversed
        self.default = default
        cls = type(self)
        if not hasattr(cls, 'pack'):
            self.pack = marshal.packer_for(cls.__name__)
        if not hasattr(cls, 'unpack'):
            self.unpack = marshal.unpacker_for(cls.__name__)

    def __str__(self):
        suffix = "(reversed=" + str(self.reversed).lower() + ")"
        return type(self).__name__ + suffix
class BytesType(CassandraType):
    """ Stores data as a raw, uninterpreted byte array """
    pass
class LongType(CassandraType):
    """ Stores data as a fixed-width 8 byte integer """
    pass
class IntegerType(CassandraType):
    """
    Stores data as a variable-length integer. This
    is a more compact format for storing small integers
    than :class:`~.LongType`, and the limits
    on the size of the integer are much higher.

    .. versionchanged:: 1.2.0
        Prior to 1.2.0, this was always stored as a 4 byte
        integer.
    """
    pass
class Int32Type(CassandraType):
    """ Stores data as a fixed-width 4 byte integer

    NOTE(review): this class is not listed in this module's ``__all__``;
    confirm whether that omission is intentional.
    """
    pass
class AsciiType(CassandraType):
    """ Stores data as ASCII-encoded text """
    pass
class UTF8Type(CassandraType):
    """ Stores data as UTF-8 encoded text """
    pass
class UUIDType(CassandraType):
    """ Stores data as a type 1 or type 4 UUID """
    pass
class TimeUUIDType(CassandraType):
    """ Stores data as a version 1 (time-based) UUID """
    pass
class LexicalUUIDType(CassandraType):
    """ Stores data as a non-version-1 UUID (compared lexically) """
    pass
class CounterColumnType(CassandraType):
    """ A 64 bit counter column """
    pass
class DoubleType(CassandraType):
    """ Stores data as an 8 byte double-precision float """
    pass
class FloatType(CassandraType):
    """ Stores data as a 4 byte single-precision float """
    pass
class DecimalType(CassandraType):
    """
    Stores an unlimited precision decimal number. `decimal.Decimal`
    objects are used by pycassa to represent these objects.
    """
    pass
class BooleanType(CassandraType):
    """ Stores data as a 1 byte boolean """
    pass
class DateType(CassandraType):
    """
    An 8 byte timestamp. This will be returned
    as a :class:`datetime.datetime` instance by pycassa. Either
    :class:`datetime` instances or timestamps will be accepted.

    .. versionchanged:: 1.7.0
        Prior to 1.7.0, datetime objects were expected to be in
        local time. In 1.7.0 and beyond, naive datetimes are
        assumed to be in UTC and tz-aware objects will be
        automatically converted to UTC for storage in Cassandra.
    """
    pass

# Alias: TimestampType is interchangeable with DateType.
TimestampType = DateType
def _to_timestamp(v, use_micros=False):
    """Convert `v` to an integer Unix-epoch timestamp.

    `v` may be a datetime/date (anything with ``utctimetuple()``) or a raw
    number.  Returns milliseconds since the epoch by default, or
    microseconds when `use_micros` is True.

    Raises TypeError for values that are neither datetime-like nor one of
    marshal's accepted number types.
    """
    if use_micros:
        scale = 1e6
        micro_scale = 1.0
    else:
        scale = 1e3
        micro_scale = 1e3
    try:
        converted = calendar.timegm(v.utctimetuple())
        converted = (converted * scale) + \
            (getattr(v, 'microsecond', 0) / micro_scale)
    except AttributeError:
        # Ints and floats are valid timestamps too
        if type(v) not in marshal._number_types:
            raise TypeError('DateType arguments must be a datetime or timestamp')
        converted = v * scale
    # Fix: the original used ``long()``, which is a NameError on Python 3.
    # ``int()`` behaves identically on Python 2 (it auto-promotes to long).
    return int(converted)
class OldPycassaDateType(CassandraType):
    """
    Reads and writes only the DateType layout used by pycassa versions
    1.2.0 to 1.5.0: the number of *microseconds* since the Unix epoch,
    rather than the milliseconds used by cassandra-cli and other clients
    supporting DateType.

    .. versionchanged:: 1.7.0
        Prior to 1.7.0, datetime objects were expected to be in
        local time. In 1.7.0 and beyond, naive datetimes are
        assumed to be in UTC and tz-aware objects will be
        automatically converted to UTC for storage in Cassandra.
    """

    @staticmethod
    def pack(v, *args, **kwargs):
        # Serialize the microsecond timestamp via marshal's long packer.
        return marshal._long_packer.pack(_to_timestamp(v, use_micros=True))

    @staticmethod
    def unpack(v):
        micros = marshal._long_packer.unpack(v)[0]
        return datetime.utcfromtimestamp(micros / 1e6)
class IntermediateDateType(CassandraType):
    """
    Capable of reading either the DateType format written by pycassa
    1.2.0 - 1.5.0 (microseconds) or the correct format used in pycassa
    1.5.1+ (milliseconds).  It only ever *writes* the new, correct format.

    This type is a good choice when you are using DateType as the
    validator for non-indexed column values and you are in the process of
    converting from the old format to the new format.

    It almost certainly *should not be used* for row keys, column names
    (if you care about the sorting), or column values that have a
    secondary index on them.

    .. versionchanged:: 1.7.0
        Prior to 1.7.0, datetime objects were expected to be in
        local time. In 1.7.0 and beyond, naive datetimes are
        assumed to be in UTC and tz-aware objects will be
        automatically converted to UTC for storage in Cassandra.
    """

    @staticmethod
    def pack(v, *args, **kwargs):
        # Always write the new millisecond format.
        return marshal._long_packer.pack(_to_timestamp(v, use_micros=False))

    @staticmethod
    def unpack(v):
        raw_ts = marshal._long_packer.unpack(v)[0] / 1e3
        try:
            return datetime.utcfromtimestamp(raw_ts)
        except ValueError:
            # Out of range: the value was written in the old microsecond
            # format, so scale down once more to get milliseconds.
            return datetime.utcfromtimestamp(raw_ts / 1e3)
class CompositeType(CassandraType):
    """
    A type composed of one or more components, each with its own type.
    When sorted, items are compared by their first component, then their
    second, and so on.

    Each of `*components` should be an instance of a subclass of
    :class:`CassandraType`.

    .. seealso:: :ref:`composite-types`
    """

    def __init__(self, *components):
        self.components = components

    def __str__(self):
        inner = ", ".join(str(component) for component in self.components)
        return "CompositeType(" + inner + ")"

    @property
    def pack(self):
        return marshal.get_composite_packer(composite_type=self)

    @property
    def unpack(self):
        return marshal.get_composite_unpacker(composite_type=self)
class DynamicCompositeType(CassandraType):
    """
    A type composed of one or more components, each of which may have its
    own type.  When sorted, items are compared by their first component,
    then their second, and so on.

    Unlike CompositeType, DynamicCompositeType columns need not all share
    one structure; each column can be composed of different component
    types.  Components are specified using a 2-tuple of a comparator type
    and value.  Aliases for comparator types can optionally be specified
    with a dictionary during instantiation (non-dict arguments are
    silently ignored).
    """

    def __init__(self, *aliases):
        self.aliases = {}
        for alias in aliases:
            if isinstance(alias, dict):
                self.aliases.update(alias)

    def __str__(self):
        aliases = []
        # Fix: use .items() instead of the Python-2-only .iteritems();
        # behavior is identical on Python 2 and this also works on Python 3.
        for k, v in self.aliases.items():
            aliases.append(k + '=>' + str(v))
        return "DynamicCompositeType(" + ", ".join(aliases) + ")"
| mit | d7b4fa3811e0589e40188de65829a499 | 29.753425 | 88 | 0.653786 | 4.072562 | false | false | false | false |
pycassa/pycassa | pycassa/columnfamilymap.py | 1 | 11089 | """
Provides a way to map an existing class of objects to a column family.
This can help to cut down boilerplate code related to converting
objects to a row format and back again. ColumnFamilyMap is primarily
useful when you have one "object" per row.
.. seealso:: :mod:`pycassa.types` for selecting data types for object
attributes and information about creating custom
types.
"""
from pycassa.types import CassandraType
from pycassa.columnfamily import ColumnFamily
import pycassa.util as util
import inspect
__all__ = ['ColumnFamilyMap']
def create_instance(cls, **kwargs):
    """Instantiate `cls` with no arguments and set one attribute per keyword.

    Returns the new instance.
    """
    instance = cls()
    # Fix: the original used ``map(lambda (k, v): ...)``; tuple-unpacking
    # lambdas are a syntax error on Python 3 (PEP 3113).  A plain loop is
    # behavior-identical on Python 2 and portable.
    for name, value in kwargs.items():
        setattr(instance, name, value)
    return instance
class ColumnFamilyMap(ColumnFamily):
    """
    Maps an existing class to a column family.  Class fields become columns,
    and instances of that class can be represented as rows in standard column
    families or super columns in super column families.

    NOTE: this class uses Python-2-only constructs (``dict.iteritems``).
    """

    def __init__(self, cls, pool, column_family, raw_columns=False, **kwargs):
        """
        Instances of `cls` are returned from :meth:`get()`, :meth:`multiget()`,
        :meth:`get_range()` and :meth:`get_indexed_slices()`.

        `pool` is a :class:`~pycassa.pool.ConnectionPool` that will be used
        in the same way a :class:`~.ColumnFamily` uses one.

        `column_family` is the name of a column family to tie to `cls`.

        If `raw_columns` is ``True``, all columns will be fetched into the
        `raw_columns` field in requests.
        """
        ColumnFamily.__init__(self, pool, column_family, **kwargs)
        self.cls = cls
        self.autopack_names = False
        self.raw_columns = raw_columns
        self.dict_class = util.OrderedDict
        self.defaults = {}
        self.fields = []
        # Every CassandraType attribute on `cls` (other than 'key') becomes
        # a mapped column, with a per-column validator and default value.
        for name, val_type in inspect.getmembers(self.cls):
            if name != 'key' and isinstance(val_type, CassandraType):
                self.fields.append(name)
                self.column_validators[name] = val_type
                self.defaults[name] = val_type.default
        # A CassandraType 'key' attribute on `cls` determines the key type.
        if hasattr(self.cls, 'key') and isinstance(self.cls.key, CassandraType):
            self.key_validation_class = self.cls.key

    def combine_columns(self, columns):
        """Fill in defaults for missing columns (and 'raw_columns' if enabled).

        NOTE: ``combined_columns`` aliases ``columns``, so the setdefault
        calls below mutate the caller's dict in place.
        """
        combined_columns = columns
        if self.raw_columns:
            combined_columns['raw_columns'] = columns
        for column, default in self.defaults.items():
            combined_columns.setdefault(column, default)
        return combined_columns

    def get(self, key, *args, **kwargs):
        """
        Creates one or more instances of `cls` from the row with key `key`.

        The fields that are retrieved may be specified using `columns`, which
        should be a list of column names.

        If the column family is a super column family, a list of `cls`
        instances will be returned, one for each super column.  If
        the `super_column` parameter is not supplied, then `columns`
        specifies which super columns will be used to create instances
        of `cls`.  If the `super_column` parameter *is* supplied, only
        one instance of `cls` will be returned; if `columns` is specified
        in this case, only those attributes listed in `columns` will be fetched.

        All other parameters behave the same as in :meth:`.ColumnFamily.get()`.
        """
        # Restrict the fetch to the mapped fields unless the caller chose
        # columns, the CF is super, or raw_columns requested everything.
        if 'columns' not in kwargs and not self.super and not self.raw_columns:
            kwargs['columns'] = self.fields
        columns = ColumnFamily.get(self, key, *args, **kwargs)
        if self.super:
            if 'super_column' not in kwargs:
                # One instance per super column, keyed by super column name.
                vals = self.dict_class()
                for super_column, subcols in columns.iteritems():
                    combined = self.combine_columns(subcols)
                    vals[super_column] = create_instance(self.cls, key=key,
                                                         super_column=super_column, **combined)
                return vals
            combined = self.combine_columns(columns)
            return create_instance(self.cls, key=key,
                                   super_column=kwargs['super_column'],
                                   **combined)
        combined = self.combine_columns(columns)
        return create_instance(self.cls, key=key, **combined)

    def multiget(self, *args, **kwargs):
        """
        Like :meth:`get()`, but a list of keys may be specified.

        The result of multiget will be a dictionary where the keys
        are the keys from the `keys` argument, minus any missing rows.
        The value for each key in the dictionary will be the same as
        if :meth:`get()` were called on that individual key.
        """
        if 'columns' not in kwargs and not self.super and not self.raw_columns:
            kwargs['columns'] = self.fields
        kcmap = ColumnFamily.multiget(self, *args, **kwargs)
        ret = self.dict_class()
        for key, columns in kcmap.iteritems():
            # Same per-row conversion logic as in get().
            if self.super:
                if 'super_column' not in kwargs:
                    vals = self.dict_class()
                    for super_column, subcols in columns.iteritems():
                        combined = self.combine_columns(subcols)
                        vals[super_column] = create_instance(self.cls, key=key, super_column=super_column, **combined)
                    ret[key] = vals
                else:
                    combined = self.combine_columns(columns)
                    ret[key] = create_instance(self.cls, key=key, super_column=kwargs['super_column'], **combined)
            else:
                combined = self.combine_columns(columns)
                ret[key] = create_instance(self.cls, key=key, **combined)
        return ret

    def get_range(self, *args, **kwargs):
        """
        Get an iterator over instances in a specified key range.

        Like :meth:`multiget()`, whether a single instance or multiple
        instances are returned per-row when the column family is a super
        column family depends on what parameters are passed.

        For an explanation of how :meth:`get_range` works and a description
        of the parameters, see :meth:`.ColumnFamily.get_range()`.

        Example usage with a standard column family:

        .. code-block:: python

            >>> pool = pycassa.ConnectionPool('Keyspace1')
            >>> usercf = pycassa.ColumnFamily(pool, 'Users')
            >>> cfmap = pycassa.ColumnFamilyMap(MyClass, usercf)
            >>> users = cfmap.get_range(row_count=2, columns=['name', 'age'])
            >>> for key, user in users:
            ...     print user.name, user.age
            Miles Davis 84
            Winston Smith 42
        """
        if 'columns' not in kwargs and not self.super and not self.raw_columns:
            kwargs['columns'] = self.fields
        # Generator: rows are converted lazily as the range is iterated.
        for key, columns in ColumnFamily.get_range(self, *args, **kwargs):
            if self.super:
                if 'super_column' not in kwargs:
                    vals = self.dict_class()
                    for super_column, subcols in columns.iteritems():
                        combined = self.combine_columns(subcols)
                        vals[super_column] = create_instance(self.cls, key=key, super_column=super_column, **combined)
                    yield vals
                else:
                    combined = self.combine_columns(columns)
                    yield create_instance(self.cls, key=key, super_column=kwargs['super_column'], **combined)
            else:
                combined = self.combine_columns(columns)
                yield create_instance(self.cls, key=key, **combined)

    def get_indexed_slices(self, *args, **kwargs):
        """
        Fetches a list of instances that satisfy an index clause. Similar
        to :meth:`get_range()`, but uses an index clause instead of a key range.

        See :meth:`.ColumnFamily.get_indexed_slices()` for
        an explanation of the parameters.
        """
        assert not self.super, "get_indexed_slices() is not " \
                               "supported by super column families"
        if 'columns' not in kwargs and not self.raw_columns:
            kwargs['columns'] = self.fields
        for key, columns in ColumnFamily.get_indexed_slices(self, *args, **kwargs):
            combined = self.combine_columns(columns)
            yield create_instance(self.cls, key=key, **combined)

    def _get_instance_as_dict(self, instance, columns=None):
        """Collect the mapped attribute values of `instance` into a dict.

        Attributes that are unset fall back to the CassandraType class
        attribute; those (and Nones) are filtered out here.  For super
        column families, the dict is nested under the instance's
        super_column.
        """
        fields = columns or self.fields
        instance_dict = {}
        for field in fields:
            val = getattr(instance, field, None)
            if val is not None and not isinstance(val, CassandraType):
                instance_dict[field] = val
        if self.super:
            instance_dict = {instance.super_column: instance_dict}
        return instance_dict

    def insert(self, instance, columns=None, timestamp=None, ttl=None,
               write_consistency_level=None):
        """
        Insert or update stored instances.

        `instance` should be an instance of `cls` to store.

        The `columns` parameter allows you to specify which attributes of
        `instance` should be inserted or updated. If left as ``None``, all
        attributes will be inserted.
        """
        if columns is None:
            fields = self.fields
        else:
            fields = columns
        insert_dict = self._get_instance_as_dict(instance, columns=fields)
        return ColumnFamily.insert(self, instance.key, insert_dict,
                                   timestamp=timestamp, ttl=ttl,
                                   write_consistency_level=write_consistency_level)

    def batch_insert(self, instances, timestamp=None, ttl=None,
                     write_consistency_level=None):
        """
        Insert or update stored instances.

        `instances` should be a list containing instances of `cls` to store.
        """
        # Map each instance's key to its full attribute dict.
        insert_dict = dict(
            [(instance.key, self._get_instance_as_dict(instance))
             for instance in instances]
        )
        return ColumnFamily.batch_insert(self, insert_dict,
                                         timestamp=timestamp, ttl=ttl,
                                         write_consistency_level=write_consistency_level)

    def remove(self, instance, columns=None, write_consistency_level=None):
        """
        Removes a stored instance.

        The `columns` parameter is a list of columns that should be removed.
        If this is left as the default value of ``None``, the entire stored
        instance will be removed.
        """
        if self.super:
            return ColumnFamily.remove(self, instance.key,
                                       super_column=instance.super_column,
                                       columns=columns,
                                       write_consistency_level=write_consistency_level)
        else:
            return ColumnFamily.remove(self, instance.key, columns,
                                       write_consistency_level=write_consistency_level)
| mit | a46a010f13016d0588fc5e46f14465b5 | 39.619048 | 118 | 0.593651 | 4.421451 | false | false | false | false |
jungmannlab/picasso | picasso/gaussmle.py | 1 | 22027 | """
picasso/gaussmle
~~~~~~~~~~~~~~~~
Maximum likelihood fits for single particle localization
:authors: Joerg Schnitzbauer, Maximilian Thomas Strauss, 2016-2018
:copyright: Copyright (c) 2016-2018 Jungmann Lab, MPI of Biochemistry
"""
import numpy as _np
import numba as _numba
import math as _math
import multiprocessing as _multiprocessing
import threading as _threading
from concurrent import futures as _futures
# Per-parameter damping factors applied to the Newton updates in the MLE
# fits below (theta layout [x, y, N, bg, S(, Sy)]; the photon-count entry
# at index 2 is halved).
GAMMA = _np.array([1.0, 1.0, 0.5, 1.0, 1.0, 1.0])
@_numba.jit(nopython=True, nogil=True)
def _sum_and_center_of_mass(spot, size):
    """Return (sum, y, x): total intensity and intensity-weighted centroid
    of the size x size array `spot` (y weighted by the column index j,
    x by the row index i)."""
    total = 0.0
    weighted_i = 0.0
    weighted_j = 0.0
    for i in range(size):
        for j in range(size):
            value = spot[i, j]
            weighted_i += value * i
            weighted_j += value * j
            total += value
    weighted_i /= total
    weighted_j /= total
    return total, weighted_j, weighted_i
@_numba.jit(nopython=True, nogil=True)
def mean_filter(spot, size):
    """3x3 box-mean filter with window clipping at the edges; returns a
    new array of the same shape as `spot`."""
    filtered = _np.zeros_like(spot)
    for row in range(size):
        top = _np.maximum(0, row - 1)
        bottom = _np.minimum(size, row + 2)
        for col in range(size):
            left = _np.maximum(0, col - 1)
            right = _np.minimum(size, col + 2)
            total = 0.0
            for m in range(top, bottom):
                for n in range(left, right):
                    total += spot[m, n]
            count = (bottom - top) * (right - left)
            filtered[row, col] = total / count
    return filtered
@_numba.jit(nopython=True, nogil=True)
def _initial_sigmas(spot, y, x, size):
    """Estimate Gaussian widths (sy, sx) from second moments along the
    central column/row of `spot`.

    `y` and `x` are accepted for interface compatibility but are not
    used; moments are taken about the geometric center.  Non-finite or
    zero estimates are clamped to 0.01.
    """
    center = int(size / 2)
    dev_y = 0.0
    dev_x = 0.0
    total_y = 0.0
    total_x = 0.0
    for i in range(size):
        d2 = (i - center) ** 2
        col_val = spot[i, center]
        row_val = spot[center, i]
        dev_y += col_val * d2
        dev_x += row_val * d2
        total_y += col_val
        total_x += row_val
    sy = _np.sqrt(dev_y / total_y)
    sx = _np.sqrt(dev_x / total_x)
    if ~_np.isfinite(sy) or sy == 0:
        sy = 0.01
    if ~_np.isfinite(sx) or sx == 0:
        sx = 0.01
    return sy, sx
@_numba.jit(nopython=True, nogil=True)
def _initial_parameters(spot, size):
    """Initial guesses (x, y, photons, bg, sx, sy) for the MLE fit.

    Background is the minimum of the 3x3-mean-filtered spot; the photon
    count is floored at 1.
    """
    total, y, x = _sum_and_center_of_mass(spot, size)
    bg = _np.min(mean_filter(spot, size))
    photons = _np.maximum(1.0, total - size * size * bg)
    sy, sx = _initial_sigmas(spot - bg, y, x, size)
    return x, y, photons, bg, sx, sy
@_numba.jit(nopython=True, nogil=True)
def _initial_theta_sigma(spot, size):
    """Initial parameter vector [x, y, photons, bg, sigma] for the
    single-sigma model; sigma is the mean of the x and y estimates."""
    x, y, photons, bg, sx, sy = _initial_parameters(spot, size)
    theta = _np.zeros(5, dtype=_np.float32)
    theta[0] = x
    theta[1] = y
    theta[2] = photons
    theta[3] = bg
    theta[4] = 0.5 * (sx + sy)
    return theta
@_numba.jit(nopython=True, nogil=True)
def _initial_theta_sigmaxy(spot, size):
    """Initial parameter vector [x, y, photons, bg, sx, sy] for the
    per-axis-sigma model."""
    x, y, photons, bg, sx, sy = _initial_parameters(spot, size)
    theta = _np.zeros(6, dtype=_np.float32)
    theta[0] = x
    theta[1] = y
    theta[2] = photons
    theta[3] = bg
    theta[4] = sx
    theta[5] = sy
    return theta
@_numba.vectorize(nopython=True)
def _erf(x):
    """Currently not needed, but might be useful for a CUDA implementation

    Rational-polynomial approximation of the error function erf(x),
    evaluated in three regimes of |x|; for |x| >= 5.8 the result
    saturates at sign(x).  The exact coefficient values are load-bearing;
    do not reformat or "simplify" them.
    """
    ax = _np.abs(x)
    if ax < 0.5:
        # Regime 1, |x| < 0.5: erf(x) ~= x * P(x^2) / Q(x^2)
        t = x * x
        top = (
            (
                (
                    (0.771058495001320e-04 * t - 0.133733772997339e-02) * t
                    + 0.323076579225834e-01
                )
                * t
                + 0.479137145607681e-01
            )
            * t
            + 0.128379167095513e00
        ) + 1.0
        bot = (
            (0.301048631703895e-02 * t + 0.538971687740286e-01) * t
            + 0.375795757275549e00
        ) * t + 1.0
        return x * (top / bot)
    if ax < 4.0:
        # Regime 2, 0.5 <= |x| < 4: erf(|x|) = 1 - exp(-x^2) * P(|x|)/Q(|x|)
        top = (
            (
                (
                    (
                        (
                            (-1.36864857382717e-07 * ax + 5.64195517478974e-01) * ax
                            + 7.21175825088309e00
                        )
                        * ax
                        + 4.31622272220567e01
                    )
                    * ax
                    + 1.52989285046940e02
                )
                * ax
                + 3.39320816734344e02
            )
            * ax
            + 4.51918953711873e02
        ) * ax + 3.00459261020162e02
        bot = (
            (
                (
                    (
                        ((1.0 * ax + 1.27827273196294e01) * ax + 7.70001529352295e01)
                        * ax
                        + 2.77585444743988e02
                    )
                    * ax
                    + 6.38980264465631e02
                )
                * ax
                + 9.31354094850610e02
            )
            * ax
            + 7.90950925327898e02
        ) * ax + 3.00459260956983e02
        erf = 0.5 + (0.5 - _np.exp(-x * x) * top / bot)
        # Approximation is for |x|; restore the sign (erf is odd).
        if x < 0.0:
            erf = -erf
        return erf
    if ax < 5.8:
        # Regime 3, 4 <= |x| < 5.8: asymptotic expansion in t = 1/x^2.
        x2 = x * x
        t = 1.0 / x2
        top = (
            ((2.10144126479064e00 * t + 2.62370141675169e01) * t + 2.13688200555087e01)
            * t
            + 4.65807828718470e00
        ) * t + 2.82094791773523e-01
        bot = (
            ((9.41537750555460e01 * t + 1.87114811799590e02) * t + 9.90191814623914e01)
            * t
            + 1.80124575948747e01
        ) * t + 1.0
        erf = (0.564189583547756e0 - top / (x2 * bot)) / ax
        erf = 0.5 + (0.5 - _np.exp(-x2) * erf)
        if x < 0.0:
            erf = -erf
        return erf
    # |x| >= 5.8: erf saturates at +/-1.
    return _np.sign(x)
@_numba.jit(nopython=True, nogil=True, cache=False)
def _gaussian_integral(x, mu, sigma):
    """Integral of a unit-area 1D Gaussian (mean mu, width sigma) over the
    pixel [x - 0.5, x + 0.5]."""
    inv_width = 0.70710678118654757 / sigma  # sqrt(0.5) / sigma
    d = x - mu
    upper = _math.erf((d + 0.5) * inv_width)
    lower = _math.erf((d - 0.5) * inv_width)
    return 0.5 * (upper - lower)
@_numba.jit(nopython=True, nogil=True, cache=False)
def _derivative_gaussian_integral(x, mu, sigma, photons, PSFc):
    """First and second derivatives of the pixel-integrated Gaussian model
    with respect to the center mu, scaled by `photons` and the
    complementary-axis PSF value `PSFc`."""
    norm = _np.sqrt(2.0 * _np.pi)
    offset = x - mu
    upper = _np.exp(-0.5 * ((offset + 0.5) / sigma) ** 2)
    lower = _np.exp(-0.5 * ((offset - 0.5) / sigma) ** 2)
    dudt = -photons * PSFc * (upper - lower) / (norm * sigma)
    d2udt2 = (
        -photons
        * ((offset + 0.5) * upper - (offset - 0.5) * lower)
        * PSFc
        / (norm * sigma**3)
    )
    return dudt, d2udt2
@_numba.jit(nopython=True, nogil=True, cache=False)
def _derivative_gaussian_integral_1d_sigma(x, mu, sigma, photons, PSFc):
    """First and second derivatives of the pixel-integrated Gaussian model
    with respect to sigma (one axis), scaled by `photons` and `PSFc`."""
    norm = _np.sqrt(2.0 * _np.pi)
    dp = x + 0.5 - mu
    dm = x - 0.5 - mu
    ax = _np.exp(-0.5 * (dp / sigma) ** 2)
    bx = _np.exp(-0.5 * (dm / sigma) ** 2)
    dudt = -photons * (ax * dp - bx * dm) * PSFc / (norm * sigma**2)
    d2udt2 = -2.0 * dudt / sigma - photons * (
        ax * dp**3 - bx * dm**3
    ) * PSFc / (norm * sigma**5)
    return dudt, d2udt2
@_numba.jit(nopython=True, nogil=True)
def _derivative_gaussian_integral_2d_sigma(x, y, mu, nu, sigma, photons, PSFx, PSFy):
    """Derivatives with respect to a sigma shared between x and y: simply
    the sum of the two 1D sigma derivatives."""
    dx_first, dx_second = _derivative_gaussian_integral_1d_sigma(
        x, mu, sigma, photons, PSFy
    )
    dy_first, dy_second = _derivative_gaussian_integral_1d_sigma(
        y, nu, sigma, photons, PSFx
    )
    return dx_first + dy_first, dx_second + dy_second
def _worker(
    func,
    spots,
    thetas,
    CRLBs,
    likelihoods,
    iterations,
    eps,
    max_it,
    current,
    lock,
):
    """Thread worker: repeatedly claim the next unfitted spot and fit it.

    `current` is a single-element list acting as a shared cursor; it is
    read and advanced under `lock`, while the fit itself runs unlocked.
    Returns when every spot has been claimed.
    """
    total = len(spots)
    while True:
        lock.acquire()
        try:
            index = current[0]
            if index == total:
                return
            current[0] += 1
        finally:
            lock.release()
        func(spots, index, thetas, CRLBs, likelihoods, iterations, eps, max_it)
def gaussmle(spots, eps, max_it, method="sigma"):
    """Fit a 2D Gaussian to every spot synchronously in the calling thread.

    `method` selects the single-sigma ("sigma") or per-axis-sigma
    ("sigmaxy") model; anything else raises ValueError.  Returns
    (thetas, CRLBs, likelihoods, iterations).
    """
    n_spots = len(spots)
    thetas = _np.zeros((n_spots, 6), dtype=_np.float32)
    CRLBs = _np.inf * _np.ones((n_spots, 6), dtype=_np.float32)
    likelihoods = _np.zeros(n_spots, dtype=_np.float32)
    iterations = _np.zeros(n_spots, dtype=_np.int32)
    if method == "sigma":
        fit = _mlefit_sigma
    elif method == "sigmaxy":
        fit = _mlefit_sigmaxy
    else:
        raise ValueError("Method not available.")
    for index in range(n_spots):
        fit(spots, index, thetas, CRLBs, likelihoods, iterations, eps, max_it)
    return thetas, CRLBs, likelihoods, iterations
def gaussmle_async(spots, eps, max_it, method="sigma"):
    """Start fitting all spots in background threads and return immediately.

    Returns (current, thetas, CRLBs, likelihoods, iterations); the arrays
    are filled in place by the worker threads, and `current` (a one-element
    list) tracks how many spots have been claimed so far.  `method` selects
    "sigma" or "sigmaxy"; anything else raises ValueError.
    """
    n_spots = len(spots)
    thetas = _np.zeros((n_spots, 6), dtype=_np.float32)
    CRLBs = _np.inf * _np.ones((n_spots, 6), dtype=_np.float32)
    likelihoods = _np.zeros(n_spots, dtype=_np.float32)
    iterations = _np.zeros(n_spots, dtype=_np.int32)
    # Use roughly three quarters of the cores, but at least one thread.
    n_workers = max(1, int(0.75 * _multiprocessing.cpu_count()))
    lock = _threading.Lock()
    current = [0]
    if method == "sigma":
        fit = _mlefit_sigma
    elif method == "sigmaxy":
        fit = _mlefit_sigmaxy
    else:
        raise ValueError("Method not available.")
    executor = _futures.ThreadPoolExecutor(n_workers)
    for _ in range(n_workers):
        executor.submit(
            _worker,
            fit,
            spots,
            thetas,
            CRLBs,
            likelihoods,
            iterations,
            eps,
            max_it,
            current,
            lock,
        )
    # Fire-and-forget: the caller polls `current` / the arrays for progress.
    executor.shutdown(wait=False)
    return current, thetas, CRLBs, likelihoods, iterations
@_numba.jit(nopython=True, nogil=True)
def _mlefit_sigma(spots, index, thetas, CRLBs, likelihoods, iterations, eps, max_it):
    """Newton-type MLE fit of one spot with a single (isotropic) sigma.

    Fits [x, y, photons, bg, sigma] for spots[index] and writes the result
    in place: thetas[index], CRLBs[index], likelihoods[index] and
    iterations[index].  The sixth output column duplicates sigma so the
    output layout matches the sigmaxy variant.
    """
    n_params = 5
    spot = spots[index]
    size, _ = spot.shape
    # theta is [x, y, N, bg, S]
    theta = _initial_theta_sigma(spot, size)
    # Per-parameter clamp on the size of each Newton update.
    max_step = _np.zeros(n_params, dtype=_np.float32)
    max_step[0:2] = theta[4]
    max_step[2:4] = 0.1 * theta[2:4]
    max_step[4] = 0.2 * theta[4]
    # Memory allocation
    # (we do that outside of the loops to avoid huge delays in threaded code):
    dudt = _np.zeros(n_params, dtype=_np.float32)
    d2udt2 = _np.zeros(n_params, dtype=_np.float32)
    numerator = _np.zeros(n_params, dtype=_np.float32)
    denominator = _np.zeros(n_params, dtype=_np.float32)
    old_x = theta[0]
    old_y = theta[1]
    kk = 0
    while (
        kk < max_it
    ):  # we do this instead of a for loop for the special case of max_it=0
        kk += 1
        numerator[:] = 0.0
        denominator[:] = 0.0
        # Accumulate per-pixel gradient/curvature contributions.
        for ii in range(size):
            for jj in range(size):
                PSFx = _gaussian_integral(ii, theta[0], theta[4])
                PSFy = _gaussian_integral(jj, theta[1], theta[4])
                # Derivatives
                dudt[0], d2udt2[0] = _derivative_gaussian_integral(
                    ii, theta[0], theta[4], theta[2], PSFy
                )
                dudt[1], d2udt2[1] = _derivative_gaussian_integral(
                    jj, theta[1], theta[4], theta[2], PSFx
                )
                dudt[2] = PSFx * PSFy
                d2udt2[2] = 0.0
                dudt[3] = 1.0
                d2udt2[3] = 0.0
                dudt[4], d2udt2[4] = _derivative_gaussian_integral_2d_sigma(
                    ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy
                )
                model = theta[2] * dudt[2] + theta[3]
                cf = df = 0.0
                data = spot[ii, jj]
                # Guard against division by a (near-)zero model value.
                if model > 10e-3:
                    cf = data / model - 1
                    df = data / model**2
                cf = _np.minimum(cf, 10e4)
                df = _np.minimum(df, 10e4)
                for ll in range(n_params):
                    numerator[ll] += cf * dudt[ll]
                    denominator[ll] += cf * d2udt2[ll] - df * dudt[ll] ** 2
        # The update
        for ll in range(n_params):
            if denominator[ll] == 0.0:
                # NOTE(review): unit-sized step in the gradient direction;
                # the sigmaxy variant scales by max_step here instead.
                update = _np.sign(numerator[ll] * max_step[ll])
            else:
                update = _np.minimum(
                    _np.maximum(numerator[ll] / denominator[ll], -max_step[ll]),
                    max_step[ll],
                )
            if kk < 5:
                # Damp the first iterations with the per-parameter GAMMA.
                update *= GAMMA[ll]
            theta[ll] -= update
        # Other constraints
        theta[2] = _np.maximum(theta[2], 1.0)
        theta[3] = _np.maximum(theta[3], 0.01)
        theta[4] = _np.maximum(theta[4], 0.01)
        theta[4] = _np.minimum(theta[4], size)
        # Check for convergence
        if (_np.abs(old_x - theta[0]) < eps) and (_np.abs(old_y - theta[1]) < eps):
            break
        else:
            old_x = theta[0]
            old_y = theta[1]
    thetas[index, 0:5] = theta
    thetas[index, 5] = theta[4]  # duplicate sigma into the sy slot
    iterations[index] = kk
    # Calculating the CRLB and LogLikelihood
    Div = 0.0
    M = _np.zeros((n_params, n_params), dtype=_np.float32)
    for ii in range(size):
        for jj in range(size):
            PSFx = _gaussian_integral(ii, theta[0], theta[4])
            PSFy = _gaussian_integral(jj, theta[1], theta[4])
            model = theta[3] + theta[2] * PSFx * PSFy
            # Calculating derivatives
            dudt[0], d2udt2[0] = _derivative_gaussian_integral(
                ii, theta[0], theta[4], theta[2], PSFy
            )
            dudt[1], d2udt2[1] = _derivative_gaussian_integral(
                jj, theta[1], theta[4], theta[2], PSFx
            )
            dudt[4], d2udt2[4] = _derivative_gaussian_integral_2d_sigma(
                ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy
            )
            dudt[2] = PSFx * PSFy
            dudt[3] = 1.0
            # Building the Fisher Information Matrix
            model = theta[3] + theta[2] * dudt[2]
            for kk in range(n_params):
                for ll in range(kk, n_params):
                    M[kk, ll] += dudt[ll] * dudt[kk] / model
                    M[ll, kk] = M[kk, ll]
            # LogLikelihood
            if model > 0:
                data = spot[ii, jj]
                if data > 0:
                    Div += data * _np.log(model) - model - data * _np.log(data) + data
                else:
                    Div += -model
    likelihoods[index] = Div
    # Matrix inverse (CRLB=F^-1)
    Minv = _np.linalg.pinv(M)
    CRLB = _np.zeros(n_params, dtype=_np.float32)
    for kk in range(n_params):
        CRLB[kk] = Minv[kk, kk]
    CRLBs[index, 0:5] = CRLB
    CRLBs[index, 5] = CRLB[4]
@_numba.jit(nopython=True, nogil=True)
def _mlefit_sigmaxy(spots, index, thetas, CRLBs, likelihoods, iterations, eps, max_it):
    """Newton-type MLE fit of one spot with independent x/y sigmas.

    Fits [x, y, photons, bg, sx, sy] for spots[index] and writes the
    result in place: thetas[index], CRLBs[index], likelihoods[index] and
    iterations[index].
    """
    n_params = 6
    spot = spots[index]
    size, _ = spot.shape
    # Initial values
    # theta is [x, y, N, bg, Sx, Sy]
    theta = _initial_theta_sigmaxy(spot, size)
    # Per-parameter clamp on the size of each Newton update.
    max_step = _np.zeros(n_params, dtype=_np.float32)
    max_step[0:2] = theta[4]
    max_step[2:4] = 0.1 * theta[2:4]
    max_step[4:6] = 0.2 * theta[4:6]
    # Memory allocation
    # (we do that outside of the loops to avoid huge delays in threaded code):
    dudt = _np.zeros(n_params, dtype=_np.float32)
    d2udt2 = _np.zeros(n_params, dtype=_np.float32)
    numerator = _np.zeros(n_params, dtype=_np.float32)
    denominator = _np.zeros(n_params, dtype=_np.float32)
    old_x = theta[0]
    old_y = theta[1]
    old_sx = theta[4]
    old_sy = theta[5]
    kk = 0
    while (
        kk < max_it
    ):  # we do this instead of a for loop for the special case of max_it=0
        kk += 1
        numerator[:] = 0.0
        denominator[:] = 0.0
        # Accumulate per-pixel gradient/curvature contributions.
        for ii in range(size):
            for jj in range(size):
                PSFx = _gaussian_integral(ii, theta[0], theta[4])
                PSFy = _gaussian_integral(jj, theta[1], theta[5])
                # Derivatives
                dudt[0], d2udt2[0] = _derivative_gaussian_integral(
                    ii, theta[0], theta[4], theta[2], PSFy
                )
                dudt[1], d2udt2[1] = _derivative_gaussian_integral(
                    jj, theta[1], theta[5], theta[2], PSFx
                )
                dudt[2] = PSFx * PSFy
                d2udt2[2] = 0.0
                dudt[3] = 1.0
                d2udt2[3] = 0.0
                dudt[4], d2udt2[4] = _derivative_gaussian_integral_1d_sigma(
                    ii, theta[0], theta[4], theta[2], PSFy
                )
                dudt[5], d2udt2[5] = _derivative_gaussian_integral_1d_sigma(
                    jj, theta[1], theta[5], theta[2], PSFx
                )
                model = theta[2] * dudt[2] + theta[3]
                cf = df = 0.0
                data = spot[ii, jj]
                # Guard against division by a (near-)zero model value.
                if model > 10e-3:
                    cf = data / model - 1
                    df = data / model**2
                cf = _np.minimum(cf, 10e4)
                df = _np.minimum(df, 10e4)
                for ll in range(n_params):
                    numerator[ll] += cf * dudt[ll]
                    denominator[ll] += cf * d2udt2[ll] - df * dudt[ll] ** 2
        # The update (damped by GAMMA on every iteration, unlike the
        # single-sigma variant which damps only the first few).
        for ll in range(n_params):
            if denominator[ll] == 0.0:
                # This is case is not handled in Lidke's code
                # but it seems to be a problem here
                # (maybe due to many iterations)
                theta[ll] -= GAMMA[ll] * _np.sign(numerator[ll]) * max_step[ll]
            else:
                theta[ll] -= GAMMA[ll] * _np.minimum(
                    _np.maximum(numerator[ll] / denominator[ll], -max_step[ll]),
                    max_step[ll],
                )
        # Other constraints
        theta[2] = _np.maximum(theta[2], 1.0)
        theta[3] = _np.maximum(theta[3], 0.01)
        theta[4] = _np.maximum(theta[4], 0.01)
        theta[5] = _np.maximum(theta[5], 0.01)
        # Check for convergence (position and both sigmas must settle)
        if _np.abs(old_x - theta[0]) < eps:
            if _np.abs(old_y - theta[1]) < eps:
                if _np.abs(old_sx - theta[4]) < eps:
                    if _np.abs(old_sy - theta[5]) < eps:
                        break
        old_x = theta[0]
        old_y = theta[1]
        old_sx = theta[4]
        old_sy = theta[5]
    thetas[index] = theta
    iterations[index] = kk
    # Calculating the CRLB and LogLikelihood
    Div = 0.0
    M = _np.zeros((n_params, n_params), dtype=_np.float32)
    for ii in range(size):
        for jj in range(size):
            PSFx = _gaussian_integral(ii, theta[0], theta[4])
            PSFy = _gaussian_integral(jj, theta[1], theta[5])
            model = theta[3] + theta[2] * PSFx * PSFy
            # Calculating derivatives
            dudt[0], d2udt2[0] = _derivative_gaussian_integral(
                ii, theta[0], theta[4], theta[2], PSFy
            )
            dudt[1], d2udt2[1] = _derivative_gaussian_integral(
                jj, theta[1], theta[5], theta[2], PSFx
            )
            dudt[4], d2udt2[4] = _derivative_gaussian_integral_1d_sigma(
                ii, theta[0], theta[4], theta[2], PSFy
            )
            dudt[5], d2udt2[5] = _derivative_gaussian_integral_1d_sigma(
                jj, theta[1], theta[5], theta[2], PSFx
            )
            dudt[2] = PSFx * PSFy
            dudt[3] = 1.0
            # Building the Fisher Information Matrix
            model = theta[3] + theta[2] * dudt[2]
            for kk in range(n_params):
                for ll in range(kk, n_params):
                    M[kk, ll] += dudt[ll] * dudt[kk] / model
                    M[ll, kk] = M[kk, ll]
            # LogLikelihood
            if model > 0:
                data = spot[ii, jj]
                if data > 0:
                    Div += data * _np.log(model) - model - data * _np.log(data) + data
                else:
                    Div += -model
    likelihoods[index] = Div
    # Matrix inverse (CRLB=F^-1)
    Minv = _np.linalg.pinv(M)
    CRLB = _np.zeros(n_params, dtype=_np.float32)
    for kk in range(n_params):
        CRLB[kk] = Minv[kk, kk]
    CRLBs[index] = CRLB
def locs_from_fits(identifications, theta, CRLBs, likelihoods, iterations, box):
    """Convert raw Gaussian-fit results into a localizations record array.

    Parameters
    ----------
    identifications : np.recarray
        Spot identifications with at least ``frame``, ``x``, ``y`` and
        ``net_gradient`` fields; an optional ``n_id`` field triggers an
        extra output column and a different sort order.
    theta : np.ndarray
        (N, 6) fitted parameters per spot: y, x, photons, bg, sx, sy.
    CRLBs : np.ndarray
        (N, >=2) Cramer-Rao lower bounds; columns 0/1 give the y/x
        localization-precision variances.
    likelihoods, iterations : array-like
        Accepted for interface compatibility; not used here.
    box : int
        Fit box side length (pixels); fitted positions are relative to
        the box, so half the box is subtracted to get image coordinates.

    Returns
    -------
    np.recarray
        Localizations sorted by ``n_id`` when present, else by ``frame``.
    """
    box_offset = int(box / 2)
    # theta stores (y, x) relative to the fit box; shift into image space.
    y = theta[:, 0] + identifications.y - box_offset
    x = theta[:, 1] + identifications.x - box_offset
    with _np.errstate(invalid="ignore"):
        # Negative CRLBs (failed fits) yield NaN precision instead of a warning.
        lpy = _np.sqrt(CRLBs[:, 0])
        lpx = _np.sqrt(CRLBs[:, 1])
    a = _np.maximum(theta[:, 4], theta[:, 5])
    b = _np.minimum(theta[:, 4], theta[:, 5])
    ellipticity = (a - b) / a
    # Shared columns/dtype for both variants; n_id is appended when present.
    columns = [
        identifications.frame,
        x,
        y,
        theta[:, 2],
        theta[:, 4],
        theta[:, 5],
        theta[:, 3],
        lpx,
        lpy,
        ellipticity,
        identifications.net_gradient,
    ]
    dtype = [
        ("frame", "u4"),
        ("x", "f4"),
        ("y", "f4"),
        ("photons", "f4"),
        ("sx", "f4"),
        ("sy", "f4"),
        ("bg", "f4"),
        ("lpx", "f4"),
        ("lpy", "f4"),
        ("ellipticity", "f4"),
        ("net_gradient", "f4"),
    ]
    if hasattr(identifications, "n_id"):
        columns.append(identifications.n_id)
        dtype.append(("n_id", "u4"))
        sort_key = "n_id"
    else:
        sort_key = "frame"
    locs = _np.rec.array(tuple(columns), dtype=dtype)
    # mergesort keeps the relative order of equal keys (stable sort).
    locs.sort(kind="mergesort", order=sort_key)
    return locs
"""
gui/render
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for rendering localization images
:author: Joerg Schnitzbauer & Maximilian Strauss
& Rafal Kowalewski, 2017-2022
:copyright: Copyright (c) 2017 Jungmann Lab, MPI of Biochemistry
"""
import os
import sys
import traceback
import copy
import time
import os.path
import importlib, pkgutil
from glob import glob
from math import ceil
# from icecream import ic
from functools import partial
import lmfit
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import yaml
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT \
as NavigationToolbar
from scipy.ndimage.filters import gaussian_filter
from numpy.lib.recfunctions import stack_arrays
from PyQt5 import QtCore, QtGui, QtWidgets
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import KMeans, DBSCAN
from collections import Counter
from tqdm import tqdm
import colorsys
from .. import imageprocess, io, lib, postprocess, render, clusterer
from .rotation import RotationWindow
# PyImarisWrite works on windows only
if sys.platform == "win32":
from .. ext.bitplane import IMSWRITER
if IMSWRITER:
from .. ext.bitplane import numpy_to_imaris
from PyImarisWriter.ImarisWriterCtypes import *
from PyImarisWriter import PyImarisWriter as PW
else:
IMSWRITER = False
try:
from hdbscan import HDBSCAN
HDBSCAN_IMPORTED = True
except:
HDBSCAN_IMPORTED = False
if sys.platform == "darwin": # plots do not work on mac os
matplotlib.use('agg')
matplotlib.rcParams.update({"axes.titlesize": "large"})
# Module-level rendering defaults (consumers live outside this chunk;
# meanings below are inferred from the names — TODO confirm against View).
DEFAULT_OVERSAMPLING = 1.0  # initial oversampling (display px per camera px)
INITIAL_REL_MAXIMUM = 0.5  # initial relative contrast maximum
ZOOM = 9 / 7  # presumably the zoom factor applied per zoom step
N_GROUP_COLORS = 8  # number of distinct colors when coloring by group
N_Z_COLORS = 32  # number of color bins when coloring by z
def get_colors(n_channels):
    """
    Creates a list with rgb channels for each locs channel.

    Colors go from red to green, blue, pink and red again.  Hues are
    spaced as ``i / n_channels`` over an integer range, which guarantees
    exactly ``n_channels`` colors.  The previous
    ``np.arange(0, 1, 1 / n_channels)`` could yield one extra hue due to
    floating-point step rounding (e.g. 4 values for ``n_channels == 3``).

    Parameters
    ----------
    n_channels : int
        Number of locs channels

    Returns
    -------
    list
        Contains tuples with rgb channels
    """
    hues = [i / n_channels for i in range(n_channels)]
    return [colorsys.hsv_to_rgb(hue, 1, 1) for hue in hues]
def is_hexadecimal(text):
    """
    Checks if text represents a hexadecimal code for rgb, e.g. #ff02d4.

    Unlike the previous character-counting version, this safely returns
    False for the empty string instead of raising IndexError on
    ``text[0]``.

    Parameters
    ----------
    text : str
        String to be checked

    Returns
    -------
    boolean
        True if text represents rgb, False otherwise
    """
    if not isinstance(text, str):
        return False
    # exactly '#' followed by six hex digits (either case)
    if len(text) != 7 or not text.startswith("#"):
        return False
    allowed_characters = set("0123456789abcdefABCDEF")
    return all(char in allowed_characters for char in text[1:])
def fit_cum_exp(data):
    """
    Returns an lmfit Model class fitted to a 3-parameter cumulative
    exponential.

    Note: sorts *data* in place as a side effect.
    """
    data.sort()  # in-place, part of the original contract
    counts = np.arange(1, len(data) + 1)
    lowest = data.min()
    highest = data.max()
    params = lmfit.Parameters()
    params.add("a", value=len(data), vary=True, min=0)
    params.add(
        "t", value=np.mean(data), vary=True, min=lowest, max=highest
    )
    params.add("c", value=lowest, vary=True, min=0)
    return lib.CumulativeExponentialModel.fit(counts, params, x=data)
def kinetic_rate_from_fit(data):
    """Finds the mean kinetic time (e.g. dark time) from *data*.

    For more than two samples with nonzero spread, the rate is taken
    from the cumulative-exponential fit (see ``fit_cum_exp``);
    otherwise the plain NaN-ignoring mean is returned.

    Uses ``np.ptp(data)`` instead of ``data.ptp()`` because the ndarray
    method was removed in NumPy 2.0.
    """
    if len(data) > 2 and np.ptp(data) != 0:
        result = fit_cum_exp(data)
        return result.best_values["t"]
    return np.nanmean(data)


# Backwards-compatible alias used elsewhere in the codebase.
estimate_kinetic_rate = kinetic_rate_from_fit
def check_pick(f):
    """Decorator verifying that at least one pick exists.

    If ``args[0]`` (the view/window instance) has no picks, an
    information box is shown instead of calling *f*.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name and docstring
    def wrapper(*args):
        if len(args[0]._picks) == 0:
            QtWidgets.QMessageBox.information(
                args[0],
                "Pick Error",
                ("No localizations picked." " Please pick first."),
            )
        else:
            return f(args[0])

    return wrapper
def check_picks(f):
    """Decorator verifying that at least two picks exist.

    If ``args[0]`` (the view/window instance) has fewer than two picks,
    an information box is shown instead of calling *f*.  Mirrors
    ``check_pick`` but with a threshold of two.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name and docstring
    def wrapper(*args):
        if len(args[0]._picks) < 2:
            QtWidgets.QMessageBox.information(
                args[0],
                "Pick Error",
                (
                    "No localizations picked."
                    " Please pick at least twice first."
                ),
            )
        else:
            return f(args[0])

    return wrapper
class FloatEdit(QtWidgets.QLineEdit):
    """
    A line edit holding a float value; used for manipulating the influx
    rate in the info dialog.  Emits ``valueChanged`` whenever editing
    finishes.
    """

    valueChanged = QtCore.pyqtSignal(float)

    def __init__(self):
        super().__init__()
        preferred = QtWidgets.QSizePolicy.Preferred
        self.setSizePolicy(preferred, preferred)
        self.editingFinished.connect(self.onEditingFinished)

    def onEditingFinished(self):
        # Re-emit the parsed value once the user is done typing.
        self.valueChanged.emit(self.value())

    def setValue(self, value):
        # Scientific notation with 10 decimals, matching the original format.
        self.setText("{:.10e}".format(value))

    def value(self):
        return float(self.text())
class GenericPlotWindow(QtWidgets.QTabWidget):
    """
    A window embedding a matplotlib figure, used e.g. to display the
    trace in a pick.

    Attributes
    ----------
    figure : plt.Figure
        The embedded matplotlib figure
    canvas : FigureCanvas
        PyQt5 backend used for displaying plots
    toolbar : NavigationToolbar2QT
        PyQt5 backend exposing plot manipulation functions
        (save, zoom, ...)
    """

    def __init__(self, window_title):
        super().__init__()
        self.setWindowTitle(window_title)
        # Window icon lives next to this module under icons/.
        this_directory = os.path.dirname(os.path.realpath(__file__))
        self.setWindowIcon(
            QtGui.QIcon(os.path.join(this_directory, "icons", "render.ico"))
        )
        self.resize(1000, 500)
        self.figure = plt.Figure()
        self.canvas = FigureCanvas(self.figure)
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.canvas)
        self.toolbar = NavigationToolbar(self.canvas, self)
        layout.addWidget(self.toolbar)
class PickHistWindow(QtWidgets.QTabWidget):
    """
    A window displaying binding-kinetics plots (cumulative histograms of
    event lengths and dark times, with exponential fits).

    Attributes
    ----------
    canvas : FigureCanvas
        PyQt5 backend used for displaying plots
    figure : plt.Figure
    toolbar : NavigationToolbar2QT
        PyQt5 backend used for displaying plot manipulation functions,
        e.g., save, zoom.

    Methods
    -------
    plot(pooled_locs, fit_result_len, fit_result_dark)
        Plots two histograms for experimental data and exponential fits
    """

    def __init__(self, info_dialog):
        # NOTE(review): info_dialog is accepted but not stored/used here
        # — presumably kept for caller compatibility; confirm.
        super().__init__()
        self.setWindowTitle("Pick Histograms")
        this_directory = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(this_directory, "icons", "render.ico")
        icon = QtGui.QIcon(icon_path)
        self.setWindowIcon(icon)
        self.resize(1000, 500)
        self.figure = plt.Figure()
        self.canvas = FigureCanvas(self.figure)
        vbox = QtWidgets.QVBoxLayout()
        self.setLayout(vbox)
        vbox.addWidget(self.canvas)
        vbox.addWidget(NavigationToolbar(self.canvas, self))

    def _plot_cumulative(self, axes, data, fit_result, quantity):
        """Plot one cumulative histogram with its exponential fit.

        Parameters
        ----------
        axes : matplotlib axes to draw on
        data : np.ndarray
            Durations (frames); sorted in place.
        fit_result : lmfit.Model
            Fitted 3-parameter cumulative exponential for *data*
        quantity : str
            Title prefix, e.g. "Length" or "Dark time"
        """
        a = fit_result.best_values["a"]
        t = fit_result.best_values["t"]
        c = fit_result.best_values["c"]
        axes.set_title(
            "{} (cumulative) \n".format(quantity)
            + r"$Fit: {:.2f}\cdot(1-exp(x/{:.2f}))+{:.2f}$".format(a, t, c)
        )
        data.sort()  # in-place, as in the original implementation
        y = np.arange(1, len(data) + 1)
        axes.semilogx(data, y, label="data")
        axes.semilogx(data, fit_result.best_fit, label="fit")
        axes.legend(loc="best")
        axes.set_xlabel("Duration (frames)")
        axes.set_ylabel("Frequency")

    def plot(self, pooled_locs, fit_result_len, fit_result_dark):
        """
        Plots two histograms for experimental data and exponential fits.

        Parameters
        ----------
        pooled_locs : np.recarray
            All picked localizations
        fit_result_len : lmfit.Model
            Fitted model of a 3-parameter cumulative exponential for
            lengths of each localization
        fit_result_dark : lmfit.Model
            Fitted model of a 3-parameter cumulative exponential
        """
        self.figure.clear()
        # Length
        self._plot_cumulative(
            self.figure.add_subplot(121),
            pooled_locs.len,
            fit_result_len,
            "Length",
        )
        # Dark
        self._plot_cumulative(
            self.figure.add_subplot(122),
            pooled_locs.dark,
            fit_result_dark,
            "Dark time",
        )
        self.canvas.draw()
class ApplyDialog(QtWidgets.QDialog):
    """
    A class for the Apply Dialog.

    Apply expressions to manipulate localizations' display.

    ...

    Attributes
    ----------
    channel : QComboBox
        Points to the index of the channel to be manipulated
    cmd : QLineEdit
        Enter the expression here
    label : QLabel
        Displays which locs properties can be manipulated

    Methods
    -------
    getCmd(parent=None)
        Used for obtaining the expression
    update_vars(index)
        Update the variables that can be manipulated and show them in
        self.label

    Examples
    --------
    The examples below are to be input in self.cmd (Expression):

    x += 10
        Move x coordinate 10 units to the right (pixels)
    y -= 3
        Move y coordinate 3 units upwards (pixels)
    flip x z
        Exchange x- and z-axes
    spiral 2 3
        Plot each localization over time in a spiral with radius 2
        pixels and 3 turns
    uspiral
        Undo the last spiral action
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        # vars = self.view.locs[0].dtype.names
        self.setWindowTitle("Apply expression")
        vbox = QtWidgets.QVBoxLayout(self)
        layout = QtWidgets.QGridLayout()
        vbox.addLayout(layout)
        # Row 0: channel selector fed from the opened file paths
        layout.addWidget(QtWidgets.QLabel("Channel:"), 0, 0)
        self.channel = QtWidgets.QComboBox()
        self.channel.addItems(self.window.view.locs_paths)
        layout.addWidget(self.channel, 0, 1)
        self.channel.currentIndexChanged.connect(self.update_vars)
        # Row 1: read-only display of the selected channel's field names
        layout.addWidget(QtWidgets.QLabel("Pre-defined variables:"), 1, 0)
        self.label = QtWidgets.QLabel()
        layout.addWidget(self.label, 1, 1)
        self.update_vars(0)
        # Row 2: free-form expression entry
        layout.addWidget(QtWidgets.QLabel("Expression:"), 2, 0)
        self.cmd = QtWidgets.QLineEdit()
        layout.addWidget(self.cmd, 2, 1)
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        vbox.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getCmd(parent=None):
        """
        Obtain the expression as a string and the channel to be
        manipulated.

        NOTE(review): calling this with parent=None would fail in
        __init__ (it dereferences parent.window.view) — in practice the
        main window is always passed; confirm.
        """
        dialog = ApplyDialog(parent)
        result = dialog.exec_()
        cmd = dialog.cmd.text()
        channel = dialog.channel.currentIndex()
        return (cmd, channel, result == QtWidgets.QDialog.Accepted)

    def update_vars(self, index):
        """
        Update the variables that can be manipulated and show them in
        self.label
        """
        # NOTE: local name `vars` shadows the builtin of the same name.
        vars = self.window.view.locs[index].dtype.names
        self.label.setText(str(vars))
class DatasetDialog(QtWidgets.QDialog):
    """
    A class to handle the Dataset Dialog:
    Show legend, show white background.
    Tick and untick, change title of, set color, set relative intensity,
    and close each channel.

    ...

    Attributes
    ----------
    auto_display : QCheckBox
        Tick to automatically adjust the rendered localizations. Untick
        to not change the rendering of localizations
    auto_colors : QCheckBox
        Tick to automatically color each channel. Untick to manually
        change colors.
    checks : list
        List with QPushButtons for ticking/unticking each channel
    closebuttons : list
        List of QPushButtons to close each channel
    colordisp_all : list
        List of QLabels showing the color selected for each channel
    colorselection : list
        List of QComboBoxes specifying the color displayed for each
        channel
    default_colors : list
        List of strings specifying the default 14 colors
    intensitysettings : list
        List of QDoubleSpinBoxes specifying relative intensity of each
        channel
    legend : QCheckBox
        Used to show/hide legend
    rgbf : list
        List of lists of 3 elements specifying the corresponding colors
        as RGB channels
    title : list
        List of QPushButtons to change the title of each channel
    warning : boolean
        Used to memorize if the warning about multiple channels is to
        be displayed
    wbackground : QCheckBox
        Used to (de)activate white background for multichannel or
        to invert colors for single channel
    window : Window(QMainWindow)
        Main window instance

    Methods
    -------
    add_entry(path)
        Adds the new channel for the given path
    change_title(button_name)
        Opens QInputDialog to enter the new title for a given channel
    close_file(i)
        Closes a given channel and deletes all corresponding attributes
    load_colors()
        Loads a list of colors from a .txt file
    save_colors()
        Saves the list of colors as a .txt file
    set_color(n)
        Sets colorsdisp_all and colorselection in the given channel
    update_colors()
        Changes colors in self.colordisp_all and updates the scene in
        the main window
    update_viewport()
        Updates the scene in the main window
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Datasets")
        self.setModal(False)
        self.layout = QtWidgets.QGridLayout()
        self.warning = True
        # Per-channel widget lists; all kept index-aligned with
        # window.view.locs / locs_paths.
        self.checks = []
        self.title = []
        self.closebuttons = []
        self.colorselection = []
        self.colordisp_all = []
        self.intensitysettings = []
        self.setLayout(self.layout)
        # Global display options (rows 0-3 of column 0)
        self.legend = QtWidgets.QCheckBox("Show legend")
        self.wbackground = QtWidgets.QCheckBox(
            "Invert colors / white background"
        )
        self.auto_display = QtWidgets.QCheckBox("Automatic display update")
        self.auto_display.setChecked(True)
        self.auto_colors = QtWidgets.QCheckBox("Automatic coloring")
        self.layout.addWidget(self.legend, 0, 0)
        self.layout.addWidget(self.auto_display, 1, 0)
        self.layout.addWidget(self.wbackground, 2, 0)
        self.layout.addWidget(self.auto_colors, 3, 0)
        # Header row for the per-channel table (row 4)
        self.layout.addWidget(QtWidgets.QLabel("Files"), 4, 0)
        self.layout.addWidget(QtWidgets.QLabel("Change title"), 4, 1)
        self.layout.addWidget(QtWidgets.QLabel("Color"), 4, 2)
        self.layout.addWidget(QtWidgets.QLabel(""), 4, 3)
        self.layout.addWidget(QtWidgets.QLabel("Rel. Intensity"), 4, 4)
        self.layout.addWidget(QtWidgets.QLabel("Close"), 4, 5)
        self.legend.stateChanged.connect(self.update_viewport)
        self.wbackground.stateChanged.connect(self.update_viewport)
        self.auto_display.stateChanged.connect(self.update_viewport)
        self.auto_colors.stateChanged.connect(self.update_colors)
        # save and load color list
        save_button = QtWidgets.QPushButton("Save colors")
        self.layout.addWidget(
            save_button, 0, self.layout.columnCount() - 2, 1, 2
        )
        save_button.setFocusPolicy(QtCore.Qt.NoFocus)
        save_button.clicked.connect(self.save_colors)
        load_button = QtWidgets.QPushButton("Load colors")
        self.layout.addWidget(
            load_button, 1, self.layout.columnCount() - 2, 1, 2
        )
        load_button.setFocusPolicy(QtCore.Qt.NoFocus)
        load_button.clicked.connect(self.load_colors)
        # Named colors offered in the combo box; rgbf holds the matching
        # RGB triplets (same order).
        self.default_colors = [
            "red",
            "cyan",
            "green",
            "yellow",
            "blue",
            "magenta",
            "orange",
            "amethyst",
            "forestgreen",
            "carmine",
            "purple",
            "sage",
            "jade",
            "azure",
        ]
        self.rgbf = [
            [1, 0, 0],
            [0, 1, 1],
            [0, 1, 0],
            [1, 1, 0],
            [0, 0, 1],
            [1, 0, 1],
            [1, 0.5, 0],
            [0.5, 0.5, 1],
            [0, 0.5, 0],
            [0.5, 0, 0],
            [0.5, 0, 1],
            [0.5, 0.5, 0],
            [0, 0.5, 0.5],
            [0, 0.5, 1],
        ]

    def add_entry(self, path):
        """ Adds the new channel for the given path. """
        # display only the characters after the last '/'
        # for a long path
        if len(path) > 40:
            path = os.path.basename(path)
            path, ext = os.path.splitext(path)
        # Create 3 buttons for checking, naming and closing the channel
        c = QtWidgets.QCheckBox(path)
        currentline = self.layout.rowCount()
        t = QtWidgets.QPushButton("#")
        # objectName stores the row index so callbacks can find the channel
        t.setObjectName(str(currentline))
        p = QtWidgets.QPushButton("x")
        p.setObjectName(str(currentline))
        # Append and setup the buttons
        self.checks.append(c)
        self.checks[-1].setChecked(True)
        self.checks[-1].stateChanged.connect(self.update_viewport)
        self.title.append(t)
        self.title[-1].setAutoDefault(False)
        self.title[-1].clicked.connect(
            partial(self.change_title, t.objectName())
        )
        self.closebuttons.append(p)
        self.closebuttons[-1].setAutoDefault(False)
        self.closebuttons[-1].clicked.connect(
            partial(self.close_file, p.objectName())
        )
        # create the self.colorselection widget
        colordrop = QtWidgets.QComboBox(self)
        colordrop.setEditable(True)
        colordrop.lineEdit().setMaxLength(12)
        for color in self.default_colors:
            colordrop.addItem(color)
        # clamp to the last default color once all names are used up
        index = np.min([len(self.checks)-1, len(self.rgbf)-1])
        colordrop.setCurrentText(self.default_colors[index])
        colordrop.activated.connect(self.update_colors)
        self.colorselection.append(colordrop)
        self.colorselection[-1].currentIndexChanged.connect(
            partial(self.set_color, t.objectName())
        )
        # create the label widget to show current color
        colordisp = QtWidgets.QLabel(" ")
        palette = colordisp.palette()
        if self.auto_colors.isChecked():
            colors = get_colors(len(self.checks) + 1)
            r, g, b = colors[-1]
            palette.setColor(
                QtGui.QPalette.Window,
                QtGui.QColor.fromRgbF(r, g, b, 1)
            )
        else:
            palette.setColor(
                QtGui.QPalette.Window,
                QtGui.QColor.fromRgbF(
                    self.rgbf[index][0],
                    self.rgbf[index][1],
                    self.rgbf[index][2],
                    1,
                )
            )
        colordisp.setAutoFillBackground(True)
        colordisp.setPalette(palette)
        self.colordisp_all.append(colordisp)
        # create the relative intensity widget
        intensity = QtWidgets.QDoubleSpinBox(self)
        intensity.setKeyboardTracking(False)
        intensity.setDecimals(2)
        intensity.setValue(1.00)
        self.intensitysettings.append(intensity)
        self.intensitysettings[-1].valueChanged.connect(self.update_viewport)
        # add all the widgets to the Dataset Dialog
        self.layout.addWidget(c, currentline, 0)
        self.layout.addWidget(t, currentline, 1)
        self.layout.addWidget(colordrop, currentline, 2)
        self.layout.addWidget(colordisp, currentline, 3)
        self.layout.addWidget(intensity, currentline, 4)
        self.layout.addWidget(p, currentline, 5)
        # check if the number of channels surpassed the number of
        # default colors
        if len(self.checks) == len(self.default_colors):
            if self.warning:
                text = (
                    "The number of channels passed the number of default "
                    " colors. In case you would like to use your own color, "
                    " please insert the color's hexadecimal expression,"
                    " starting with '#', e.g. '#ffcdff' for pink or choose"
                    " the automatic coloring in the Files dialog."
                )
                QtWidgets.QMessageBox.information(self, "Warning", text)
                self.warning = False

    def update_colors(self):
        """
        Changes colors in self.colordisp_all and updates the scene in
        the main window
        """
        n_channels = len(self.checks)
        for i in range(n_channels):
            self.set_color(i)
        self.update_viewport()

    def change_title(self, button_name):
        """
        Opens QInputDialog to enter the new title for a given channel.
        """
        # button_name is the objectName set in add_entry; find its index
        for i in range(len(self.title)):
            if button_name == self.title[i].objectName():
                new_title, ok = QtWidgets.QInputDialog.getText(
                    self,
                    "Set the new title",
                    'Type "reset" to get the original title.'
                )
                if ok:
                    if new_title == "Reset" or new_title == "reset":
                        # restore the (shortened) file-based title
                        path = self.window.view.locs_paths[i]
                        if len(path) > 40:
                            path = os.path.basename(path)
                            new_title, ext = os.path.splitext(path)
                        self.checks[i].setText(new_title)
                    else:
                        self.checks[i].setText(new_title)
                    self.update_viewport()
                    # change size of the dialog
                    self.adjustSize()
                    # change name in the fast render dialog
                    self.window.fast_render_dialog.channel.setItemText(
                        i+1, new_title
                    )
                break

    def close_file(self, i, render=True):
        """
        Closes a given channel and deletes all corresponding attributes.
        """
        # i may be a button objectName (str) or a plain index (int)
        if type(i) == str:
            for j in range(len(self.closebuttons)):
                if i == self.closebuttons[j].objectName():
                    i = j
        # restart the main window if the last channel is closed
        if len(self.closebuttons) == 1:
            self.window.remove_locs()
        else:
            # remove widgets from the Dataset Dialog
            self.layout.removeWidget(self.checks[i])
            self.layout.removeWidget(self.title[i])
            self.layout.removeWidget(self.colorselection[i])
            self.layout.removeWidget(self.colordisp_all[i])
            self.layout.removeWidget(self.intensitysettings[i])
            self.layout.removeWidget(self.closebuttons[i])
            # delete the widgets from the lists
            del self.checks[i]
            del self.title[i]
            del self.colorselection[i]
            del self.colordisp_all[i]
            del self.intensitysettings[i]
            del self.closebuttons[i]
            # delete all the View attributes
            del self.window.view.locs[i]
            del self.window.view.locs_paths[i]
            del self.window.view.infos[i]
            del self.window.view.index_blocks[i]
            # delete zcoord from slicer dialog
            # NOTE(review): this only reads the element; the bare except
            # silently ignores a missing entry — confirm intent.
            try:
                self.window.slicer_dialog.zcoord[i]
            except:
                pass
            # delete attributes from the fast render dialog
            del self.window.view.all_locs[i]
            self.window.fast_render_dialog.on_file_closed(i)
            # adjust group color if needed
            if len(self.window.view.locs) == 1:
                if hasattr(self.window.view.locs[0], "group"):
                    self.window.view.group_color = (
                        self.window.view.get_group_color(
                            self.window.view.locs[0]
                        )
                    )
            # delete drift data if provided
            # NOTE(review): these look up self._drift etc. on the dialog,
            # while drift data presumably lives on the window; the bare
            # except hides the AttributeError — confirm.
            try:
                del self._drift[i]
                del self._driftfiles[i]
                del self.currentdrift[i]
            except:
                pass
            # update the window and adjust the size of the
            # Dataset Dialog
            if render:
                self.update_viewport()
                self.adjustSize()

    def update_viewport(self):
        """ Updates the scene in the main window. """
        if self.auto_display.isChecked():
            if self.window.view.viewport:
                self.window.view.update_scene()

    def set_color(self, n):
        """
        Sets colorsdisp_all and colorselection in the given channel.
        """
        # n may be a button objectName (str) or a plain index (int)
        if type(n) == str:
            for j in range(len(self.title)):
                if n == self.title[j].objectName():
                    n = j
        palette = self.colordisp_all[n].palette()
        color = self.colorselection[n].currentText()
        if self.auto_colors.isChecked():
            # evenly spaced hues over the number of channels
            n_channels = len(self.checks)
            r, g, b = get_colors(n_channels)[n]
            palette.setColor(
                QtGui.QPalette.Window,
                QtGui.QColor.fromRgbF(r, g, b, 1)
            )
        elif is_hexadecimal(color):
            # user-supplied '#rrggbb' string
            color = color.lstrip("#")
            r, g, b = tuple(
                int(color[i: i + 2], 16) / 255 for i in (0, 2, 4)
            )
            palette.setColor(
                QtGui.QPalette.Window, QtGui.QColor.fromRgbF(r, g, b, 1))
        elif color in self.default_colors:
            i = self.default_colors.index(color)
            palette.setColor(
                QtGui.QPalette.Window,
                QtGui.QColor.fromRgbF(
                    self.rgbf[i][0],
                    self.rgbf[i][1],
                    self.rgbf[i][2], 1
                )
            )
        self.colordisp_all[n].setPalette(palette)

    def save_colors(self):
        """ Saves the list of colors as a .txt file (one name per line). """
        colornames = [_.currentText() for _ in self.colorselection]
        out_path = self.window.view.locs_paths[0].replace(
            ".hdf5", "_colors.txt"
        )
        path, ext = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save colors to", out_path, filter="*txt"
        )
        if path:
            with open(path, "w") as file:
                for color in colornames:
                    file.write(color + "\n")

    def load_colors(self):
        """ Loads a list of colors from a .txt file (one name per line). """
        path, ext = QtWidgets.QFileDialog.getOpenFileName(
            self,
            "Load colors from .txt",
            directory=self.window.pwd,
            filter="*.txt",
        )
        if path:
            with open(path, "r") as file:
                colors = file.readlines()
            colornames = [color.rstrip() for color in colors]
            # check that the number of channels is smaller than
            # or equal to the number of color names in the .txt
            if len(self.checks) > len(colornames):
                raise ValueError("Txt file contains too few names")
            # check that all the names are valid
            for i, color in enumerate(colornames):
                if (
                    not color in self.default_colors
                    and not is_hexadecimal(color)
                ):
                    raise ValueError(
                        f"'{color}' at position {i+1} is invalid."
                    )
            # add the names to the 'Color' column (self.colorseletion)
            for i, color_ in enumerate(self.colorselection):
                color_.setCurrentText(colornames[i])
            self.update_colors()
class PlotDialog(QtWidgets.QDialog):
    """
    A class to plot a 3D scatter of picked localizations.
    Allows the user to keep the given picks or remove them
    (Yes=1 keep, No=0 remove, Cancel=2 abort).
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Structure")
        layout_grid = QtWidgets.QGridLayout(self)
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.label = QtWidgets.QLabel()
        layout_grid.addWidget(self.label, 0, 0, 1, 3)
        layout_grid.addWidget(self.canvas, 1, 0, 1, 3)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Yes
            | QtWidgets.QDialogButtonBox.No
            | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        layout_grid.addWidget(self.buttons)
        self.buttons.button(QtWidgets.QDialogButtonBox.Yes).clicked.connect(
            self.on_accept
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.No).clicked.connect(
            self.on_reject
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(
            self.on_cancel
        )

    def on_accept(self):
        # Yes: keep the pick
        self.setResult(1)
        self.result = 1
        self.close()

    def on_reject(self):
        # No: remove the pick
        self.setResult(0)
        self.result = 0
        self.close()

    def on_cancel(self):
        # Cancel: abort the review
        self.setResult(2)
        self.result = 2
        self.close()

    @staticmethod
    def getParams(all_picked_locs, current, length, mode, color_sys):
        """
        Plots the 3D scatter and returns the clicked button.

        mode == 0 means that the locs in picks are combined.
        mode == 1 means that locs from a given channel are plotted.
        """
        dialog = PlotDialog(None)
        fig = dialog.figure
        ax = fig.add_subplot(111, projection="3d")
        dialog.label.setText(
            "3D Scatterplot of pick {} of {}.".format(current + 1, length)
        )
        if mode == 1:
            locs = all_picked_locs[current]
            locs = stack_arrays(locs, asrecarray=True, usemask=False)
            # color by z, clipped to mean +/- 3 std (modifies locs["z"] view)
            colors = locs["z"][:]
            colors[
                colors > np.mean(locs["z"]) + 3 * np.std(locs["z"])
            ] = np.mean(locs["z"]) + 3 * np.std(locs["z"])
            colors[
                colors < np.mean(locs["z"]) - 3 * np.std(locs["z"])
            ] = np.mean(locs["z"]) - 3 * np.std(locs["z"])
            ax.scatter(locs["x"], locs["y"], locs["z"], c=colors, cmap="jet", s=2)
            ax.set_xlabel("X [Px]")
            ax.set_ylabel("Y [Px]")
            ax.set_zlabel("Z [Px]")
            # limit all axes to mean +/- 3 std
            ax.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax.set_zlim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            plt.gca().patch.set_facecolor("black")
            # NOTE(review): Axes3D.w_?axis was removed in matplotlib 3.8;
            # this presumably targets an older matplotlib — confirm.
            ax.w_xaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_yaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_zaxis.set_pane_color((0, 0, 0, 1.0))
        else:
            # one fixed color per channel; limits come from the last channel
            colors = color_sys
            for l in range(len(all_picked_locs)):
                locs = all_picked_locs[l][current]
                locs = stack_arrays(locs, asrecarray=True, usemask=False)
                ax.scatter(locs["x"], locs["y"], locs["z"], c=colors[l], s=2)
            ax.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax.set_zlim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax.set_xlabel("X [Px]")
            ax.set_ylabel("Y [Px]")
            ax.set_zlabel("Z [Px]")
            plt.gca().patch.set_facecolor("black")
            ax.w_xaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_yaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_zaxis.set_pane_color((0, 0, 0, 1.0))
        # exec_()'s return value is unused; result is set by the buttons
        result = dialog.exec_()
        return dialog.result
class PlotDialogIso(QtWidgets.QDialog):
    """
    A class to plot 4 scatter plots: XY, XZ and YZ projections and a
    3D plot.
    Allows the user to keep the given picks or remove them.
    Everything but the getParams method is identical to PlotDialog.
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Structure")
        layout_grid = QtWidgets.QGridLayout(self)
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.label = QtWidgets.QLabel()
        layout_grid.addWidget(self.label, 0, 0, 1, 3)
        layout_grid.addWidget(self.canvas, 1, 0, 1, 3)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Yes
            | QtWidgets.QDialogButtonBox.No
            | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        layout_grid.addWidget(self.buttons)
        self.buttons.button(QtWidgets.QDialogButtonBox.Yes).clicked.connect(
            self.on_accept
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.No).clicked.connect(
            self.on_reject
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(
            self.on_cancel
        )

    def on_accept(self):
        # Yes: keep the pick
        self.setResult(1)
        self.result = 1
        self.close()

    def on_reject(self):
        # No: remove the pick
        self.setResult(0)
        self.result = 0
        self.close()

    def on_cancel(self):
        # Cancel: abort the review
        self.setResult(2)
        self.result = 2
        self.close()

    @staticmethod
    def getParams(all_picked_locs, current, length, mode, color_sys):
        """
        Plots the 3D scatter and 3 projections and returns the clicked
        button.

        mode == 0 means that the locs in picks are combined.
        mode == 1 means that locs from a given channel are plotted.
        """
        dialog = PlotDialogIso(None)
        fig = dialog.figure
        ax = fig.add_subplot(221, projection="3d")
        dialog.label.setText(
            "3D Scatterplot of pick {} of {}.".format(current + 1, length)
        )
        # ax2/3/4 hold the XY, XZ and YZ projections
        ax2 = fig.add_subplot(222)
        ax3 = fig.add_subplot(223)
        ax4 = fig.add_subplot(224)

        if mode == 1:
            locs = all_picked_locs[current]
            locs = stack_arrays(locs, asrecarray=True, usemask=False)
            # color by z, clipped to mean +/- 3 std (modifies locs["z"] view)
            colors = locs["z"][:]
            colors[
                colors > np.mean(locs["z"]) + 3 * np.std(locs["z"])
            ] = np.mean(locs["z"]) + 3 * np.std(locs["z"])
            colors[
                colors < np.mean(locs["z"]) - 3 * np.std(locs["z"])
            ] = np.mean(locs["z"]) - 3 * np.std(locs["z"])
            ax.scatter(locs["x"], locs["y"], locs["z"], c=colors, cmap="jet", s=2)
            ax.set_xlabel("X [Px]")
            ax.set_ylabel("Y [Px]")
            ax.set_zlabel("Z [Px]")
            ax.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax.set_zlim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax.set_title("3D")
            # plt.gca().patch.set_facecolor('black')
            # NOTE(review): Axes3D.w_?axis was removed in matplotlib 3.8;
            # this presumably targets an older matplotlib — confirm.
            ax.w_xaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_yaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_zaxis.set_pane_color((0, 0, 0, 1.0))

            # AXES 2
            ax2.scatter(locs["x"], locs["y"], c=colors, cmap="jet", s=2)
            ax2.set_xlabel("X [Px]")
            ax2.set_ylabel("Y [Px]")
            ax2.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax2.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax2.set_title("XY")
            ax2.set_facecolor("black")

            # AXES 3
            ax3.scatter(locs["x"], locs["z"], c=colors, cmap="jet", s=2)
            ax3.set_xlabel("X [Px]")
            ax3.set_ylabel("Z [Px]")
            ax3.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax3.set_ylim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax3.set_title("XZ")
            ax3.set_facecolor("black")

            # AXES 4
            ax4.scatter(locs["y"], locs["z"], c=colors, cmap="jet", s=2)
            ax4.set_xlabel("Y [Px]")
            ax4.set_ylabel("Z [Px]")
            ax4.set_xlim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax4.set_ylim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax4.set_title("YZ")
            ax4.set_facecolor("black")

        else:
            # one fixed color per channel; limits come from the last channel
            colors = color_sys
            for l in range(len(all_picked_locs)):
                locs = all_picked_locs[l][current]
                locs = stack_arrays(locs, asrecarray=True, usemask=False)
                ax.scatter(locs["x"], locs["y"], locs["z"], c=colors[l], s=2)
                ax2.scatter(locs["x"], locs["y"], c=colors[l], s=2)
                ax3.scatter(locs["x"], locs["z"], c=colors[l], s=2)
                ax4.scatter(locs["y"], locs["z"], c=colors[l], s=2)

            ax.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax.set_zlim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax.set_xlabel("X [Px]")
            ax.set_ylabel("Y [Px]")
            ax.set_zlabel("Z [Px]")
            ax.w_xaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_yaxis.set_pane_color((0, 0, 0, 1.0))
            ax.w_zaxis.set_pane_color((0, 0, 0, 1.0))

            # AXES 2
            ax2.set_xlabel("X [Px]")
            ax2.set_ylabel("Y [Px]")
            ax2.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax2.set_ylim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax2.set_title("XY")
            ax2.set_facecolor("black")

            # AXES 3
            ax3.set_xlabel("X [Px]")
            ax3.set_ylabel("Z [Px]")
            ax3.set_xlim(
                np.mean(locs["x"]) - 3 * np.std(locs["x"]),
                np.mean(locs["x"]) + 3 * np.std(locs["x"]),
            )
            ax3.set_ylim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax3.set_title("XZ")
            ax3.set_facecolor("black")

            # AXES 4
            ax4.set_xlabel("Y [Px]")
            ax4.set_ylabel("Z [Px]")
            ax4.set_xlim(
                np.mean(locs["y"]) - 3 * np.std(locs["y"]),
                np.mean(locs["y"]) + 3 * np.std(locs["y"]),
            )
            ax4.set_ylim(
                np.mean(locs["z"]) - 3 * np.std(locs["z"]),
                np.mean(locs["z"]) + 3 * np.std(locs["z"]),
            )
            ax4.set_title("YZ")
            ax4.set_facecolor("black")
        # exec_()'s return value is unused; result is set by the buttons
        result = dialog.exec_()
        return dialog.result
class ClsDlg3D(QtWidgets.QDialog):
    """
    A dialog to cluster picked localizations with k-means in 3D.

    Shows a 3D scatter plot of the localizations of the current pick
    (left: raw locs colored by cluster label, right: cluster centers)
    and lets the user accept/reject clusters, change the number of
    clusters or cancel.

    Result codes (``self.result``):
        0 - reject, 1 - accept, 2 - cancel, 3 - re-cluster with a new
        number of clusters.
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Structure")
        self.showMaximized()
        self.layout_grid = QtWidgets.QGridLayout(self)

        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.label = QtWidgets.QLabel()

        self.layout_grid.addWidget(self.label, 0, 0, 1, 5)
        self.layout_grid.addWidget(self.canvas, 1, 0, 8, 5)

        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Yes
            | QtWidgets.QDialogButtonBox.No
            | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        self.layout_grid.addWidget(self.buttons, 10, 0, 1, 3)
        self.layout_grid.addWidget(QtWidgets.QLabel("No clusters:"), 10, 3, 1, 1)

        self.n_clusters_spin = QtWidgets.QSpinBox()
        self.layout_grid.addWidget(self.n_clusters_spin, 10, 4, 1, 1)

        self.buttons.button(QtWidgets.QDialogButtonBox.Yes).clicked.connect(
            self.on_accept
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.No).clicked.connect(
            self.on_reject
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(
            self.on_cancel
        )

        self.start_clusters = 0
        self.n_clusters_spin.valueChanged.connect(self.on_cluster)
        self.n_lines = 12  # next free row in the grid for per-cluster info
        self.layout_grid.addWidget(QtWidgets.QLabel("Select"), 11, 4, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("X-Center"), 11, 0, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("Y-Center"), 11, 1, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("Z-Center"), 11, 2, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("Counts"), 11, 3, 1, 1)
        self.checks = []  # one checkbox per cluster, checked = keep cluster

    def add_clusters(self, element, x_mean, y_mean, z_mean):
        """Append one row (checkbox + center + count) for a cluster.

        ``element`` is a ``(label, count)`` pair from ``Counter.items()``.
        """
        c = QtWidgets.QCheckBox(str(element[0] + 1))

        self.layout_grid.addWidget(c, self.n_lines, 4, 1, 1)
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(x_mean)), self.n_lines, 0, 1, 1
        )
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(y_mean)), self.n_lines, 1, 1, 1
        )
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(z_mean)), self.n_lines, 2, 1, 1
        )
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(element[1])), self.n_lines, 3, 1, 1
        )

        self.n_lines += 1
        self.checks.append(c)
        self.checks[-1].setChecked(True)

    def on_accept(self):
        self.setResult(1)
        self.result = 1
        self.close()

    def on_reject(self):
        self.setResult(0)
        self.result = 0
        self.close()

    def on_cancel(self):
        self.setResult(2)
        self.result = 2
        self.close()

    def on_cluster(self):
        # only execute once the cluster number is changed
        if self.n_clusters_spin.value() != self.start_clusters:
            self.setResult(3)
            self.result = 3
            self.close()

    @staticmethod
    def getParams(
        all_picked_locs, current, length, n_clusters, color_sys, pixelsize
    ):
        """
        Show the dialog for the current pick, run k-means and return
        ``(result_code, n_clusters, labeled_locs, clustered_locs)``.
        """
        dialog = ClsDlg3D(None)
        dialog.start_clusters = n_clusters
        dialog.n_clusters_spin.setValue(n_clusters)

        fig = dialog.figure
        ax1 = fig.add_subplot(121, projection="3d")
        ax2 = fig.add_subplot(122, projection="3d")
        dialog.label.setText(
            "3D Scatterplot of Pick "
            + str(current + 1)
            + " of: "
            + str(length)
            + "."
        )

        print("Mode 1")
        locs = all_picked_locs[current]
        locs = stack_arrays(locs, asrecarray=True, usemask=False)

        est = KMeans(n_clusters=n_clusters)

        # scale x/y to nm so they are commensurate with z for clustering
        # (assumes z is stored in nm — TODO confirm against loader)
        scaled_locs = lib.append_to_rec(
            locs, locs["x"] * pixelsize, "x_scaled"
        )
        scaled_locs = lib.append_to_rec(
            scaled_locs, locs["y"] * pixelsize, "y_scaled"
        )

        X = np.asarray(scaled_locs["x_scaled"])
        Y = np.asarray(scaled_locs["y_scaled"])
        Z = np.asarray(scaled_locs["z"])

        est.fit(np.stack((X, Y, Z), axis=1))

        labels = est.labels_
        counts = list(Counter(labels).items())

        # np.float was removed in NumPy 1.24 — use the builtin float
        ax1.scatter(locs["x"], locs["y"], locs["z"], c=labels.astype(float), s=2)
        ax1.set_xlabel("X")
        ax1.set_ylabel("Y")
        ax1.set_zlabel("Z")

        cent = est.cluster_centers_
        ax2.scatter(cent[:, 0], cent[:, 1], cent[:, 2], s=2)
        for element in counts:
            x_mean = cent[element[0], 0]
            y_mean = cent[element[0], 1]
            z_mean = cent[element[0], 2]
            dialog.add_clusters(element, x_mean, y_mean, z_mean)
            ax2.text(x_mean, y_mean, z_mean, element[1], fontsize=12)

        ax1.set_xlabel("X [Px]")
        ax1.set_ylabel("Y [Px]")
        ax1.set_zlabel("Z [Px]")

        ax2.set_xlabel("X [nm]")
        ax2.set_ylabel("Y [nm]")
        ax2.set_zlabel("Z [nm]")

        # Axes3D.w_xaxis aliases were removed in matplotlib 3.8;
        # the plain xaxis/yaxis/zaxis attributes are equivalent
        ax1.xaxis.set_pane_color((0, 0, 0, 1.0))
        ax1.yaxis.set_pane_color((0, 0, 0, 1.0))
        ax1.zaxis.set_pane_color((0, 0, 0, 1.0))
        plt.gca().patch.set_facecolor("black")

        dialog.exec_()

        # clusters whose checkbox was unticked are discarded (label -> 0)
        checks = [not _.isChecked() for _ in dialog.checks]
        checks = np.asarray(np.where(checks)) + 1
        checks = checks[0]

        labels += 1
        labels = [0 if x in checks else x for x in labels]
        labels = np.asarray(labels)

        l_locs = lib.append_to_rec(scaled_locs, labels, "cluster")
        l_locs_new_group = l_locs.copy()
        # shift the original group id so the cluster id fits in the
        # lower decimal digits of the combined group id
        power = np.round(n_clusters / 10) + 1
        l_locs_new_group["group"] = (
            l_locs_new_group["group"] * 10 ** power
            + l_locs_new_group["cluster"]
        )

        # Combine clustered locs
        clustered_locs = []
        for element in np.unique(labels):
            if element != 0:
                clustered_locs.append(
                    l_locs_new_group[l_locs["cluster"] == element]
                )

        return (
            dialog.result,
            dialog.n_clusters_spin.value(),
            l_locs,
            clustered_locs,
        )
class ClsDlg2D(QtWidgets.QDialog):
    """
    A dialog to cluster picked localizations with k-means in 2D.

    Shows a 2D scatter plot of the localizations of the current pick
    (left: raw locs colored by cluster label, right: cluster centers)
    and lets the user accept/reject clusters, change the number of
    clusters or cancel.

    Result codes (``self.result``):
        0 - reject, 1 - accept, 2 - cancel, 3 - re-cluster with a new
        number of clusters.
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Structure")
        self.layout_grid = QtWidgets.QGridLayout(self)

        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.label = QtWidgets.QLabel()

        self.layout_grid.addWidget(self.label, 0, 0, 1, 5)
        self.layout_grid.addWidget(self.canvas, 1, 0, 1, 5)

        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Yes
            | QtWidgets.QDialogButtonBox.No
            | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        self.layout_grid.addWidget(self.buttons, 2, 0, 1, 3)
        self.layout_grid.addWidget(QtWidgets.QLabel("No clusters:"), 2, 3, 1, 1)

        self.n_clusters_spin = QtWidgets.QSpinBox()
        self.layout_grid.addWidget(self.n_clusters_spin, 2, 4, 1, 1)

        self.buttons.button(QtWidgets.QDialogButtonBox.Yes).clicked.connect(
            self.on_accept
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.No).clicked.connect(
            self.on_reject
        )
        self.buttons.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(
            self.on_cancel
        )

        self.start_clusters = 0
        self.n_clusters_spin.valueChanged.connect(self.on_cluster)
        self.n_lines = 4  # next free row in the grid for per-cluster info
        self.layout_grid.addWidget(QtWidgets.QLabel("Select"), 3, 3, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("X-Center"), 3, 0, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("Y-Center"), 3, 1, 1, 1)
        self.layout_grid.addWidget(QtWidgets.QLabel("Counts"), 3, 2, 1, 1)
        self.checks = []  # one checkbox per cluster, checked = keep cluster

    def add_clusters(self, element, x_mean, y_mean):
        """Append one row (checkbox + center + count) for a cluster.

        ``element`` is a ``(label, count)`` pair from ``Counter.items()``.
        """
        c = QtWidgets.QCheckBox(str(element[0] + 1))

        self.layout_grid.addWidget(c, self.n_lines, 3, 1, 1)
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(x_mean)), self.n_lines, 0, 1, 1
        )
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(y_mean)), self.n_lines, 1, 1, 1
        )
        self.layout_grid.addWidget(
            QtWidgets.QLabel(str(element[1])), self.n_lines, 2, 1, 1
        )

        self.n_lines += 1
        self.checks.append(c)
        self.checks[-1].setChecked(True)

    def on_accept(self):
        self.setResult(1)
        self.result = 1
        self.close()

    def on_reject(self):
        self.setResult(0)
        self.result = 0
        self.close()

    def on_cancel(self):
        self.setResult(2)
        self.result = 2
        self.close()

    def on_cluster(self):
        # only execute once the cluster number is changed
        if self.n_clusters_spin.value() != self.start_clusters:
            self.setResult(3)
            self.result = 3
            self.close()

    @staticmethod
    def getParams(all_picked_locs, current, length, n_clusters, color_sys):
        """
        Show the dialog for the current pick, run k-means and return
        ``(result_code, n_clusters, labeled_locs, clustered_locs)``.
        """
        dialog = ClsDlg2D(None)
        dialog.start_clusters = n_clusters
        dialog.n_clusters_spin.setValue(n_clusters)

        fig = dialog.figure
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        dialog.label.setText(
            "2D Scatterplot of Pick "
            + str(current + 1)
            + " of: "
            + str(length)
            + "."
        )

        print("Mode 1")
        locs = all_picked_locs[current]
        locs = stack_arrays(locs, asrecarray=True, usemask=False)

        est = KMeans(n_clusters=n_clusters)

        scaled_locs = lib.append_to_rec(locs, locs["x"], "x_scaled")
        scaled_locs = lib.append_to_rec(scaled_locs, locs["y"], "y_scaled")

        X = np.asarray(scaled_locs["x_scaled"])
        Y = np.asarray(scaled_locs["y_scaled"])

        est.fit(np.stack((X, Y), axis=1))

        labels = est.labels_
        counts = list(Counter(labels).items())

        # np.float was removed in NumPy 1.24 — use the builtin float
        ax1.scatter(locs["x"], locs["y"], c=labels.astype(float), s=2)
        ax1.set_xlabel("X")
        ax1.set_ylabel("Y")

        cent = est.cluster_centers_
        ax2.scatter(cent[:, 0], cent[:, 1], s=2)
        for element in counts:
            x_mean = cent[element[0], 0]
            y_mean = cent[element[0], 1]
            dialog.add_clusters(element, x_mean, y_mean)
            ax2.text(x_mean, y_mean, element[1], fontsize=12)

        ax1.set_xlabel("X [Px]")
        ax1.set_ylabel("Y [Px]")

        ax2.set_xlabel("X [nm]")
        ax2.set_ylabel("Y [nm]")

        dialog.exec_()

        # clusters whose checkbox was unticked are discarded (label -> 0)
        checks = [not _.isChecked() for _ in dialog.checks]
        checks = np.asarray(np.where(checks)) + 1
        checks = checks[0]

        labels += 1
        labels = [0 if x in checks else x for x in labels]
        labels = np.asarray(labels)

        l_locs = lib.append_to_rec(scaled_locs, labels, "cluster")
        l_locs_new_group = l_locs.copy()
        # shift the original group id so the cluster id fits in the
        # lower decimal digits of the combined group id
        power = np.round(n_clusters / 10) + 1
        l_locs_new_group["group"] = (
            l_locs_new_group["group"] * 10 ** power
            + l_locs_new_group["cluster"]
        )

        # Combine clustered locs
        clustered_locs = []
        for element in np.unique(labels):
            if element != 0:
                clustered_locs.append(
                    l_locs_new_group[l_locs["cluster"] == element]
                )

        return (
            dialog.result,
            dialog.n_clusters_spin.value(),
            l_locs,
            clustered_locs,
        )
class LinkDialog(QtWidgets.QDialog):
    """
    A class to obtain inputs for linking localizations.

    ...

    Attributes
    ----------
    max_dark_time : QDoubleSpinBox
        maximum gap (frames) between localizations that may still be
        linked into the same group
    max_distance : QDoubleSpinBox
        maximum distance (pixels) between localizations that may still
        be linked into the same group

    Methods
    -------
    getParams(parent=None)
        Creates the dialog and returns the requested values for linking
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Enter parameters")

        main_layout = QtWidgets.QVBoxLayout(self)
        form = QtWidgets.QGridLayout()

        # maximum linking distance (pixels)
        self.max_distance = QtWidgets.QDoubleSpinBox()
        self.max_distance.setRange(0, 1e6)
        self.max_distance.setValue(1)
        form.addWidget(QtWidgets.QLabel("Max. distance (pixels):"), 0, 0)
        form.addWidget(self.max_distance, 0, 1)

        # maximum transient dark time (frames)
        self.max_dark_time = QtWidgets.QDoubleSpinBox()
        self.max_dark_time.setRange(0, 1e9)
        self.max_dark_time.setValue(1)
        form.addWidget(QtWidgets.QLabel("Max. transient dark frames:"), 1, 0)
        form.addWidget(self.max_dark_time, 1, 1)

        main_layout.addLayout(form)
        main_layout.addLayout(QtWidgets.QHBoxLayout())

        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        main_layout.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getParams(parent=None):
        """
        Creates the dialog and returns the requested values for
        linking.
        """
        dialog = LinkDialog(parent)
        accepted = dialog.exec_() == QtWidgets.QDialog.Accepted
        return (
            dialog.max_distance.value(),
            dialog.max_dark_time.value(),
            accepted,
        )
class DbscanDialog(QtWidgets.QDialog):
    """
    A class to obtain inputs for DBSCAN.
    See scikit-learn DBSCAN for more info.

    ...

    Attributes
    ----------
    density : QSpinBox
        contains min_samples for DBSCAN (see scikit-learn)
    radius : QDoubleSpinBox
        contains epsilon (pixels) for DBSCAN (see scikit-learn)

    Methods
    -------
    getParams(parent=None)
        Creates the dialog and returns the requested values for DBSCAN
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Enter parameters")
        vbox = QtWidgets.QVBoxLayout(self)
        grid = QtWidgets.QGridLayout()
        # epsilon (pixels) for DBSCAN
        grid.addWidget(QtWidgets.QLabel("Radius (pixels):"), 0, 0)
        self.radius = QtWidgets.QDoubleSpinBox()
        self.radius.setRange(0.001, 1e6)
        self.radius.setValue(0.1)
        self.radius.setDecimals(3)
        self.radius.setSingleStep(0.001)
        grid.addWidget(self.radius, 0, 1)
        # min_samples for DBSCAN
        grid.addWidget(QtWidgets.QLabel("Min. samples:"), 1, 0)
        self.density = QtWidgets.QSpinBox()
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.density.setRange(1, 1_000_000)
        self.density.setValue(4)
        grid.addWidget(self.density, 1, 1)
        vbox.addLayout(grid)
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # save cluster centers
        self.save_centers = QtWidgets.QCheckBox("Save cluster centers")
        self.save_centers.setChecked(False)
        grid.addWidget(self.save_centers, 2, 0, 1, 2)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        vbox.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getParams(parent=None):
        """
        Creates the dialog and returns the requested values for DBSCAN.
        """
        dialog = DbscanDialog(parent)
        result = dialog.exec_()
        return (
            dialog.radius.value(),
            dialog.density.value(),
            dialog.save_centers.isChecked(),
            result == QtWidgets.QDialog.Accepted,
        )
class HdbscanDialog(QtWidgets.QDialog):
    """
    A class to obtain inputs for HDBSCAN.
    See https://hdbscan.readthedocs.io/en/latest/api.html#hdbscan
    for more info.

    ...

    Attributes
    ----------
    cluster_eps : QDoubleSpinBox
        contains cluster_selection_epsilon (pixels), see the website
    min_cluster : QSpinBox
        contains the minimum number of locs in a cluster
    min_samples : QSpinBox
        contains the number of locs in a neighbourhood for a loc to be
        considered a core point, see the website

    Methods
    -------
    getParams(parent=None)
        Creates the dialog and returns the requested values for HDBSCAN
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Enter parameters")
        vbox = QtWidgets.QVBoxLayout(self)
        grid = QtWidgets.QGridLayout()
        # minimum number of locs per cluster
        grid.addWidget(QtWidgets.QLabel("Min. cluster size:"), 0, 0)
        self.min_cluster = QtWidgets.QSpinBox()
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_cluster.setRange(1, 1_000_000)
        self.min_cluster.setValue(10)
        grid.addWidget(self.min_cluster, 0, 1)
        # core-point neighbourhood size
        grid.addWidget(QtWidgets.QLabel("Min. samples:"), 1, 0)
        self.min_samples = QtWidgets.QSpinBox()
        self.min_samples.setRange(1, 1_000_000)
        self.min_samples.setValue(10)
        grid.addWidget(self.min_samples, 1, 1)
        # cluster_selection_epsilon (pixels)
        grid.addWidget(QtWidgets.QLabel(
            "Intercluster max.\ndistance (pixels):"), 2, 0
        )
        self.cluster_eps = QtWidgets.QDoubleSpinBox()
        self.cluster_eps.setRange(0, 1e6)
        self.cluster_eps.setValue(0.0)
        self.cluster_eps.setDecimals(3)
        self.cluster_eps.setSingleStep(0.001)
        grid.addWidget(self.cluster_eps, 2, 1)
        vbox.addLayout(grid)
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # save cluster centers
        self.save_centers = QtWidgets.QCheckBox("Save cluster centers")
        self.save_centers.setChecked(False)
        grid.addWidget(self.save_centers, 3, 0, 1, 2)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        vbox.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getParams(parent=None):
        """
        Creates the dialog and returns the requested values for
        HDBSCAN.
        """
        dialog = HdbscanDialog(parent)
        result = dialog.exec_()
        return (
            dialog.min_cluster.value(),
            dialog.min_samples.value(),
            dialog.cluster_eps.value(),
            dialog.save_centers.isChecked(),
            result == QtWidgets.QDialog.Accepted,
        )
class SMLMDialog3D(QtWidgets.QDialog):
    """
    A class to obtain inputs for SMLM clusterer (3D).

    ...

    Attributes
    ----------
    radius_xy : QDoubleSpinBox
        contains clustering radius in x and y directions
    radius_z : QDoubleSpinBox
        contains clustering radius in z direction
        (typically =2*radius_xy)
    min_locs : QSpinBox
        contains minimum number of locs in cluster

    Methods
    -------
    getParams(parent=None)
        Creates the dialog and returns the requested values for
        clustering
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Enter parameters (3D)")
        vbox = QtWidgets.QVBoxLayout(self)
        grid = QtWidgets.QGridLayout()
        # radius xy
        grid.addWidget(QtWidgets.QLabel("Cluster radius xy (pixels):"), 0, 0)
        self.radius_xy = QtWidgets.QDoubleSpinBox()
        self.radius_xy.setRange(0.0001, 1e3)
        self.radius_xy.setDecimals(4)
        self.radius_xy.setValue(0.1)
        grid.addWidget(self.radius_xy, 0, 1)
        # radius z
        grid.addWidget(QtWidgets.QLabel("Cluster radius z (pixels):"), 1, 0)
        self.radius_z = QtWidgets.QDoubleSpinBox()
        self.radius_z.setRange(0, 1e3)
        self.radius_z.setDecimals(4)
        self.radius_z.setValue(0.25)
        grid.addWidget(self.radius_z, 1, 1)
        # min no. locs
        grid.addWidget(QtWidgets.QLabel("Min. no. locs:"), 2, 0)
        self.min_locs = QtWidgets.QSpinBox()
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_locs.setRange(1, 1_000_000)
        self.min_locs.setValue(10)
        grid.addWidget(self.min_locs, 2, 1)
        # save cluster centers
        self.save_centers = QtWidgets.QCheckBox("Save cluster centers")
        self.save_centers.setChecked(False)
        grid.addWidget(self.save_centers, 3, 0, 1, 2)
        # perform basic frame analysis
        self.frame_analysis = QtWidgets.QCheckBox(
            "Perform basic frame analysis"
        )
        self.frame_analysis.setChecked(True)
        grid.addWidget(self.frame_analysis, 4, 0, 1, 2)
        vbox.addLayout(grid)
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        vbox.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getParams(parent=None):
        """
        Creates the dialog and returns the requested values for
        SMLM clusterer (3D).
        """
        dialog = SMLMDialog3D(parent)
        result = dialog.exec_()
        return (
            dialog.radius_xy.value(),
            dialog.radius_z.value(),
            dialog.min_locs.value(),
            dialog.save_centers.isChecked(),
            dialog.frame_analysis.isChecked(),
            result == QtWidgets.QDialog.Accepted,
        )
class SMLMDialog2D(QtWidgets.QDialog):
    """
    A class to obtain inputs for SMLM clusterer (2D).

    ...

    Attributes
    ----------
    radius : QDoubleSpinBox
        contains clustering radius in x and y directions
    min_locs : QSpinBox
        contains minimum number of locs in cluster

    Methods
    -------
    getParams(parent=None)
        Creates the dialog and returns the requested values for
        clustering
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Enter parameters (2D)")
        vbox = QtWidgets.QVBoxLayout(self)
        grid = QtWidgets.QGridLayout()
        # clustering radius
        grid.addWidget(QtWidgets.QLabel("Cluster radius (pixels):"), 0, 0)
        self.radius = QtWidgets.QDoubleSpinBox()
        self.radius.setRange(0.0001, 1e3)
        self.radius.setDecimals(4)
        self.radius.setValue(0.1)
        grid.addWidget(self.radius, 0, 1)
        # min no. locs
        grid.addWidget(QtWidgets.QLabel("Min. no. locs:"), 1, 0)
        self.min_locs = QtWidgets.QSpinBox()
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_locs.setRange(1, 1_000_000)
        self.min_locs.setValue(10)
        grid.addWidget(self.min_locs, 1, 1)
        # save cluster centers
        self.save_centers = QtWidgets.QCheckBox("Save cluster centers")
        self.save_centers.setChecked(False)
        grid.addWidget(self.save_centers, 2, 0, 1, 2)
        # perform basic frame analysis
        self.frame_analysis = QtWidgets.QCheckBox(
            "Perform basic frame analysis"
        )
        self.frame_analysis.setChecked(True)
        grid.addWidget(self.frame_analysis, 3, 0, 1, 2)
        vbox.addLayout(grid)
        hbox = QtWidgets.QHBoxLayout()
        vbox.addLayout(hbox)
        # OK and Cancel buttons
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        vbox.addWidget(self.buttons)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    @staticmethod
    def getParams(parent=None):
        """
        Creates the dialog and returns the requested values for
        SMLM clusterer (2D).
        """
        dialog = SMLMDialog2D(parent)
        result = dialog.exec_()
        return (
            dialog.radius.value(),
            dialog.min_locs.value(),
            dialog.save_centers.isChecked(),
            dialog.frame_analysis.isChecked(),
            result == QtWidgets.QDialog.Accepted,
        )
class TestClustererDialog(QtWidgets.QDialog):
    """
    A class to test clustering paramaters on a region of interest.

    The user needs to pick a single region of interest using the Pick
    tool. Use Alt + {W, A, S, D, -, =} to change field of view.

    Calculating recommended values works for DBSCAN only. Search radius
    is taken to be NeNA precision across the whole image. Please keep
    in mind that this value does not have to be the optimal one.

    ...

    Attributes
    ----------
    channel : int
        Channel index for localizations that are tested
    clusterer_name : QComboBox
        contains all clusterer types available in Picasso: Render
    display_all_locs : QCheckBox
        if ticked, unclustered locs are displayed in separete channel
    pick : list
        coordinates of the last pick (region of interest) that was
        displayed
    pick_size : float
        width (if rectangular) or diameter (if circular) of the pick
    test_dbscan_params : QWidget
        contains widgets with parameters for DBSCAN
    test_hdbscan_params : QWidget
        contains widgets with parameters for HDBSCAN
    test_smlm_params : QWidget
        contains widhtes with parameters for SMLM clusterer
    view : QLabel
        widget for displaying rendered clustered localizations
    window : QMainWindow
        instance of the main Picasso: Render window

    Methods
    -------
    assign_groups(locs, labels)
        Filters out non-clustered locs and adds group column to locs
    cluster(locs, params):
        Clusters locs using the chosen clusterer and its params
    get_cluster_params()
        Extracts clustering parameters for a given clusterer into a
        dictionary
    get_full_fov()
        Updates viewport in self.view
    pick_changed()
        Checks if region of interest has changed since the last
        rendering
    test_clusterer()
        Prepares clustering parameters, performs clustering and
        renders localizations
    """

    def __init__(self, window):
        # NOTE: parent is deliberately not passed to super(), so the
        # dialog is a free-standing top-level window
        super().__init__()
        self.setWindowTitle("Test Clusterer")
        this_directory = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(this_directory, "icons", "render.ico")
        icon = QtGui.QIcon(icon_path)
        self.setWindowIcon(icon)
        # last rendered pick and its size; None until first rendering
        self.pick = None
        self.pick_size = None
        self.window = window
        self.view = TestClustererView(self)
        layout = QtWidgets.QGridLayout(self)
        self.setLayout(layout)
        # explanation
        layout.addWidget(
            QtWidgets.QLabel(
                "Pick a region of interest and test different clustering\n"
                "parameters.\n\n"
                "Use shortcuts Alt + {W, A, S, D, -, =} to change FOV.\n"
            ), 0, 0
        )
        # parameters
        parameters_box = QtWidgets.QGroupBox("Parameters")
        layout.addWidget(parameters_box, 1, 0)
        parameters_grid = QtWidgets.QGridLayout(parameters_box)
        # parameters - choose clusterer
        self.clusterer_name = QtWidgets.QComboBox()
        for name in ['DBSCAN', 'HDBSCAN', 'SMLM']:
            self.clusterer_name.addItem(name)
        parameters_grid.addWidget(self.clusterer_name, 0, 0)
        # parameters - clusterer parameters
        # stacked widget index follows combo box index (DBSCAN=0,
        # HDBSCAN=1, SMLM=2), so the add order below must match
        parameters_stack = QtWidgets.QStackedWidget()
        parameters_grid.addWidget(parameters_stack, 1, 0, 1, 2)
        self.clusterer_name.currentIndexChanged.connect(
            parameters_stack.setCurrentIndex
        )
        self.test_dbscan_params = TestDBSCANParams(self)
        parameters_stack.addWidget(self.test_dbscan_params)
        self.test_hdbscan_params = TestHDBSCANParams(self)
        parameters_stack.addWidget(self.test_hdbscan_params)
        self.test_smlm_params = TestSMLMParams(self)
        parameters_stack.addWidget(self.test_smlm_params)
        # parameters - display mode
        self.display_all_locs = QtWidgets.QCheckBox(
            "Display non-clustered localizations"
        )
        self.display_all_locs.setChecked(False)
        self.display_all_locs.stateChanged.connect(self.view.update_scene)
        parameters_grid.addWidget(self.display_all_locs, 2, 0, 1, 2)
        # parameters - test
        test_button = QtWidgets.QPushButton("Test")
        test_button.clicked.connect(self.test_clusterer)
        test_button.setDefault(True)
        parameters_grid.addWidget(test_button, 3, 0)
        # display settings - return to full FOV
        full_fov = QtWidgets.QPushButton("Full FOV")
        full_fov.clicked.connect(self.get_full_fov)
        parameters_grid.addWidget(full_fov, 3, 1)
        # view
        view_box = QtWidgets.QGroupBox("View")
        layout.addWidget(view_box, 0, 1, 3, 1)
        view_grid = QtWidgets.QGridLayout(view_box)
        view_grid.addWidget(self.view)
        # shortcuts for navigating in View
        # arrows
        left_action = QtWidgets.QAction(self)
        left_action.setShortcut("Alt+A")
        left_action.triggered.connect(self.view.to_left)
        self.addAction(left_action)
        right_action = QtWidgets.QAction(self)
        right_action.setShortcut("Alt+D")
        right_action.triggered.connect(self.view.to_right)
        self.addAction(right_action)
        up_action = QtWidgets.QAction(self)
        up_action.setShortcut("Alt+W")
        up_action.triggered.connect(self.view.to_up)
        self.addAction(up_action)
        down_action = QtWidgets.QAction(self)
        down_action.setShortcut("Alt+S")
        down_action.triggered.connect(self.view.to_down)
        self.addAction(down_action)
        # zooming
        zoomin_action = QtWidgets.QAction(self)
        zoomin_action.setShortcut("Alt+=")
        zoomin_action.triggered.connect(self.view.zoom_in)
        self.addAction(zoomin_action)
        zoomout_action = QtWidgets.QAction(self)
        zoomout_action.setShortcut("Alt+-")
        zoomout_action.triggered.connect(self.view.zoom_out)
        self.addAction(zoomout_action)

    def cluster(self, locs, params):
        """
        Clusters locs using the chosen clusterer.

        Parameters
        ----------
        locs : np.recarray
            Contains all picked localizations from a given channel
        params : dict
            Contains clustering paramters for a given clusterer

        Returns
        -------
        np.recarray
            Contains localizations that were clustered. Cluster label
            is saved in "group" dtype.
            Returns None if HDBSCAN was requested but the hdbscan
            package is not installed.
        """
        # for converting z coordinates
        # z is divided by pixelsize so all three axes are in pixel
        # units (assumes z is stored in nm — TODO confirm)
        pixelsize = self.window.display_settings_dlg.pixelsize.value()
        if hasattr(locs, "z"):
            X = np.vstack((locs.x, locs.y, locs.z / pixelsize)).T
        else:
            X = np.vstack((locs.x, locs.y)).T
        clusterer_name = self.clusterer_name.currentText()
        if clusterer_name == "DBSCAN":
            clusterer_ = DBSCAN(
                eps=params["radius"],
                min_samples=params["min_samples"],
            ).fit(X)
            labels = clusterer_.labels_
        elif clusterer_name == "HDBSCAN":
            if HDBSCAN_IMPORTED:
                clusterer_ = HDBSCAN(
                    min_samples=params["min_samples"],
                    min_cluster_size=params["min_cluster_size"],
                    cluster_selection_epsilon=params["intercluster_radius"],
                ).fit(X)
                labels = clusterer_.labels_
            else:
                # signals the caller (test_clusterer) to show a message
                return None
        elif clusterer_name == "SMLM":
            if params["frame_analysis"]:
                frame = locs.frame
            else:
                frame = None
            if hasattr(locs, "z"):
                # rescale z so that one clustering radius (radius_xy)
                # corresponds to radius_z along the z axis
                X[:, 2] = X[:, 2] * params["radius_xy"] / params["radius_z"]
                labels = clusterer._cluster(
                    X,
                    params["radius_xy"],
                    params["min_cluster_size"],
                    frame,
                )
            else:
                # NOTE(review): this branch issues the identical
                # _cluster call as above; the if/else only differs by
                # the z-rescaling — candidate for simplification
                labels = clusterer._cluster(
                    X,
                    params["radius_xy"],
                    params["min_cluster_size"],
                    frame,
                )
        locs = self.assign_groups(locs, labels)
        if len(locs):
            # cache per-group colors for rendering in the view
            self.view.group_color = self.window.view.get_group_color(locs)
        return locs

    def assign_groups(self, locs, labels):
        """
        Filters out non-clustered locs and adds group column to locs.

        Parameters
        ----------
        locs : np.recarray
            Contains all picked localizations from a given channel
        labels : np.array
            Contains cluster indeces in scikit-learn format, i.e.
            -1 means no cluster, other integers are cluster ids.

        Returns
        -------
        np.recarray
            Contains localizations that were clustered, with "group"
            dtype specifying cluster indeces
        """
        group = np.int32(labels)
        locs = lib.append_to_rec(locs, group, "group")
        # scikit-learn labels noise points with -1; drop them
        locs = locs[locs.group != -1]
        return locs

    def get_cluster_params(self):
        """
        Extracts clustering parameters for a given clusterer into a
        dictionary.

        Keys depend on the selected clusterer:
        DBSCAN: radius, min_samples;
        HDBSCAN: min_cluster_size, min_samples, intercluster_radius;
        SMLM: radius_xy, radius_z, min_cluster_size, frame_analysis.
        """
        params = {}
        clusterer_name = self.clusterer_name.currentText()
        if clusterer_name == "DBSCAN":
            params["radius"] = self.test_dbscan_params.radius.value()
            params["min_samples"] = self.test_dbscan_params.min_samples.value()
        elif clusterer_name == "HDBSCAN":
            params["min_cluster_size"] = (
                self.test_hdbscan_params.min_cluster_size.value()
            )
            params["min_samples"] = (
                self.test_hdbscan_params.min_samples.value()
            )
            params["intercluster_radius"] = (
                self.test_hdbscan_params.cluster_eps.value()
            )
        elif clusterer_name == "SMLM":
            params["radius_xy"] = self.test_smlm_params.radius_xy.value()
            params["radius_z"] = self.test_smlm_params.radius_z.value()
            params["min_cluster_size"] = self.test_smlm_params.min_locs.value()
            params["frame_analysis"] = self.test_smlm_params.fa.isChecked()
        return params

    def get_full_fov(self):
        """ Updates viewport in self.view. """
        if self.view.locs is not None:
            self.view.viewport = self.view.get_full_fov()
            self.view.update_scene()

    def test_clusterer(self):
        """
        Prepares clustering parameters, performs clustering and
        renders localizations.

        Raises
        ------
        ValueError
            If no pick or more than one pick region is selected.
        """
        # make sure one pick is present
        if len(self.window.view._picks) != 1:
            raise ValueError("Choose only one pick region")

        # get clustering parameters
        params = self.get_cluster_params()

        # extract picked locs
        self.channel = self.window.view.get_channel("Test clusterer")
        locs = self.window.view.picked_locs(self.channel)[0]

        # cluster picked locs
        self.view.locs = self.cluster(locs, params)

        # update viewport if pick has changed
        if self.pick_changed():
            self.view.viewport = self.view.get_full_fov()

        # cluster() returns None when the hdbscan package is missing
        if self.view.locs is None:
            message = (
                "No HDBSCAN detected. Please install\n"
                "the python package HDBSCAN*."
            )
            QtWidgets.QMessageBox.information(
                self,
                "No HDBSCAN",
                message,
            )
            return

        # render clustered locs
        self.view.update_scene()

    def pick_changed(self):
        """
        Checks if region of interest has changed since the last
        rendering.

        Returns
        -------
        bool
            True if the pick or its size differs from the last
            rendered one (and updates the cached values), else False.
        """
        pick = self.window.view._picks[0]
        if self.window.tools_settings_dialog.pick_shape == "Circle":
            pick_size = self.window.tools_settings_dialog.pick_diameter.value()
        else:
            pick_size = self.window.tools_settings_dialog.pick_width.value()

        if pick != self.pick or pick_size != self.pick_size:
            self.pick = pick
            self.pick_size = pick_size
            return True
        else:
            return False
class TestDBSCANParams(QtWidgets.QWidget):
    """
    Class containing user-selected clustering parameters for DBSCAN.

    Attributes
    ----------
    radius : QDoubleSpinBox
        epsilon (pixels) for DBSCAN
    min_samples : QSpinBox
        min_samples for DBSCAN
    """

    def __init__(self, dialog):
        super().__init__()
        self.dialog = dialog
        grid = QtWidgets.QGridLayout(self)
        # epsilon (pixels)
        grid.addWidget(QtWidgets.QLabel("Radius (pixels):"), 0, 0)
        self.radius = QtWidgets.QDoubleSpinBox()
        self.radius.setKeyboardTracking(False)
        self.radius.setRange(0.001, 1e6)
        self.radius.setValue(0.1)
        self.radius.setDecimals(3)
        self.radius.setSingleStep(0.001)
        grid.addWidget(self.radius, 0, 1)
        # min_samples
        grid.addWidget(QtWidgets.QLabel("Min. samples:"), 1, 0)
        self.min_samples = QtWidgets.QSpinBox()
        self.min_samples.setKeyboardTracking(False)
        self.min_samples.setValue(4)
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_samples.setRange(1, 1_000_000)
        self.min_samples.setSingleStep(1)
        grid.addWidget(self.min_samples, 1, 1)
        grid.setRowStretch(2, 1)
class TestHDBSCANParams(QtWidgets.QWidget):
    """
    Class containing user-selected clustering parameters for HDBSCAN.

    Attributes
    ----------
    min_cluster_size : QSpinBox
        minimum number of locs in a cluster
    min_samples : QSpinBox
        neighbourhood size for a loc to be considered a core point
    cluster_eps : QDoubleSpinBox
        cluster_selection_epsilon (pixels)
    """

    def __init__(self, dialog):
        super().__init__()
        self.dialog = dialog
        grid = QtWidgets.QGridLayout(self)
        # minimum cluster size
        grid.addWidget(QtWidgets.QLabel("Min. cluster size:"), 0, 0)
        self.min_cluster_size = QtWidgets.QSpinBox()
        self.min_cluster_size.setKeyboardTracking(False)
        self.min_cluster_size.setValue(10)
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_cluster_size.setRange(1, 1_000_000)
        self.min_cluster_size.setSingleStep(1)
        grid.addWidget(self.min_cluster_size, 0, 1)
        # core-point neighbourhood size
        grid.addWidget(QtWidgets.QLabel("Min. samples"), 1, 0)
        self.min_samples = QtWidgets.QSpinBox()
        self.min_samples.setKeyboardTracking(False)
        self.min_samples.setValue(10)
        self.min_samples.setRange(1, 1_000_000)
        self.min_samples.setSingleStep(1)
        grid.addWidget(self.min_samples, 1, 1)
        # cluster_selection_epsilon (pixels)
        grid.addWidget(
            QtWidgets.QLabel("Intercluster max.\ndistance (pixels):"), 2, 0
        )
        self.cluster_eps = QtWidgets.QDoubleSpinBox()
        self.cluster_eps.setKeyboardTracking(False)
        self.cluster_eps.setRange(0, 1e6)
        self.cluster_eps.setValue(0.0)
        self.cluster_eps.setDecimals(3)
        self.cluster_eps.setSingleStep(0.001)
        grid.addWidget(self.cluster_eps, 2, 1)
        grid.setRowStretch(3, 1)
class TestSMLMParams(QtWidgets.QWidget):
    """
    Class containing user-selected clustering parameters for SMLM
    clusterer.

    Attributes
    ----------
    radius_xy : QDoubleSpinBox
        clustering radius in x and y (pixels)
    radius_z : QDoubleSpinBox
        clustering radius in z (3D data only)
    min_locs : QSpinBox
        minimum number of locs in a cluster
    fa : QCheckBox
        whether to perform basic frame analysis
    """

    def __init__(self, dialog):
        super().__init__()
        self.dialog = dialog
        grid = QtWidgets.QGridLayout(self)
        # clustering radius in x/y
        grid.addWidget(QtWidgets.QLabel("Radius xy [pixels]:"), 0, 0)
        self.radius_xy = QtWidgets.QDoubleSpinBox()
        self.radius_xy.setKeyboardTracking(False)
        self.radius_xy.setValue(0.1)
        self.radius_xy.setRange(0.0001, 1e3)
        self.radius_xy.setDecimals(4)
        grid.addWidget(self.radius_xy, 0, 1)
        # clustering radius in z (used only for 3D data)
        grid.addWidget(QtWidgets.QLabel("Radius z (3D only):"), 1, 0)
        self.radius_z = QtWidgets.QDoubleSpinBox()
        self.radius_z.setKeyboardTracking(False)
        self.radius_z.setValue(0.25)
        self.radius_z.setRange(0, 1e3)
        self.radius_z.setDecimals(4)
        grid.addWidget(self.radius_z, 1, 1)
        # minimum cluster size
        grid.addWidget(QtWidgets.QLabel("Min. no. locs"), 2, 0)
        self.min_locs = QtWidgets.QSpinBox()
        self.min_locs.setKeyboardTracking(False)
        self.min_locs.setValue(10)
        # QSpinBox.setRange takes ints; the float 1e6 relied on implicit
        # conversion that PyQt6 / strict sip rejects
        self.min_locs.setRange(1, 1_000_000)
        self.min_locs.setSingleStep(1)
        grid.addWidget(self.min_locs, 2, 1)
        # basic frame analysis toggle
        self.fa = QtWidgets.QCheckBox("Frame analysis")
        self.fa.setChecked(True)
        grid.addWidget(self.fa, 3, 0, 1, 2)
        grid.setRowStretch(4, 1)
class TestClustererView(QtWidgets.QLabel):
    """
    Class used for rendering and displaying clustered localizations.
    ...
    Attributes
    ----------
    dialog : QDialog
        Instance of the Test Clusterer dialog
    locs : np.recarray
        Clustered localizations; None until assigned externally by the
        dialog
    _size : int
        Specifies size of this widget (display pixels)
    view : QLabel
        Instance of View class. Used for calling functions
    viewport : list
        Contains two elements specifying min and max values of x and y
        to be displayed. None until the first call to update_scene
    Methods
    -------
    get_full_fov()
        Finds field of view that contains all localizations
    get_optimal_oversampling()
        Finds optimal oversampling for the current viewport
    scale_contrast(images)
        Finds optimal contrast for images
    shift_viewport(dx, dy)
        Moves viewport by a specified amount
    split_locs()
        Splits self.locs into a list. It has either two (all
        clustered locs and all picked locs) or N_GROUP_COLORS elements
        (each one for a group color)
    to_down()
        Shifts viewport downwards
    to_left()
        Shifts viewport to the left
    to_right()
        Shifts viewport to the right
    to_up()
        Shifts viewport upwards
    update_scene()
        Renders localizations
    viewport_height()
        Returns viewport's height in pixels
    viewport_width()
        Returns viewport's width in pixels
    zoom(factor)
        Changes size of viewport given factor
    zoom_in()
        Decreases size of viewport
    zoom_out()
        Increases size of viewport
    """
    def __init__(self, dialog):
        super().__init__()
        self.dialog = dialog
        self.view = dialog.window.view
        self.viewport = None  # set on first update_scene via get_full_fov
        self.locs = None  # assigned externally by the dialog
        self._size = 500
        # fixed square display area of _size x _size pixels
        self.setMinimumSize(self._size, self._size)
        self.setMaximumSize(self._size, self._size)
    def to_down(self):
        """ Shifts viewport downwards by 30 % of its height. """
        if self.viewport is not None:
            h = self.viewport_height()
            dy = 0.3 * h
            self.shift_viewport(0, dy)
    def to_left(self):
        """ Shifts viewport to the left by 30 % of its width. """
        if self.viewport is not None:
            w = self.viewport_width()
            dx = -0.3 * w
            self.shift_viewport(dx, 0)
    def to_right(self):
        """ Shifts viewport to the right by 30 % of its width. """
        if self.viewport is not None:
            w = self.viewport_width()
            dx = 0.3 * w
            self.shift_viewport(dx, 0)
    def to_up(self):
        """ Shifts viewport upwards by 30 % of its height. """
        if self.viewport is not None:
            h = self.viewport_height()
            dy = -0.3 * h
            self.shift_viewport(0, dy)
    def zoom_in(self):
        """ Decreases size of viewport. """
        if self.viewport is not None:
            self.zoom(1 / ZOOM)
    def zoom_out(self):
        """ Increases size of viewport. """
        if self.viewport is not None:
            self.zoom(ZOOM)
    def zoom(self, factor):
        """
        Changes size of viewport, keeping its center fixed.
        Parameters
        ----------
        factor : float
            Specifies the factor by which viewport is changed
        """
        height = self.viewport_height()
        width = self.viewport_width()
        new_height = height * factor
        new_width = width * factor
        # viewport coordinates are (y, x) pairs: (min corner, max corner)
        center_y, center_x = self.view.viewport_center(self.viewport)
        self.viewport = [
            (center_y - new_height / 2, center_x - new_width / 2),
            (center_y + new_height / 2, center_x + new_width / 2),
        ]
        self.update_scene()
    def viewport_width(self):
        """ Returns viewport's width in pixels. """
        return self.viewport[1][1] - self.viewport[0][1]
    def viewport_height(self):
        """ Returns viewport's height in pixels. """
        return self.viewport[1][0] - self.viewport[0][0]
    def shift_viewport(self, dx, dy):
        """ Moves viewport by a specified amount (camera pixels). """
        (y_min, x_min), (y_max, x_max) = self.viewport
        self.viewport = [(y_min + dy, x_min + dx), (y_max + dy, x_max + dx)]
        self.update_scene()
    def update_scene(self):
        """ Renders localizations and displays them in this widget. """
        if self.locs is not None:
            if self.viewport is None:
                # first render: show the whole field of view
                self.viewport = self.get_full_fov()
            # split locs according to their group colors
            locs = self.split_locs()
            # render kwargs
            kwargs = {
                'oversampling': self.get_optimal_oversampling(),
                'viewport': self.viewport,
                'blur_method': 'convolve',
                'min_blur_width': 0,
            }
            # render images for all channels
            # (render.render returns (n_locs, image); keep the image)
            images = [render.render(_, **kwargs)[1] for _ in locs]
            # scale image
            # NOTE: scale_contrast coerces the list into a 3D ndarray
            images = self.scale_contrast(images)
            # create image to display
            Y, X = images.shape[1:]
            bgra = np.zeros((Y, X, 4), dtype=np.float32)
            colors = get_colors(images.shape[0])
            for color, image in zip(colors, images):  # color each channel
                # colors are (r, g, b); buffer layout is b, g, r, a
                bgra[:, :, 0] += color[2] * image
                bgra[:, :, 1] += color[1] * image
                bgra[:, :, 2] += color[0] * image
            bgra = np.minimum(bgra, 1)  # clip additive blending to 1
            bgra = self.view.to_8bit(bgra)
            bgra[:, :, 3].fill(255)  # fully opaque alpha channel
            qimage = QtGui.QImage(
                bgra.data, X, Y, QtGui.QImage.Format_RGB32
            ).scaled(
                self._size,
                self._size,
                QtCore.Qt.KeepAspectRatioByExpanding
            )
            self.setPixmap(QtGui.QPixmap.fromImage(qimage))
    def split_locs(self):
        """
        Splits self.locs into a list. It has either two (all
        clustered locs and all picked locs) or N_GROUP_COLORS elements
        (each one for a group color).

        NOTE(review): the multi-channel branch reads self.group_color,
        which is not set in __init__ — presumably assigned by the
        dialog before rendering; verify against the caller.
        """
        if self.dialog.display_all_locs.isChecked():
            # two channels, all locs and clustered locs
            channel = self.dialog.channel
            locs = [
                self.dialog.window.view.picked_locs(channel)[0],
                self.locs,
            ]
        else:
            # multiple channels, each for one group color
            locs = [
                self.locs[self.group_color == _] for _ in range(N_GROUP_COLORS)
            ]
        return locs
    def get_optimal_oversampling(self):
        """
        Finds optimal oversampling for the current viewport.
        Returns
        -------
        float
            The optimal oversampling, i.e. number of display pixels per
            camera pixels (slightly under-filled by the 1.05 margin)
        """
        height = self.viewport_height()
        width = self.viewport_width()
        return (self._size / min(height, width)) / 1.05
    def scale_contrast(self, images):
        """
        Finds optimal contrast for images.
        Parameters
        ----------
        images : list of np.arrays
            Arrays with rendered locs (grayscale)
        Returns
        -------
        np.array
            Scaled images stacked into one array, values in [0, 1]

        NOTE(review): min() over the filtered generator raises
        ValueError if every image is all-zero (nothing clustered) —
        presumably callers guarantee at least one non-empty channel;
        confirm.
        """
        # maximum over channels, ignoring channels where nothing was
        # rendered (their maximum is 0.0)
        upper = min(
            [
                _.max()
                for _ in images  # if no locs were clustered
                if _.max() != 0  # the maximum value in image is 0.0
            ]
        ) / 4
        # upper = INITIAL_REL_MAXIMUM * max_
        # `upper` is a NumPy scalar, so dividing the list by it coerces
        # `images` into a single stacked ndarray
        images = images / upper
        images[~np.isfinite(images)] = 0  # guard against 0/0
        images = np.minimum(images, 1.0)
        images = np.maximum(images, 0.0)
        return images
    def get_full_fov(self):
        """
        Finds field of view that contains all localizations, with a
        one-pixel margin on each side.
        Returns
        -------
        tuple of two lists
            Specifies viewport as ((y_min, x_min), (y_max, x_max))
        """
        x_min = np.min(self.locs.x) - 1
        x_max = np.max(self.locs.x) + 1
        y_min = np.min(self.locs.y) - 1
        y_max = np.max(self.locs.y) + 1
        return ([y_min, x_min], [y_max, x_max])
class DriftPlotWindow(QtWidgets.QTabWidget):
    """
    Window displaying the estimated drift in 2D or 3D.
    ...
    Attributes
    ----------
    figure : plt.Figure
        Figure the drift curves are drawn into
    canvas : FigureCanvas
        PyQt5 backend used for displaying plots
    Methods
    -------
    plot_2d(drift)
        Draws 2 drift plots (x/y vs. frame and x vs. y)
    plot_3d(drift)
        Draws 3 drift plots (additionally z vs. frame)
    """
    def __init__(self, info_dialog):
        super().__init__()
        self.setWindowTitle("Drift Plot")
        icon_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "icons", "render.ico"
        )
        self.setWindowIcon(QtGui.QIcon(icon_path))
        self.resize(1000, 500)
        self.figure = plt.Figure()
        self.canvas = FigureCanvas(self.figure)
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.canvas)
        layout.addWidget(NavigationToolbar(self.canvas, self))
    def plot_3d(self, drift):
        """
        Draws 3 plots: frames vs x/y, x vs y in time, frames vs z.
        Parameters
        ----------
        drift : np.recarray
            Contains 3 dtypes: x, y and z. Stores drift in each
            coordinate (pixels)
        """
        self.figure.clear()
        # x/y drift over time
        frame_ax = self.figure.add_subplot(131)
        frame_ax.plot(drift.x, label="x")
        frame_ax.plot(drift.y, label="y")
        frame_ax.legend(loc="best")
        frame_ax.set_xlabel("Frame")
        frame_ax.set_ylabel("Drift (pixel)")
        # trajectory in the x-y plane; third color of the default cycle
        trajectory_ax = self.figure.add_subplot(132)
        trajectory_color = list(plt.rcParams["axes.prop_cycle"])[2]["color"]
        trajectory_ax.plot(drift.x, drift.y, color=trajectory_color)
        trajectory_ax.set_xlabel("x")
        trajectory_ax.set_ylabel("y")
        # axial (z) drift over time
        z_ax = self.figure.add_subplot(133)
        z_ax.plot(drift.z, label="z")
        z_ax.legend(loc="best")
        z_ax.set_xlabel("Frame")
        z_ax.set_ylabel("Drift (nm)")
        self.canvas.draw()
    def plot_2d(self, drift):
        """
        Draws 2 plots: frames vs x/y, x vs y in time.
        Parameters
        ----------
        drift : np.recarray
            Contains 2 dtypes: x and y. Stores drift in each
            coordinate (pixels)
        """
        self.figure.clear()
        # x/y drift over time
        frame_ax = self.figure.add_subplot(121)
        frame_ax.plot(drift.x, label="x")
        frame_ax.plot(drift.y, label="y")
        frame_ax.legend(loc="best")
        frame_ax.set_xlabel("Frame")
        frame_ax.set_ylabel("Drift (pixel)")
        # trajectory in the x-y plane; third color of the default cycle
        trajectory_ax = self.figure.add_subplot(122)
        trajectory_color = list(plt.rcParams["axes.prop_cycle"])[2]["color"]
        trajectory_ax.plot(drift.x, drift.y, color=trajectory_color)
        trajectory_ax.set_xlabel("x")
        trajectory_ax.set_ylabel("y")
        self.canvas.draw()
class ChangeFOV(QtWidgets.QDialog):
    """
    A class for manually changing field of view.
    ...
    Attributes
    ----------
    h_box : QDoubleSpinBox
        contains the height of the viewport (pixels)
    w_box : QDoubleSpinBox
        contains the width of the viewport (pixels)
    x_box : QDoubleSpinBox
        contains the minimum x coordinate (pixels) to be displayed
    y_box : QDoubleSpinBox
        contains the minimum y coordinate (pixels) to be displayed
    Methods
    -------
    load_fov()
        Used for loading a FOV from a .txt file
    save_fov()
        Used for saving the current FOV as a .txt file
    update_scene()
        Updates the scene in the main window and Display section of the
        Info Dialog
    """
    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Change field of view")
        self.setModal(False)
        self.layout = QtWidgets.QGridLayout()
        self.setLayout(self.layout)
        self.layout.addWidget(QtWidgets.QLabel("X:"), 0, 0)
        self.x_box = QtWidgets.QDoubleSpinBox()
        self.x_box.setKeyboardTracking(False)
        self.x_box.setRange(-100, 1e6)
        self.layout.addWidget(self.x_box, 0, 1)
        self.layout.addWidget(QtWidgets.QLabel("Y :"), 1, 0)
        self.y_box = QtWidgets.QDoubleSpinBox()
        self.y_box.setKeyboardTracking(False)
        self.y_box.setRange(-100, 1e6)
        self.layout.addWidget(self.y_box, 1, 1)
        self.layout.addWidget(QtWidgets.QLabel("Width:"), 2, 0)
        self.w_box = QtWidgets.QDoubleSpinBox()
        self.w_box.setKeyboardTracking(False)
        self.w_box.setRange(0, 1e3)
        self.layout.addWidget(self.w_box, 2, 1)
        self.layout.addWidget(QtWidgets.QLabel("Height:"), 3, 0)
        self.h_box = QtWidgets.QDoubleSpinBox()
        self.h_box.setKeyboardTracking(False)
        self.h_box.setRange(0, 1e3)
        self.layout.addWidget(self.h_box, 3, 1)
        self.apply = QtWidgets.QPushButton("Apply")
        self.layout.addWidget(self.apply, 4, 0)
        self.apply.clicked.connect(self.update_scene)
        self.savefov = QtWidgets.QPushButton("Save FOV")
        self.layout.addWidget(self.savefov, 5, 0)
        self.savefov.clicked.connect(self.save_fov)
        self.loadfov = QtWidgets.QPushButton("Load FOV")
        self.layout.addWidget(self.loadfov, 6, 0)
        self.loadfov.clicked.connect(self.load_fov)
    def save_fov(self):
        """ Used for saving the current FOV as a .txt file. """
        # suggest a name derived from the first loaded locs file
        path = self.window.view.locs_paths[0]
        base, ext = os.path.splitext(path)
        out_path = base + "_fov.txt"
        path, ext = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save FOV to", out_path, filter="*.txt"
        )
        if path:  # empty string if the user cancelled the dialog
            fov = np.array([
                self.x_box.value(),
                self.y_box.value(),
                self.w_box.value(),
                self.h_box.value(),
            ])
            np.savetxt(path, fov)
    def load_fov(self):
        """ Used for loading a FOV from a .txt file. """
        path, ext = QtWidgets.QFileDialog.getOpenFileName(
            self, "Load FOV from", filter="*.txt"
        )
        if path:  # empty string if the user cancelled the dialog
            [x, y, w, h] = np.loadtxt(path)
            self.x_box.setValue(x)
            self.y_box.setValue(y)
            self.w_box.setValue(w)
            self.h_box.setValue(h)
            self.update_scene()
    def update_scene(self):
        """
        Updates the scene in the main window and Display section of the
        Info Dialog.
        """
        x_min = self.x_box.value()
        y_min = self.y_box.value()
        x_max = self.x_box.value() + self.w_box.value()
        y_max = self.y_box.value() + self.h_box.value()
        # viewport coordinates are (y, x) pairs: (min corner, max corner)
        viewport = [(y_min, x_min), (y_max, x_max)]
        self.window.view.update_scene(viewport=viewport)
        self.window.info_dialog.xy_label.setText(
            "{:.2f} / {:.2f} ".format(x_min, y_min)
        )
        self.window.info_dialog.wh_label.setText(
            "{:.2f} / {:.2f} pixel".format(
                self.w_box.value(), self.h_box.value()
            )
        )
class InfoDialog(QtWidgets.QDialog):
    """
    A class to show information about the current display, fit
    precision, number of locs and picks, including QPAINT.
    ...
    Attributes
    ----------
    change_display : QPushButton
        opens self.change_fov
    change_fov : ChangeFOV(QDialog)
        dialog for changing field of view
    height_label : QLabel
        contains the height of the window (pixels)
    dark_mean : QLabel
        shows the mean dark time (frames) in all picks
    dark_std : QLabel
        shows the std dark time (frames) in all picks
    fit_precision : QLabel
        shows median fit precision of the first channel (pixels)
    influx_rate : FloatEdit(QLineEdit)
        contains the calculated or input influx rate (1/frames)
    locs_label : QLabel
        shows the number of locs in the current FOV
    lp : float
        NeNA localization precision (pixels). None, if not calculated yet
    max_dark_time : QSpinBox
        contains the maximum gap between localizations (frames) to be
        considered as belonging to the same group of linked locs
    movie_grid : QGridLayout
        contains all the info about the fit precision
    nena_button : QPushButton
        calculates nearest neighbor based analysis fit precision
    n_localizations_mean : QLabel
        shows the mean number of locs in all picks
    n_localizations_std : QLabel
        shows the std number of locs in all picks
    n_picks : QLabel
        shows the number of picks
    n_units_mean : QLabel
        shows the calculated mean number of binding sites in all picks
    n_units_std : QLabel
        shows the calculated std number of binding sites in all picks
    picks_grid : QGridLayout
        contains all the info about the picks
    rmsd_mean : QLabel
        shows the mean root mean square displacement in all picks in
        x and y axes
    rmsd_std : QLabel
        shows the std root mean square displacement in all picks in
        x and y axes
    rmsd_z_mean : QLabel
        shows the mean root mean square displacement in all picks in
        z axis
    rmsd_z_std : QLabel
        shows the std root mean square displacement in all picks in
        z axis
    units_per_pick : QSpinBox
        contains the number of binding sites per pick
    wh_label : QLabel
        displays the width and height of FOV (pixels)
    window : Window(QMainWindow)
        main window instance
    width_label : QLabel
        contains the width of the window (pixels)
    xy_label : QLabel
        shows the minimum x and y coordinates in FOV (pixels)
    Methods
    -------
    calibrate_influx()
        Calculates influx rate (1/frames)
    calculate_nena_lp()
        Calculates and plots NeNA precision in a given channel
    calculate_n_units()
        Calculates number of units in each pick
    update_n_units()
        Displays the mean and std number of units in the Dialog
    """
    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Info")
        self.setModal(False)
        self.lp = None  # NeNA precision; set by calculate_nena_lp
        self.change_fov = ChangeFOV(self.window)
        vbox = QtWidgets.QVBoxLayout(self)
        # Display
        display_groupbox = QtWidgets.QGroupBox("Display")
        vbox.addWidget(display_groupbox)
        display_grid = QtWidgets.QGridLayout(display_groupbox)
        display_grid.addWidget(QtWidgets.QLabel("Image width:"), 0, 0)
        self.width_label = QtWidgets.QLabel()
        display_grid.addWidget(self.width_label, 0, 1)
        display_grid.addWidget(QtWidgets.QLabel("Image height:"), 1, 0)
        self.height_label = QtWidgets.QLabel()
        display_grid.addWidget(self.height_label, 1, 1)
        display_grid.addWidget(QtWidgets.QLabel("View X / Y:"), 2, 0)
        self.xy_label = QtWidgets.QLabel()
        display_grid.addWidget(self.xy_label, 2, 1)
        display_grid.addWidget(QtWidgets.QLabel("View width / height:"), 3, 0)
        self.wh_label = QtWidgets.QLabel()
        display_grid.addWidget(self.wh_label, 3, 1)
        self.change_display = QtWidgets.QPushButton("Change field of view")
        display_grid.addWidget(self.change_display, 4, 0)
        self.change_display.clicked.connect(self.change_fov.show)
        # Movie
        movie_groupbox = QtWidgets.QGroupBox("Movie")
        vbox.addWidget(movie_groupbox)
        self.movie_grid = QtWidgets.QGridLayout(movie_groupbox)
        self.movie_grid.addWidget(
            QtWidgets.QLabel("Median fit precision:"), 0, 0
        )
        self.fit_precision = QtWidgets.QLabel("-")
        self.movie_grid.addWidget(self.fit_precision, 0, 1)
        self.movie_grid.addWidget(QtWidgets.QLabel("NeNA precision:"), 1, 0)
        self.nena_button = QtWidgets.QPushButton("Calculate")
        self.nena_button.clicked.connect(self.calculate_nena_lp)
        self.nena_button.setDefault(False)
        self.nena_button.setAutoDefault(False)
        self.movie_grid.addWidget(self.nena_button, 1, 1)
        # FOV
        fov_groupbox = QtWidgets.QGroupBox("Field of view")
        vbox.addWidget(fov_groupbox)
        fov_grid = QtWidgets.QGridLayout(fov_groupbox)
        fov_grid.addWidget(QtWidgets.QLabel("# Localizations:"), 0, 0)
        self.locs_label = QtWidgets.QLabel()
        fov_grid.addWidget(self.locs_label, 0, 1)
        # Picks
        picks_groupbox = QtWidgets.QGroupBox("Picks")
        vbox.addWidget(picks_groupbox)
        self.picks_grid = QtWidgets.QGridLayout(picks_groupbox)
        self.picks_grid.addWidget(QtWidgets.QLabel("# Picks:"), 0, 0)
        self.n_picks = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.n_picks, 0, 1)
        compute_pick_info_button = QtWidgets.QPushButton("Calculate info below")
        compute_pick_info_button.clicked.connect(
            self.window.view.update_pick_info_long
        )
        self.picks_grid.addWidget(compute_pick_info_button, 1, 0, 1, 3)
        # column headers; rich text tags must be properly closed
        self.picks_grid.addWidget(QtWidgets.QLabel("<b>Mean</b>"), 2, 1)
        self.picks_grid.addWidget(QtWidgets.QLabel("<b>Std</b>"), 2, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("# Localizations:"), row, 0)
        self.n_localizations_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.n_localizations_mean, row, 1)
        self.n_localizations_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.n_localizations_std, row, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("RMSD to COM:"), row, 0)
        self.rmsd_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.rmsd_mean, row, 1)
        self.rmsd_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.rmsd_std, row, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("RMSD in z:"), row, 0)
        self.rmsd_z_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.rmsd_z_mean, row, 1)
        self.rmsd_z_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.rmsd_z_std, row, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(
            QtWidgets.QLabel("Ignore dark times <="), row, 0
        )
        self.max_dark_time = QtWidgets.QSpinBox()
        # QSpinBox expects int arguments; 1e9 is a float
        self.max_dark_time.setRange(0, 1_000_000_000)
        self.max_dark_time.setValue(1)
        self.picks_grid.addWidget(self.max_dark_time, row, 1, 1, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("Length:"), row, 0)
        self.length_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.length_mean, row, 1)
        self.length_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.length_std, row, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("Dark time:"), row, 0)
        self.dark_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.dark_mean, row, 1)
        self.dark_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.dark_std, row, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("# Units per pick:"), row, 0)
        self.units_per_pick = QtWidgets.QSpinBox()
        # QSpinBox expects int arguments; 1e6 is a float
        self.units_per_pick.setRange(1, 1_000_000)
        self.units_per_pick.setValue(1)
        self.picks_grid.addWidget(self.units_per_pick, row, 1, 1, 2)
        calculate_influx_button = QtWidgets.QPushButton("Calibrate influx")
        calculate_influx_button.clicked.connect(self.calibrate_influx)
        self.picks_grid.addWidget(
            calculate_influx_button, self.picks_grid.rowCount(), 0, 1, 3
        )
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(
            QtWidgets.QLabel("Influx rate (1/frames):"), row, 0
        )
        self.influx_rate = FloatEdit()
        self.influx_rate.setValue(0.03)
        self.influx_rate.valueChanged.connect(self.update_n_units)
        self.picks_grid.addWidget(self.influx_rate, row, 1, 1, 2)
        row = self.picks_grid.rowCount()
        self.picks_grid.addWidget(QtWidgets.QLabel("# Units:"), row, 0)
        self.n_units_mean = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.n_units_mean, row, 1)
        self.n_units_std = QtWidgets.QLabel()
        self.picks_grid.addWidget(self.n_units_std, row, 2)
        self.pick_hist_window = PickHistWindow(self)
        pick_hists = QtWidgets.QPushButton("Histograms")
        pick_hists.clicked.connect(self.pick_hist_window.show)
        self.picks_grid.addWidget(
            pick_hists, self.picks_grid.rowCount(), 0, 1, 3
        )
    def calculate_nena_lp(self):
        """ Calculates and plots NeNA precision in a given channel. """
        channel = self.window.view.get_channel("Calculate NeNA precision")
        if channel is not None:
            locs = self.window.view.locs[channel]
            info = self.window.view.infos[channel]
            # modify the movie grid: replace the button with the result
            self.nena_button.setParent(None)
            self.movie_grid.removeWidget(self.nena_button)
            progress = lib.ProgressDialog(
                "Calculating NeNA precision", 0, 100, self
            )
            result_lp = postprocess.nena(locs, info, progress.set_value)
            self.nena_label = QtWidgets.QLabel()
            self.movie_grid.addWidget(self.nena_label, 1, 1)
            self.nena_result, self.lp = result_lp
            # convert from camera pixels to nm for display
            self.lp *= self.window.display_settings_dlg.pixelsize.value()
            self.nena_label.setText("{:.3} nm".format(self.lp))
            show_plot_button = QtWidgets.QPushButton("Show plot")
            self.movie_grid.addWidget(
                show_plot_button, self.movie_grid.rowCount() - 1, 2
            )
            # Nena plot
            self.nena_window = NenaPlotWindow(self)
            self.nena_window.plot(self.nena_result)
            show_plot_button.clicked.connect(self.nena_window.show)
    def calibrate_influx(self):
        """
        Calculates influx rate (1/frames).

        NOTE(review): reads self.pick_info, which is not set in
        __init__ — presumably filled by
        View.update_pick_info_long; verify before calling.
        """
        influx = (
            1 / self.pick_info["pooled dark"] / self.units_per_pick.value()
        )
        self.influx_rate.setValue(influx)
        self.update_n_units()
    def calculate_n_units(self, dark):
        """ Calculates number of units in each pick from dark times. """
        influx = self.influx_rate.value()
        return 1 / (influx * dark)
    def update_n_units(self):
        """
        Displays the mean and std number of units in the
        Dialog.
        """
        n_units = self.calculate_n_units(self.pick_info["dark"])
        self.n_units_mean.setText("{:,.2f}".format(np.mean(n_units)))
        self.n_units_std.setText("{:,.2f}".format(np.std(n_units)))
class NenaPlotWindow(QtWidgets.QTabWidget):
    """Window displaying the NeNA precision fit."""
    def __init__(self, info_dialog):
        super().__init__()
        self.setWindowTitle("Nena Plot")
        icon_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "icons", "render.ico"
        )
        self.setWindowIcon(QtGui.QIcon(icon_path))
        self.resize(1000, 500)
        self.figure = plt.Figure()
        self.canvas = FigureCanvas(self.figure)
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(self.canvas)
        layout.addWidget(NavigationToolbar(self.canvas, self))
    def plot(self, nena_result):
        """Draw the NeNA distance histogram and its fit."""
        self.figure.clear()
        distances = nena_result.userkws["d"]
        axes = self.figure.add_subplot(111)
        axes.set_title("Next frame neighbor distance histogram")
        axes.plot(distances, nena_result.data, label="Data")
        axes.plot(distances, nena_result.best_fit, label="Fit")
        axes.set_xlabel("Distance (Px)")
        axes.set_ylabel("Counts")
        axes.legend(loc="best")
        self.canvas.draw()
class MaskSettingsDialog(QtWidgets.QDialog):
"""
A class to mask localizations based on their density.
...
Attributes
----------
ax1 : plt.axes.Axes
axis where all locs are shown with a given oversampling
ax2 : plt.axes.Axes
axis where blurred locs are shown
ax3 : plt.axes.Axes
axis where binary mask is shown
ax4 : plt.axes.Axes
axis where masked locs are shown (initially shows only zeros)
cached_blur : int
0 if image is to be blurred, 1 otherwise
cached_oversampling : int
0 if image is to be redrawn, 1 otherwise
cached_thresh : int
0 if mask is to be calculated, 1 otherwise
canvas : FigureCanvas
canvas used for plotting
channel : int
channel of localizations that are plotted in the canvas
cmap : str
colormap used in displaying images, same as in the main window
disp_px_size : QSpinBox
contains the display pixel size [nm]
figure : plt.figure.Figure
figure containg subplots
index_locs : list
localizations that were masked; may contain a single or all
channels
index_locs_out : list
localizations that were not masked; may contain a single or
all channels
infos : list
contains .yaml metadata files for all locs channels loaded when
starting the dialog
H : np.array
histogram displaying all localizations loaded; displayed in ax1
H_blur : np.array
histogram displaying blurred localizations; displayed in ax2
H_new : np.array
histogram displaying masked localizations; displayed in ax4
locs : list
contains all localizations loaded when starting the dialog
mask : np.array
histogram displaying binary mask; displayed in ax3
mask_blur : QDoubleSpinBox
contains the blur value
mask_loaded : bool
True, if mask was loaded from an external file
mask_thresh : QDoubleSpinBox
contains the threshold value for masking
paths : list
contains paths to all localizations loaded when starting the
dialog
save_all : QCheckBox
if checked, all channels loaded are masked; otherwise only
one channel
save_button : QPushButton
used for saving masked localizations
save_mask_button : QPushButton
used for saving the current mask as a .npy
_size_hint : tuple
determines the minimum size of the dialog
window : QMainWindow
instance of the main window
x_max : float
width of the loaded localizations
y_max : float
height of the loaded localizations
Methods
-------
blur_image()
Blurs localizations using a Gaussian filter
generate_image()
Histograms loaded localizations from a given channel
init_dialog()
Initializes dialog when called from the main window
load_mask()
Loads binary mask from .npy format
mask_image()
Calculates binary mask based on threshold
mask_locs()
Masks localizations from a single or all channels
_mask_locs(locs)
Masks locs given a mask
save_mask()
Saves binary mask into .npy format
save_locs()
Saves masked localizations
save_locs_multi()
Saves masked localizations for all loaded channels
update_plots()
Plots in all 4 axes
"""
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Generate Mask")
self.setModal(False)
self.channel = 0
self._size_hint = (670, 840)
self.setMinimumSize(*self._size_hint)
vbox = QtWidgets.QVBoxLayout(self)
mask_groupbox = QtWidgets.QGroupBox("Mask Settings")
vbox.addWidget(mask_groupbox)
mask_grid = QtWidgets.QGridLayout(mask_groupbox)
mask_grid.addWidget(QtWidgets.QLabel("Display pixel size [nm]"), 0, 0)
self.disp_px_size = QtWidgets.QSpinBox()
self.disp_px_size.setRange(0.1, 99999)
self.disp_px_size.setValue(300)
self.disp_px_size.setSingleStep(10)
self.disp_px_size.setKeyboardTracking(False)
self.disp_px_size.valueChanged.connect(self.update_plots)
mask_grid.addWidget(self.disp_px_size, 0, 1)
mask_grid.addWidget(QtWidgets.QLabel("Blur"), 1, 0)
self.mask_blur = QtWidgets.QDoubleSpinBox()
self.mask_blur.setRange(0, 9999)
self.mask_blur.setValue(1)
self.mask_blur.setSingleStep(0.1)
self.mask_blur.setDecimals(5)
self.mask_blur.setKeyboardTracking(False)
self.mask_blur.valueChanged.connect(self.update_plots)
mask_grid.addWidget(self.mask_blur, 1, 1)
mask_grid.addWidget(QtWidgets.QLabel("Threshold"), 2, 0)
self.mask_thresh = QtWidgets.QDoubleSpinBox()
self.mask_thresh.setRange(0, 1)
self.mask_thresh.setValue(0.5)
self.mask_thresh.setSingleStep(0.01)
self.mask_thresh.setDecimals(5)
self.mask_thresh.setKeyboardTracking(False)
self.mask_thresh.valueChanged.connect(self.update_plots)
mask_grid.addWidget(self.mask_thresh, 2, 1)
gridspec_dict = {
'bottom': 0.05,
'top': 0.95,
'left': 0.05,
'right': 0.95,
}
(
self.figure,
((self.ax1, self.ax2), (self.ax3, self.ax4)),
) = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=gridspec_dict)
self.canvas = FigureCanvas(self.figure)
mask_grid.addWidget(self.canvas, 3, 0, 1, 2)
self.save_all = QtWidgets.QCheckBox("Mask all channels")
self.save_all.setChecked(False)
mask_grid.addWidget(self.save_all, 4, 0)
load_mask_button = QtWidgets.QPushButton("Load Mask")
load_mask_button.setFocusPolicy(QtCore.Qt.NoFocus)
load_mask_button.clicked.connect(self.load_mask)
mask_grid.addWidget(load_mask_button, 5, 0)
self.save_mask_button = QtWidgets.QPushButton("Save Mask")
self.save_mask_button.setEnabled(False)
self.save_mask_button.setFocusPolicy(QtCore.Qt.NoFocus)
self.save_mask_button.clicked.connect(self.save_mask)
mask_grid.addWidget(self.save_mask_button, 5, 1)
mask_button = QtWidgets.QPushButton("Mask")
mask_button.setFocusPolicy(QtCore.Qt.NoFocus)
mask_button.clicked.connect(self.mask_locs)
mask_grid.addWidget(mask_button, 6, 0)
self.save_button = QtWidgets.QPushButton("Save localizations")
self.save_button.setEnabled(False)
self.save_button.setFocusPolicy(QtCore.Qt.NoFocus)
self.save_button.clicked.connect(self.save_locs)
mask_grid.addWidget(self.save_button, 6, 1)
self.cached_oversampling = 0
self.cached_blur = 0
self.cached_thresh = 0
self.mask_loaded = False
def init_dialog(self):
"""
Initializes dialog when called from the main window.
Loades localizations and metadata, updates plots.
"""
self.mask_loaded = False
self.locs = self.window.view.locs
self.paths = self.window.view.locs_paths
self.infos = self.window.view.infos
# which channel to plot
self.channel = self.window.view.get_channel("Mask image")
self.cmap = self.window.display_settings_dlg.colormap.currentText()
self.show()
locs = self.locs[self.channel]
info = self.infos[self.channel][0]
self.x_max = info["Width"]
self.y_max = info["Height"]
self.update_plots()
def generate_image(self):
""" Histograms loaded localizations from a given channel. """
locs = self.locs[self.channel]
oversampling = (
self.window.display_settings_dlg.pixelsize.value()
/ self.disp_px_size.value()
)
viewport = ((0, 0), (self.y_max, self.x_max))
_, H = render.render(
locs,
oversampling=oversampling,
viewport=viewport,
blur_method=None,
)
self.H = H / H.max()
def blur_image(self):
""" Blurs localizations using a Gaussian filter. """
H_blur = gaussian_filter(self.H, sigma=self.mask_blur.value())
H_blur = H_blur / np.max(H_blur)
self.H_blur = H_blur # image to be displayed in self.ax2
def save_mask(self):
""" Saves binary mask into .npy format. """
# get name for saving mask
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Save mask to", filter="*.npy"
)
if path:
np.save(path, self.mask)
def load_mask(self):
""" Loads binary mask from .npy format. """
# choose which file to load
path, ext = QtWidgets.QFileDialog.getOpenFileName(
self, "Load mask", filter="*.npy"
)
if path:
self.mask_loaded = True # will block changing of the mask
self.mask = np.load(path)
# update plots without drawing a new mask
self.update_plots(new_mask=False)
def mask_image(self):
""" Calculates binary mask based on threshold. """
if not self.mask_loaded:
mask = np.zeros(self.H_blur.shape, dtype=np.int8)
mask[self.H_blur > self.mask_thresh.value()] = 1
self.mask = mask
self.save_mask_button.setEnabled(True)
    def update_plots(self, new_mask=True):
        """
        Plots in all 4 axes (original, blurred, mask, masked image).
        Parameters
        ----------
        new_mask : boolean (default=True)
            True if new mask is to be calculated
        """
        # avoid a blur sigma of exactly zero (presumably to keep the
        # Gaussian filter / normalization well-behaved — TODO confirm)
        if self.mask_blur.value() == 0.00000:
            self.mask_blur.setValue(0.00001)
        if new_mask:
            # invalidate all cached stages so the pipeline reruns;
            # NOTE(review): resetting all three flags means the partial
            # recomputation branches below never trigger from here —
            # presumably intentional, but verify
            if self.cached_oversampling:
                self.cached_oversampling = 0
            if self.cached_blur:
                self.cached_blur = 0
            if self.cached_thresh:
                self.cached_thresh = 0
            # stage 1: re-render the histogram, then blur and threshold
            if not self.cached_oversampling:
                self.generate_image()
                self.blur_image()
                self.mask_image()
                self.cached_oversampling = 1
                self.cached_blur = 1
                self.cached_thresh = 1
            # stage 2: only blur and threshold changed
            if not self.cached_blur:
                self.blur_image()
                self.mask_image()
                self.cached_blur = 1
                self.cached_thresh = 1
            # stage 3: only the threshold changed
            if not self.cached_thresh:
                self.mask_image()
                self.cached_thresh = 1
        # redraw all four preview panels
        self.ax1.imshow(self.H, cmap=self.cmap)
        self.ax1.set_title("Original")
        self.ax2.imshow(self.H_blur, cmap=self.cmap)
        self.ax2.set_title("Blurred")
        self.ax3.imshow(self.mask, cmap='Greys_r')
        self.ax3.set_title("Mask")
        # the masked image stays blank until mask_locs is run
        self.ax4.imshow(np.zeros_like(self.H), cmap=self.cmap)
        self.ax4.set_title("Masked image")
        for ax in (self.ax1, self.ax2, self.ax3, self.ax4):
            ax.grid(False)
            ax.axis('off')
        self.canvas.draw()
def mask_locs(self):
""" Masks localizations from a single or all channels. """
self.index_locs = [] # locs in the mask
self.index_locs_out = [] # locs outside the mask
if self.save_all.isChecked(): # all channels
for locs in self.locs:
self._mask_locs(locs)
else: # only the current channel
locs = self.locs[self.channel]
self._mask_locs(locs)
def _mask_locs(self, locs):
"""
Masks locs given a mask.
Parameters
----------
locs : np.recarray
Localizations to be masked
"""
x_ind = (
np.floor(locs["x"] / self.x_max * self.mask.shape[0])
).astype(int)
y_ind = (
np.floor(locs["y"] / self.y_max * self.mask.shape[1])
).astype(int)
index = self.mask[y_ind, x_ind].astype(bool)
locs_in = locs[index]
locs_in.sort(kind="mergesort", order="frame")
locs_out = locs[~index]
locs_out.sort(kind="mergesort", order="frame")
self.index_locs.append(locs_in) # locs in the mask
self.index_locs_out.append(locs_out) # locs outside the mask
if (
(
self.save_all.isChecked()
and len(self.index_locs) == self.channel + 1
)
or not self.save_all.isChecked()
): # update masked locs plot if the current channel is masked
_, self.H_new = render.render(
self.index_locs[-1],
oversampling=(
self.window.display_settings_dlg.pixelsize.value()
/ self.disp_px_size.value()
),
viewport=((0, 0), (self.y_max, self.x_max)),
blur_method=None,
)
self.ax4.imshow(self.H_new, cmap=self.cmap)
self.ax4.grid(False)
self.ax4.axis('off')
self.save_button.setEnabled(True)
self.canvas.draw()
def save_locs(self):
""" Saves masked localizations. """
if self.save_all.isChecked(): # save all channels
self.save_locs_multi()
else:
out_path = self.paths[self.channel].replace(
".hdf5", "_mask_in.hdf5"
)
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save localizations within mask",
out_path,
filter="*.hdf5",
)
if path:
info = self.infos[self.channel] + [
{
"Generated by": "Picasso Render : Mask in ",
"Display pixel size [nm]": self.disp_px_size.value(),
"Blur": self.mask_blur.value(),
"Threshold": self.mask_thresh.value(),
}
]
io.save_locs(path, self.index_locs[0], info)
out_path = self.paths[self.channel].replace(
".hdf5", "_mask_out.hdf5"
)
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save localizations outside of mask",
out_path,
filter="*.hdf5",
)
if path:
info = self.infos[self.channel] + [
{
"Generated by": "Picasso Render : Mask out",
"Display pixel size [nm]": self.disp_px_size.value(),
"Blur": self.mask_blur.value(),
"Threshold": self.mask_thresh.value(),
}
]
io.save_locs(path, self.index_locs_out[0], info)
def save_locs_multi(self):
""" Saves masked localizations for all loaded channels. """
suffix_in, ok1 = QtWidgets.QInputDialog.getText(
self,
"",
"Enter suffix for localizations inside the mask",
QtWidgets.QLineEdit.Normal,
"_mask_in",
)
if ok1:
suffix_out, ok2 = QtWidgets.QInputDialog.getText(
self,
"",
"Enter suffix for localizations outside the mask",
QtWidgets.QLineEdit.Normal,
"_mask_out",
)
if ok2:
for channel in range(len(self.index_locs)):
out_path = self.paths[channel].replace(
".hdf5", f"{suffix_in}.hdf5"
)
info = self.infos[channel] + [
{
"Generated by": "Picasso Render : Mask in",
"Display pixel size [nm]": self.disp_px_size.value(),
"Blur": self.mask_blur.value(),
"Threshold": self.mask_thresh.value(),
}
]
io.save_locs(out_path, self.index_locs[channel], info)
out_path = self.paths[channel].replace(
".hdf5", f"{suffix_out}.hdf5"
)
info = self.infos[channel] + [
{
"Generated by": "Picasso Render : Mask out",
"Display pixel size [nm]": self.disp_px_size.value(),
"Blur": self.mask_blur.value(),
"Threshold": self.mask_thresh.value(),
}
]
io.save_locs(out_path, self.index_locs_out[channel], info)
class PickToolCircleSettings(QtWidgets.QWidget):
    """Settings widget for circular picks: the pick diameter and the
    +/- std range used by 'Pick similar'."""

    def __init__(self, window, tools_settings_dialog):
        super().__init__()
        self.grid = QtWidgets.QGridLayout(self)
        self.window = window
        # diameter of circular picks, in camera pixels
        self.grid.addWidget(QtWidgets.QLabel("Diameter (cam. pixel):"), 0, 0)
        diameter = QtWidgets.QDoubleSpinBox()
        diameter.setRange(0, 999999)
        diameter.setValue(1)
        diameter.setSingleStep(0.1)
        diameter.setDecimals(3)
        # emit valueChanged only when editing is finished
        diameter.setKeyboardTracking(False)
        diameter.valueChanged.connect(
            tools_settings_dialog.on_pick_dimension_changed
        )
        self.pick_diameter = diameter
        self.grid.addWidget(diameter, 0, 1)
        # tolerance (in standard deviations) for 'Pick similar'
        self.grid.addWidget(
            QtWidgets.QLabel("Pick similar +/- range (std)"), 1, 0
        )
        similar_range = QtWidgets.QDoubleSpinBox()
        similar_range.setRange(0, 100000)
        similar_range.setValue(2)
        similar_range.setSingleStep(0.1)
        similar_range.setDecimals(2)
        self.pick_similar_range = similar_range
        self.grid.addWidget(similar_range, 1, 1)
class PickToolRectangleSettings(QtWidgets.QWidget):
    """Settings widget for rectangular picks: the pick width."""

    def __init__(self, window, tools_settings_dialog):
        super().__init__()
        self.window = window
        self.grid = QtWidgets.QGridLayout(self)
        # width of rectangular picks, in camera pixels
        self.grid.addWidget(QtWidgets.QLabel("Width (cam. pixel):"), 0, 0)
        width = QtWidgets.QDoubleSpinBox()
        width.setRange(0, 999999)
        width.setValue(1)
        width.setSingleStep(0.1)
        width.setDecimals(3)
        # emit valueChanged only when editing is finished
        width.setKeyboardTracking(False)
        width.valueChanged.connect(
            tools_settings_dialog.on_pick_dimension_changed
        )
        self.pick_width = width
        self.grid.addWidget(width, 0, 1)
        self.grid.setRowStretch(1, 1)
class ToolsSettingsDialog(QtWidgets.QDialog):
    """
    A dialog class to customize picks - vary shape and size, annotate,
    change std for picking similar.

    ...

    Attributes
    ----------
    pick_annotation : QCheckBox
        tick to display picks' indices
    pick_diameter : QDoubleSpinBox
        contains the diameter of circular picks (pixels)
    pick_shape : QComboBox
        contains the str with the shape of picks (circle or rectangle)
    pick_width : QDoubleSpinBox
        contains the width of rectangular picks (pixels)
    point_picks : QCheckBox
        tick to display circular picks as 3-pixels-wide points

    Methods
    -------
    on_pick_dimension_changed(*args)
        Resets index_blocks in View and updates the scene
    update_scene_with_cache(*args)
        Quick (cached) update of the current view when picks change
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Tools Settings")
        self.setModal(False)
        self.vbox = QtWidgets.QVBoxLayout(self)
        self.pick_groupbox = QtWidgets.QGroupBox("Pick")
        self.vbox.addWidget(self.pick_groupbox)
        grid = QtWidgets.QGridLayout(self.pick_groupbox)
        # shape selector (drives which settings page is shown below)
        grid.addWidget(QtWidgets.QLabel("Shape:"), 1, 0)
        self.pick_shape = QtWidgets.QComboBox()
        self.pick_shape.addItems(["Circle", "Rectangle"])
        grid.addWidget(self.pick_shape, 1, 1)
        stack = QtWidgets.QStackedWidget()
        grid.addWidget(stack, 2, 0, 1, 2)
        self.pick_shape.currentIndexChanged.connect(stack.setCurrentIndex)
        # page 0: circle settings
        self.pick_circle_settings = PickToolCircleSettings(window, self)
        stack.addWidget(self.pick_circle_settings)
        self.pick_similar_range = self.pick_circle_settings.pick_similar_range
        self.pick_diameter = self.pick_circle_settings.pick_diameter
        # page 1: rectangle settings
        self.pick_rectangle_settings = PickToolRectangleSettings(window, self)
        stack.addWidget(self.pick_rectangle_settings)
        self.pick_width = self.pick_rectangle_settings.pick_width
        # shared options
        self.pick_annotation = QtWidgets.QCheckBox("Annotate picks")
        self.pick_annotation.stateChanged.connect(self.update_scene_with_cache)
        grid.addWidget(self.pick_annotation, 3, 0)
        self.point_picks = QtWidgets.QCheckBox(
            "Display circular picks as points"
        )
        self.point_picks.stateChanged.connect(self.update_scene_with_cache)
        grid.addWidget(self.point_picks, 4, 0)

    def on_pick_dimension_changed(self, *args):
        """ Resets index_blocks in View and updates the scene. """
        view = self.window.view
        # invalidate the per-channel index cache; it depends on pick size
        view.index_blocks = [None] * len(view.index_blocks)
        self.update_scene_with_cache()

    def update_scene_with_cache(self, *args):
        """
        Quick (cached) update of the current view when picks change.
        """
        self.window.view.update_scene(use_cache=True)
class DisplaySettingsDialog(QtWidgets.QDialog):
    """
    A class to change display settings, e.g.: zoom, display pixel size,
    contrast and blur.

    ...

    Attributes
    ----------
    blur_buttongroup : QButtonGroup
        contains available localization blur methods
    colormap : QComboBox
        contains strings with available colormaps (single channel only)
    color_step : QSpinBox
        defines how many colors are to be rendered
    disp_px_size : QDoubleSpinBox
        contains the size of super-resolution pixels in nm
    dynamic_disp_px : QCheckBox
        tick to automatically adjust to current window size when zooming.
    maximum : QDoubleSpinBox
        defines at which number of localizations per super-resolution
        pixel the maximum color of the colormap should be applied
    maximum_render : QDoubleSpinBox
        contains the maximum value of the parameter to be rendered
    min_blur_width : QDoubleSpinBox
        contains the minimum blur for each localization (pixels)
    minimap : QCheckBox
        tick to display minimap showing current FOV
    minimum : QDoubleSpinBox
        defines at which number of localizations per super-resolution
        pixel the minimum color of the colormap should be applied
    minimum_render : QDoubleSpinBox
        contains the minimum value of the parameter to be rendered
    parameter : QComboBox
        defines what property should be rendered, e.g.: z, photons
    pixelsize : QDoubleSpinBox
        contains the camera pixel size (nm)
    render_check : QCheckBox
        tick to activate parameter rendering
    scalebar : QDoubleSpinBox
        contains the scale bar's length (nm)
    scalebar_groupbox : QGroupBox
        group with options for customizing scale bar, tick to display
    scalebar_text : QCheckBox
        tick to display scale bar's length (nm)
    show_legend : QPushButton
        click to display parameter rendering's legend
    _silent_disp_px_update : boolean
        True if update display pixel size in background
    zoom : QDoubleSpinBox
        contains zoom's magnitude

    Methods
    -------
    on_cmap_changed()
        Loads custom colormap if requested
    on_disp_px_changed(value)
        Sets new display pixel size, updates contrast and updates scene
        in the main window
    on_zoom_changed(value)
        Zooms the image in the main window
    render_scene(*args, **kwargs)
        Updates scene in the main window
    set_dynamic_disp_px(state)
        Updates scene if dynamic display pixel size is checked
    set_disp_px_silently(disp_px_size)
        Changes the value of display pixel size in background
    set_zoom_silently(zoom)
        Changes the value of zoom in the background
    silent_maximum_update(value)
        Changes the value of self.maximum in the background
    silent_minimum_update(value)
        Changes the value of self.minimum in the background
    update_scene(*args, **kwargs)
        Updates scene with cache
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("Display Settings")
        self.resize(200, 0)
        self.setModal(False)
        vbox = QtWidgets.QVBoxLayout(self)
        # General
        general_groupbox = QtWidgets.QGroupBox("General")
        vbox.addWidget(general_groupbox)
        general_grid = QtWidgets.QGridLayout(general_groupbox)
        general_grid.addWidget(QtWidgets.QLabel("Zoom:"), 0, 0)
        self.zoom = QtWidgets.QDoubleSpinBox()
        self.zoom.setKeyboardTracking(False)
        # lower bound is the smallest value representable at this precision
        self.zoom.setRange(10 ** (-self.zoom.decimals()), 1e6)
        self.zoom.valueChanged.connect(self.on_zoom_changed)
        general_grid.addWidget(self.zoom, 0, 1)
        general_grid.addWidget(
            QtWidgets.QLabel("Display pixel size [nm]:"), 1, 0
        )
        self._disp_px_size = 130 / DEFAULT_OVERSAMPLING
        self.disp_px_size = QtWidgets.QDoubleSpinBox()
        self.disp_px_size.setRange(0.00001, 100000)
        self.disp_px_size.setSingleStep(1)
        self.disp_px_size.setDecimals(5)
        self.disp_px_size.setValue(self._disp_px_size)
        self.disp_px_size.setKeyboardTracking(False)
        self.disp_px_size.valueChanged.connect(self.on_disp_px_changed)
        general_grid.addWidget(self.disp_px_size, 1, 1)
        self.dynamic_disp_px = QtWidgets.QCheckBox("dynamic")
        self.dynamic_disp_px.setChecked(True)
        self.dynamic_disp_px.toggled.connect(
            self.set_dynamic_disp_px
        )
        general_grid.addWidget(self.dynamic_disp_px, 2, 1)
        self.minimap = QtWidgets.QCheckBox("show minimap")
        general_grid.addWidget(self.minimap, 3, 1)
        self.minimap.stateChanged.connect(self.update_scene)
        # Contrast
        contrast_groupbox = QtWidgets.QGroupBox("Contrast")
        vbox.addWidget(contrast_groupbox)
        contrast_grid = QtWidgets.QGridLayout(contrast_groupbox)
        minimum_label = QtWidgets.QLabel("Min. Density:")
        contrast_grid.addWidget(minimum_label, 0, 0)
        self.minimum = QtWidgets.QDoubleSpinBox()
        self.minimum.setRange(0, 999999)
        self.minimum.setSingleStep(5)
        self.minimum.setValue(0)
        self.minimum.setDecimals(6)
        self.minimum.setKeyboardTracking(False)
        self.minimum.valueChanged.connect(self.update_scene)
        contrast_grid.addWidget(self.minimum, 0, 1)
        maximum_label = QtWidgets.QLabel("Max. Density:")
        contrast_grid.addWidget(maximum_label, 1, 0)
        self.maximum = QtWidgets.QDoubleSpinBox()
        self.maximum.setRange(0, 999999)
        self.maximum.setSingleStep(5)
        self.maximum.setValue(100)
        self.maximum.setDecimals(6)
        self.maximum.setKeyboardTracking(False)
        self.maximum.valueChanged.connect(self.update_scene)
        contrast_grid.addWidget(self.maximum, 1, 1)
        contrast_grid.addWidget(QtWidgets.QLabel("Colormap:"), 2, 0)
        self.colormap = QtWidgets.QComboBox()
        self.colormap.addItems(plt.colormaps())
        self.colormap.addItem("Custom")
        contrast_grid.addWidget(self.colormap, 2, 1)
        self.colormap.currentIndexChanged.connect(
            self.on_cmap_changed
        )
        # Blur
        blur_groupbox = QtWidgets.QGroupBox("Blur")
        blur_grid = QtWidgets.QGridLayout(blur_groupbox)
        self.blur_buttongroup = QtWidgets.QButtonGroup()
        points_button = QtWidgets.QRadioButton("None")
        self.blur_buttongroup.addButton(points_button)
        smooth_button = QtWidgets.QRadioButton("One-Pixel-Blur")
        self.blur_buttongroup.addButton(smooth_button)
        convolve_button = QtWidgets.QRadioButton("Global Localization Precision")
        self.blur_buttongroup.addButton(convolve_button)
        gaussian_button = QtWidgets.QRadioButton(
            "Individual Localization Precision"
        )
        self.blur_buttongroup.addButton(gaussian_button)
        gaussian_iso_button = QtWidgets.QRadioButton(
            "Individual Localization Precision, iso"
        )
        self.blur_buttongroup.addButton(gaussian_iso_button)
        blur_grid.addWidget(points_button, 0, 0, 1, 2)
        blur_grid.addWidget(smooth_button, 1, 0, 1, 2)
        blur_grid.addWidget(convolve_button, 2, 0, 1, 2)
        blur_grid.addWidget(gaussian_button, 3, 0, 1, 2)
        blur_grid.addWidget(gaussian_iso_button, 4, 0, 1, 2)
        convolve_button.setChecked(True)
        self.blur_buttongroup.buttonReleased.connect(self.render_scene)
        blur_grid.addWidget(
            QtWidgets.QLabel("Min. Blur (cam. pixel):"), 5, 0, 1, 1
        )
        self.min_blur_width = QtWidgets.QDoubleSpinBox()
        self.min_blur_width.setRange(0, 999999)
        self.min_blur_width.setSingleStep(0.01)
        self.min_blur_width.setValue(0)
        self.min_blur_width.setDecimals(3)
        self.min_blur_width.setKeyboardTracking(False)
        self.min_blur_width.valueChanged.connect(self.render_scene)
        blur_grid.addWidget(self.min_blur_width, 5, 1, 1, 1)
        vbox.addWidget(blur_groupbox)
        # maps each radio button to the blur-method key used by render
        self.blur_methods = {
            points_button: None,
            smooth_button: "smooth",
            convolve_button: "convolve",
            gaussian_button: "gaussian",
            gaussian_iso_button: "gaussian_iso",
        }
        # Camera_parameters
        camera_groupbox = QtWidgets.QGroupBox("Camera")
        self.camera_grid = QtWidgets.QGridLayout(camera_groupbox)
        self.camera_grid.addWidget(QtWidgets.QLabel("Pixel Size:"), 0, 0)
        self.pixelsize = QtWidgets.QDoubleSpinBox()
        self.pixelsize.setRange(1, 100000)
        self.pixelsize.setValue(130)
        self.pixelsize.setKeyboardTracking(False)
        self.pixelsize.valueChanged.connect(self.update_scene)
        self.camera_grid.addWidget(self.pixelsize, 0, 1)
        vbox.addWidget(camera_groupbox)
        # Scalebar
        self.scalebar_groupbox = QtWidgets.QGroupBox("Scale Bar")
        self.scalebar_groupbox.setCheckable(True)
        self.scalebar_groupbox.setChecked(False)
        self.scalebar_groupbox.toggled.connect(self.update_scene)
        vbox.addWidget(self.scalebar_groupbox)
        scalebar_grid = QtWidgets.QGridLayout(self.scalebar_groupbox)
        scalebar_grid.addWidget(
            QtWidgets.QLabel("Scale Bar Length (nm):"), 0, 0
        )
        self.scalebar = QtWidgets.QDoubleSpinBox()
        self.scalebar.setRange(0.0001, 100000)
        self.scalebar.setValue(500)
        self.scalebar.setKeyboardTracking(False)
        self.scalebar.valueChanged.connect(self.update_scene)
        scalebar_grid.addWidget(self.scalebar, 0, 1)
        self.scalebar_text = QtWidgets.QCheckBox("Print scale bar length")
        self.scalebar_text.stateChanged.connect(self.update_scene)
        scalebar_grid.addWidget(self.scalebar_text, 1, 0)
        self._silent_disp_px_update = False
        # Render
        self.render_groupbox = QtWidgets.QGroupBox("Render properties")
        vbox.addWidget(self.render_groupbox)
        render_grid = QtWidgets.QGridLayout(self.render_groupbox)
        render_grid.addWidget(QtWidgets.QLabel("Parameter:"), 0, 0)
        self.parameter = QtWidgets.QComboBox()
        render_grid.addWidget(self.parameter, 0, 1)
        self.parameter.activated.connect(self.window.view.set_property)
        minimum_label_render = QtWidgets.QLabel("Min.:")
        render_grid.addWidget(minimum_label_render, 1, 0)
        self.minimum_render = QtWidgets.QDoubleSpinBox()
        self.minimum_render.setRange(-999999, 999999)
        self.minimum_render.setSingleStep(5)
        self.minimum_render.setValue(0)
        self.minimum_render.setDecimals(2)
        self.minimum_render.setKeyboardTracking(False)
        self.minimum_render.setEnabled(False)
        self.minimum_render.valueChanged.connect(
            self.window.view.activate_render_property
        )
        render_grid.addWidget(self.minimum_render, 1, 1)
        maximum_label_render = QtWidgets.QLabel("Max.:")
        render_grid.addWidget(maximum_label_render, 2, 0)
        self.maximum_render = QtWidgets.QDoubleSpinBox()
        self.maximum_render.setRange(-999999, 999999)
        self.maximum_render.setSingleStep(5)
        self.maximum_render.setValue(100)
        self.maximum_render.setDecimals(2)
        self.maximum_render.setKeyboardTracking(False)
        self.maximum_render.setEnabled(False)
        self.maximum_render.valueChanged.connect(
            self.window.view.activate_render_property
        )
        render_grid.addWidget(self.maximum_render, 2, 1)
        color_step_label = QtWidgets.QLabel("Colors:")
        render_grid.addWidget(color_step_label, 3, 0)
        self.color_step = QtWidgets.QSpinBox()
        self.color_step.setRange(1, 256)
        self.color_step.setSingleStep(16)
        self.color_step.setValue(32)
        self.color_step.setKeyboardTracking(False)
        self.color_step.setEnabled(False)
        self.color_step.valueChanged.connect(
            self.window.view.activate_render_property
        )
        render_grid.addWidget(self.color_step, 3, 1)
        self.render_check = QtWidgets.QCheckBox("Render")
        self.render_check.stateChanged.connect(
            self.window.view.activate_render_property
        )
        self.render_check.setEnabled(False)
        render_grid.addWidget(self.render_check, 4, 0)
        self.show_legend = QtWidgets.QPushButton("Show legend")
        render_grid.addWidget(self.show_legend, 4, 1)
        self.show_legend.setEnabled(False)
        self.show_legend.setAutoDefault(False)
        self.show_legend.clicked.connect(self.window.view.show_legend)

    def on_cmap_changed(self):
        """ Loads custom colormap if requested.

        Raises
        ------
        ValueError
            If the loaded .npy array is not of shape (256, 4) or
            contains values outside [0, 1].
        """
        if self.colormap.currentText() == "Custom":
            path, ext = QtWidgets.QFileDialog.getOpenFileName(
                self, "Load custom colormap", filter="*.npy"
            )
            if path:
                cmap = np.load(path)
                if cmap.shape != (256, 4):
                    # reset the selection BEFORE raising; previously this
                    # reset was placed after the raise and never executed,
                    # leaving "Custom" selected with no valid colormap
                    self.colormap.setCurrentIndex(0)
                    raise ValueError(
                        "Colormap must be of shape (256, 4)\n"
                        f"The loaded colormap has shape {cmap.shape}"
                    )
                elif not np.all((cmap >= 0) & (cmap <= 1)):
                    self.colormap.setCurrentIndex(0)
                    raise ValueError(
                        "All elements of the colormap must be between\n"
                        "0 and 1"
                    )
                else:
                    self.window.view.custom_cmap = cmap
        self.update_scene()

    def on_disp_px_changed(self, value):
        """
        Sets new display pixel size, updates contrast and updates scene
        in the main window.
        """
        # density scales with pixel area, so contrast scales quadratically
        contrast_factor = (value / self._disp_px_size) ** 2
        self._disp_px_size = value
        self.silent_minimum_update(contrast_factor * self.minimum.value())
        self.silent_maximum_update(contrast_factor * self.maximum.value())
        if not self._silent_disp_px_update:
            # a manual edit disables the automatic (dynamic) pixel size
            self.dynamic_disp_px.setChecked(False)
            self.window.view.update_scene()

    def on_zoom_changed(self, value):
        """ Zooms the image in the main window. """
        self.window.view.set_zoom(value)

    def set_disp_px_silently(self, disp_px_size):
        """ Changes the value of self.disp_px_size in the background. """
        self._silent_disp_px_update = True
        self.disp_px_size.setValue(disp_px_size)
        self._silent_disp_px_update = False

    def set_zoom_silently(self, zoom):
        """ Changes the value of zoom in the background. """
        self.zoom.blockSignals(True)
        self.zoom.setValue(zoom)
        self.zoom.blockSignals(False)

    def silent_minimum_update(self, value):
        """ Changes the value of self.minimum in the background. """
        self.minimum.blockSignals(True)
        self.minimum.setValue(value)
        self.minimum.blockSignals(False)

    def silent_maximum_update(self, value):
        """ Changes the value of self.maximum in the background. """
        self.maximum.blockSignals(True)
        self.maximum.setValue(value)
        self.maximum.blockSignals(False)

    def render_scene(self, *args, **kwargs):
        """ Updates scene in the main window. """
        self.window.view.update_scene()

    def set_dynamic_disp_px(self, state):
        """ Updates scene if dynamic display pixel size is checked. """
        if state:
            self.window.view.update_scene()

    def update_scene(self, *args, **kwargs):
        """ Updates scene with cache. """
        self.window.view.update_scene(use_cache=True)
class FastRenderDialog(QtWidgets.QDialog):
    """
    A class to randomly sample a given percentage of locs to increase
    the speed of rendering.

    ...

    Attributes
    ----------
    channel : QComboBox
        contains the channel where fast rendering is to be applied
    fraction : QSpinBox
        contains the percentage of locs to be sampled
    fractions : list
        contains the percentages for all channels of locs to be sampled
    sample_button : QPushButton
        click to sample locs according to the percentages specified by
        self.fractions
    window : QMainWindow
        instance of the main window

    Methods
    -------
    on_channel_changed()
        Retrieves value in self.fraction to the last chosen one
    on_file_added()
        Adds new item in self.channel
    on_file_closed(idx)
        Removes item in self.channel
    on_fraction_changed()
        Updates self.fractions
    sample_locs()
        Draws a fraction of locs specified by self.fractions
    """

    def __init__(self, window):
        super().__init__()
        self.window = window
        self.setWindowTitle("Fast Render")
        self.setWindowIcon(self.window.icon)
        self.layout = QtWidgets.QGridLayout()
        self.setLayout(self.layout)
        # index 0 holds the "All channels" percentage
        self.fractions = [100]
        # info explaining what is this dialog
        self.layout.addWidget(QtWidgets.QLabel(
            (
                "Change percentage of locs displayed in each\n"
                "channel to increase the speed of rendering.\n\n"
                "NOTE: sampling locs may lead to unexpected behaviour\n"
                "when using some of Picasso : Render functions.\n"
                "Please set the percentage below to 100 to avoid\n"
                "such situations."
            )
        ), 0, 0, 1, 2)
        # choose channel
        self.layout.addWidget(QtWidgets.QLabel("Channel: "), 1, 0)
        self.channel = QtWidgets.QComboBox(self)
        self.channel.setEditable(False)
        self.channel.addItem("All channels")
        self.channel.activated.connect(self.on_channel_changed)
        self.layout.addWidget(self.channel, 1, 1)
        # choose percentage
        self.layout.addWidget(
            QtWidgets.QLabel(
                "Percentage of localizations\nto be displayed"
            ), 2, 0
        )
        self.fraction = QtWidgets.QSpinBox(self)
        self.fraction.setSingleStep(1)
        self.fraction.setMinimum(1)
        self.fraction.setMaximum(100)
        self.fraction.setValue(100)
        self.fraction.valueChanged.connect(self.on_fraction_changed)
        self.layout.addWidget(self.fraction, 2, 1)
        # randomly draw localizations in each channel
        self.sample_button = QtWidgets.QPushButton(
            "Randomly sample\nlocalizations"
        )
        self.sample_button.clicked.connect(self.sample_locs)
        self.layout.addWidget(self.sample_button, 3, 1)

    def on_channel_changed(self):
        """
        Retrieves value in self.fraction to the last chosen one.
        """
        idx = self.channel.currentIndex()
        # update the spin box without re-triggering on_fraction_changed
        self.fraction.blockSignals(True)
        self.fraction.setValue(self.fractions[idx])
        self.fraction.blockSignals(False)

    def on_file_added(self):
        """ Adds new item in self.channel. """
        self.channel.addItem(self.window.dataset_dialog.checks[-1].text())
        self.fractions.append(100)

    def on_file_closed(self, idx):
        """ Removes item from self.channel. """
        # +1 offsets the leading "All channels" entry
        self.channel.removeItem(idx+1)
        del self.fractions[idx+1]

    def on_fraction_changed(self):
        """ Updates self.fractions. """
        idx = self.channel.currentIndex()
        self.fractions[idx] = self.fraction.value()

    def sample_locs(self):
        """ Draws a fraction of locs specified by self.fractions. """
        idx = self.channel.currentIndex()
        if idx == 0:  # all channels share the same fraction
            for i in range(len(self.window.view.locs_paths)):
                n_locs = len(self.window.view.all_locs[i])
                rand_idx = np.random.choice(
                    n_locs,
                    size=int(n_locs * self.fractions[0] / 100),
                    replace=False,
                )  # random indices to extract locs
                self.window.view.locs[i] = (
                    self.window.view.all_locs[i][rand_idx]
                )  # assign new localizations to be displayed
        else:  # each channel individually
            for i in range(len(self.window.view.locs_paths)):
                n_locs = len(self.window.view.all_locs[i])
                rand_idx = np.random.choice(
                    n_locs,
                    size=int(n_locs * self.fractions[i+1] / 100),
                    replace=False,
                )  # random indices to extract locs
                self.window.view.locs[i] = (
                    self.window.view.all_locs[i][rand_idx]
                )  # assign new localizations to be displayed
        # update view.group_color if needed:
        if (len(self.fractions) == 2 and
                hasattr(self.window.view.locs[0], "group")
            ):
            self.window.view.group_color = (
                self.window.view.get_group_color(
                    self.window.view.locs[0]
                )
            )
        # invalidate the View's pick-index cache (previously this was
        # mistakenly assigned to the dialog itself as self.index_blocks,
        # leaving the View's stale cache in place)
        self.window.view.index_blocks = [None] * len(self.window.view.locs)
        self.window.view.update_scene()
class SlicerDialog(QtWidgets.QDialog):
    """
    A class to customize slicing 3D data in z axis.

    ...

    Attributes
    ----------
    bins : np.array
        contains bins used in plotting the histogram
    canvas : FigureCanvas
        contains the histogram of number of locs in slices
    colors : list
        contains rgb channels for each localization channel
    export_button : QPushButton
        click to export slices into .tif files
    full_check : QCheckBox
        tick to save the whole FOV, untick to save only the current
        viewport
    patches : list
        contains plt.artists used in creating histograms
    pick_slice : QDoubleSpinBox
        contains slice thickness (nm)
    separate_check : QCheckBox
        tick to save channels separately when exporting slice
    sl : QSlider
        points to the slice to be displayed
    slicer_cache : dict
        contains QPixmaps that have been drawn for each slice
    slicermax : float
        maximum value of self.sl
    slicermin : float
        minimum value of self.sl
    slicerposition : float
        current position of self.sl
    slicer_radio_button : QCheckBox
        tick to slice locs
    window : QMainWindow
        instance of the main window
    zcoord : list
        z coordinates of each channel of localization (nm);
        added when loading each channel (see View.add)

    Methods
    -------
    calculate_histogram()
        Calculates and histograms z coordinates of each channel
    export_stack()
        Saves all slices as .tif files
    initialize()
        Called when the dialog is open, calculates the histograms and
        shows the dialog
    on_pick_slice_changed()
        Modifies histograms when slice thickness changes
    on_slice_position_changed(position)
        Changes some properties and updates scene in the main window
    toggle_slicer()
        Updates scene in the main window when slicing is called
    """

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle("3D Slicer")
        self.setModal(False)
        self.setMinimumSize(550, 690)  # to display the histogram
        vbox = QtWidgets.QVBoxLayout(self)
        slicer_groupbox = QtWidgets.QGroupBox("Slicer Settings")
        vbox.addWidget(slicer_groupbox)
        slicer_grid = QtWidgets.QGridLayout(slicer_groupbox)
        slicer_grid.addWidget(
            QtWidgets.QLabel("Slice Thickness [nm]:"), 0, 0
        )
        self.pick_slice = QtWidgets.QDoubleSpinBox()
        self.pick_slice.setRange(0.01, 99999)
        self.pick_slice.setValue(50)
        self.pick_slice.setSingleStep(1)
        self.pick_slice.setDecimals(2)
        self.pick_slice.setKeyboardTracking(False)
        self.pick_slice.valueChanged.connect(self.on_pick_slice_changed)
        slicer_grid.addWidget(self.pick_slice, 0, 1)
        self.sl = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.sl.setMinimum(0)
        self.sl.setMaximum(50)
        self.sl.setValue(25)
        self.sl.setTickPosition(QtWidgets.QSlider.TicksBelow)
        self.sl.setTickInterval(1)
        self.sl.valueChanged.connect(self.on_slice_position_changed)
        slicer_grid.addWidget(self.sl, 1, 0, 1, 2)
        self.figure, self.ax = plt.subplots(1, figsize=(3, 3))
        self.canvas = FigureCanvas(self.figure)
        slicer_grid.addWidget(self.canvas, 2, 0, 1, 2)
        self.slicer_radio_button = QtWidgets.QCheckBox("Slice Dataset")
        self.slicer_radio_button.stateChanged.connect(self.toggle_slicer)
        slicer_grid.addWidget(self.slicer_radio_button, 3, 0)
        self.separate_check = QtWidgets.QCheckBox("Export channels separate")
        slicer_grid.addWidget(self.separate_check, 4, 0)
        self.full_check = QtWidgets.QCheckBox("Export full image")
        slicer_grid.addWidget(self.full_check, 5, 0)
        self.export_button = QtWidgets.QPushButton("Export Slices")
        self.export_button.setAutoDefault(False)
        self.export_button.clicked.connect(self.export_stack)
        slicer_grid.addWidget(self.export_button, 6, 0)
        self.zcoord = []

    def initialize(self):
        """
        Called when the dialog is open, calculates the histograms and
        shows the dialog.
        """
        self.calculate_histogram()
        self.show()

    def calculate_histogram(self):
        """ Calculates and histograms z coordinates of each channel. """
        # slice thickness in nm (renamed from 'slice', which shadowed
        # the builtin)
        slice_thickness = self.pick_slice.value()
        self.ax.clear()
        # get colors for each channel (from dataset dialog)
        colors = [
            _.palette().color(QtGui.QPalette.Window)
            for _ in self.window.dataset_dialog.colordisp_all
        ]
        self.colors = [
            [_.red() / 255, _.green() / 255, _.blue() / 255] for _ in colors
        ]
        # get bins, starting with minimum z and ending with max z
        self.bins = np.arange(
            np.amin(np.hstack(self.zcoord)),
            np.amax(np.hstack(self.zcoord)),
            slice_thickness,
        )
        # plot histograms
        self.patches = []
        for i in range(len(self.zcoord)):
            _, _, patches = self.ax.hist(
                self.zcoord[i],
                self.bins,
                density=True,
                facecolor=self.colors[i],
                alpha=0.5,
            )
            self.patches.append(patches)
        self.ax.set_xlabel("z-coordinate [nm]")
        self.ax.set_ylabel("Rel. frequency")
        self.ax.set_title(r"$\mathrm{Histogram\ of\ Z:}$")
        self.canvas.draw()
        self.sl.setMaximum(len(self.bins) - 2)
        # integer division: QSlider.setValue requires an int (a float
        # raises TypeError in PyQt5)
        self.sl.setValue(len(self.bins) // 2)
        # reset cache
        self.slicer_cache = {}

    def on_pick_slice_changed(self):
        """ Modifies histograms when slice thickness changes. """
        # reset cache
        self.slicer_cache = {}
        # re-histogram with the new thickness; calculate_histogram
        # already recenters the slider, so the original if/else (whose
        # branches both called calculate_histogram) collapses to one call
        self.calculate_histogram()

    def toggle_slicer(self):
        """ Updates scene in the main window when slicing is toggled. """
        self.window.view.update_scene()

    def on_slice_position_changed(self, position):
        """
        Changes some properties and updates scene in the main window.
        """
        # highlight the active slice in the histogram
        for i in range(len(self.zcoord)):
            for patch in self.patches[i]:
                patch.set_facecolor(self.colors[i])
            self.patches[i][position].set_facecolor("black")
        self.slicerposition = position
        self.canvas.draw()
        # z range (nm) of the currently displayed slice
        self.slicermin = self.bins[position]
        self.slicermax = self.bins[position + 1]
        self.window.view.update_scene_slicer()

    def export_stack(self):
        """ Saves all slices as .tif files. """
        # get filename for saving
        try:
            base, ext = os.path.splitext(self.window.view.locs_paths[0])
        except AttributeError:
            return
        out_path = base + ".tif"
        path, ext = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save z slices", out_path, filter="*.tif"
        )
        if path:
            base, ext = os.path.splitext(path)
            if self.separate_check.isChecked():  # each channel individually
                # Uncheck all
                for checks in self.window.dataset_dialog.checks:
                    checks.setChecked(False)
                for j in range(len(self.window.view.locs)):
                    # load a single channel
                    self.window.dataset_dialog.checks[j].setChecked(True)
                    progress = lib.ProgressDialog(
                        "Exporting slices..", 0, self.sl.maximum(), self
                    )
                    progress.set_value(0)
                    progress.show()
                    # save each channel one by one
                    for i in tqdm(range(self.sl.maximum() + 1)):
                        self.sl.setValue(i)
                        out_path = (
                            base
                            + "_Z"
                            + "{num:03d}".format(num=i)
                            + "_CH"
                            + "{num:03d}".format(num=j+1)
                            + ".tif"
                        )
                        if self.full_check.isChecked():  # full FOV
                            movie_height, movie_width = (
                                self.window.view.movie_size()
                            )
                            viewport = [(0, 0), (movie_height, movie_width)]
                            qimage = self.window.view.render_scene(
                                cache=False, viewport=viewport
                            )
                            gray = qimage.convertToFormat(
                                QtGui.QImage.Format_RGB16
                            )
                        else:  # current FOV
                            gray = self.window.view.qimage.convertToFormat(
                                QtGui.QImage.Format_RGB16
                            )
                        gray.save(out_path)
                        progress.set_value(i)
                    progress.close()
                    self.window.dataset_dialog.checks[j].setChecked(False)
                for checks in self.window.dataset_dialog.checks:
                    checks.setChecked(True)
            else:  # all channels at once
                progress = lib.ProgressDialog(
                    "Exporting slices..", 0, self.sl.maximum(), self
                )
                progress.set_value(0)
                progress.show()
                for i in tqdm(range(self.sl.maximum() + 1)):
                    self.sl.setValue(i)
                    out_path = (
                        base
                        + "_Z"
                        + "{num:03d}".format(num=i)
                        + "_CH001"
                        + ".tif"
                    )
                    if self.full_check.isChecked():  # full FOV
                        movie_height, movie_width = (
                            self.window.view.movie_size()
                        )
                        viewport = [(0, 0), (movie_height, movie_width)]
                        qimage = self.window.view.render_scene(
                            cache=False, viewport=viewport
                        )
                        qimage.save(out_path)
                    else:  # current FOV
                        self.window.view.qimage.save(out_path)
                    progress.set_value(i)
                progress.close()
class View(QtWidgets.QLabel):
"""
A class to display super-resolution datasets.
...
Attributes
----------
all_locs : list
contains a np.recarray with localizations for each channel;
important for fast rendering
currentdrift : list
contains the most up-to-date drift for each channel
custom_cmap : np.array
custom colormap loaded from .npy, see DisplaySettingsDialog
_drift : list
contains np.recarrays with drift info for each channel, None if
no drift found/calculated
_driftfiles : list
contains paths to drift .txt files for each channel
group_color : np.array
important for single channel data with group info (picked or
clustered locs); contains an integer index for each loc
defining its color
image : np.array
Unprocessed image of rendered localizations
index_blocks : list
contains tuples with info about indexed locs for each channel,
None if not calculated yet
infos : list
contains a dictionary with metadata for each channel
locs : list
contains a np.recarray with localizations for each channel,
reduced in case of fast rendering
locs_paths : list
contains a str defining the path for each channel
median_lp : float
median lateral localization precision of the first locs file
(pixels)
_mode : str
defines current mode (zoom, pick or measure); important for
mouseEvents
n_locs : int
number of localizations loaded; if multichannel, the sum is
given
origin : QPoint
position of the origin of the zoom-in rectangle
_pan : boolean
indicates if image is currently panned
pan_start_x : float
x coordinate of panning's starting position
pan_start_y : float
y coordinate of panning's starting position
_picks : list
contains the coordinates of current picks
_pixmap : QPixMap
Pixmap currently displayed
_points : list
contains the coordinates of points to measure distances
between them
qimage : QImage
current image of rendered locs, picks and other drawings
qimage_no_picks : QImage
current image of rendered locs without picks and measuring
points
rectangle_pick_current_x : float
x coordinate of the leading edge of the drawn rectangular pick
rectangle_pick_current_y : float
y coordinate of the leading edge of the drawn rectangular pick
_rectangle_pick_ongoing : boolean
indicates if a rectangular pick is currently drawn
rectangle_pick_start : tuple
(rectangle_pick_start_x, rectangle_pick_start_y), see below
rectangle_pick_start_x : float
x coordinate of the starting edge of the drawn rectangular pick
rectangle_pick_start_y : float
y coordinate of the starting edge of the drawn rectangular pick
rubberband : QRubberBand
draws a rectangle used in zooming in
_size_hint : tuple
used for size adjustment
unfold_status : str
specifies if unfold/refold groups
window : QMainWindow
instance of the main window
x_color : np.array
indexes each loc according to its parameter value;
see self.activate_render_property
x_locs : list
contains np.recarrays with locs to be rendered by property; one
per color
x_render_cache : list
contains dicts with caches for storing info about locs rendered
by a property
x_render_state : boolean
indicates if rendering by property is used
Methods
-------
activate_property_menu()
Allows changing render parameters
activate_render_property()
Assigns locs by color to render a chosen property
add(path)
Loads a .hdf5 and .yaml files
add_drift(channel, drift)
Assigns attributes and saves .txt drift file
add_multiple(paths)
Loads several .hdf5 and .yaml files
add_pick(position)
Adds a pick at a given position
add_point(position)
Adds a point at a given position for measuring distances
add_picks(positions)
Adds several picks
adjust_viewport_to_view(viewport)
Adds space to viewport to match self.window's aspect ratio
align()
Align channels by RCC or from picked locs
analyze_cluster()
Clusters picked locs using k-means clustering
apply_drift()
Applies drift to locs from a .txt file
combine()
Combines all locs in each pick into one localization
clear_picks()
Deletes all current picks
CPU_or_GPU_box()
Creates a message box with buttons to choose between
CPU and GPU SMLM clustering.
dbscan()
Gets channel, parameters and path for DBSCAN
_dbscan(channel, path, params)
Performs DBSCAN in a given channel with user-defined parameters
deactivate_property_menu()
Blocks changing render parameters
display_pixels_per_viewport_pixels()
Returns optimal oversampling
dragEnterEvent(event)
Defines what happens when a file is dragged onto the main
window
draw_minimap(image)
Draws a minimap showing the position of the current viewport
draw_legend(image)
Draws legend for multichannel data
draw_picks(image)
Draws all picks onto rendered localizations
draw_points(image)
Draws points and lines and distances between them
draw_rectangle_pick_ongoing(image)
Draws an ongoing rectangular pick onto rendered localizations
draw_scalebar(image)
Draws a scalebar
draw_scene(viewport)
Renders locs in the given viewport and draws picks, legend, etc
draw_scene_slicer(viewport)
Renders sliced locs in the given viewport and draws picks etc
dropEvent(event)
Defines what happens when a file is dropped onto the window
export_trace()
Saves trace as a .csv
filter_picks()
Filters picks by number of locs
fit_in_view()
Updates scene with all locs shown
get_channel()
Opens an input dialog to ask for a channel
get_channel3d()
Similar to get_channel, used in selecting 3D picks
get_channel_all_seq()
Similar to get_channel, adds extra index for applying to all
channels
get_group_color(locs)
Finds group color index for each localization
get_index_blocks(channel)
Calls self.index_locs if not calculated earlier
get_pick_rectangle_corners(start_x, start_y, end_x, end_y, width)
Finds the positions of a rectangular pick's corners
get_pick_rectangle_polygon(start_x, start_y, end_x, end_y, width)
Finds a PyQt5 object used for drawing a rectangular pick
get_render_kwargs()
Returns a dictionary to be used for the kwargs of render.render
hdbscan()
Gets channel, parameters and path for HDBSCAN
_hdbscan(channel, path, params)
Performs HDBSCAN in a given channel with user-defined
parameters
index_locs(channel)
Indexes locs from channel in a grid
load_picks(path)
Loads picks from .yaml file defined by path
link
map_to_movie(position)
Converts coordinates from display units to camera units
map_to_view(x,y)
Converts coordinates from camera units to display units
max_movie_height()
Returns maximum height of all loaded images
max_movie_width()
Returns maximum width of all loaded images
mouseMoveEvent(event)
Defines actions taken when moving mouse
mousePressEvent(event)
Defines actions taken when pressing mouse button
mouseReleaseEvent(event)
Defines actions taken when releasing mouse button
move_to_pick()
Change viewport to show a pick identified by its id
movie_size()
Returns tuple with movie height and width
nearest_neighbor()
Gets channels for nearest neighbor analysis
_nearest_neighbor(channel1, channel2)
Calculates and saves distances of the nearest neighbors between
localizations in channels 1 and 2
on_pick_shape_changed(pick_shape_index)
Assigns attributes and updates scene if new pick shape chosen
pan_relative(dy, dx)
Moves viewport by a given relative distance
pick_message_box(params)
Returns a message box for selecting picks
pick_similar()
Searches picks similar to the current picks
picked_locs(channel)
Returns picked localizations in the specified channel
read_colors()
Finds currently selected colors for multicolor rendering
refold_groups()
Refolds grouped locs across x axis
relative_position(viewport_center, cursor_position)
Finds the position of the cursor relative to the viewport's
center
remove_points()
Removes all distance measurement points
remove_picks(position)
Deletes picks at a given position
remove_picked_locs()
Gets channel for removing picked localizations
_remove_picked_locs(channel)
Deletes localizations in picks in channel
render_multi_channel(kwargs)
Renders and paints multichannel locs
render_scene()
Returns QImage with rendered localizations
render_single_channel(kwargs)
Renders single channel localizations
resizeEvent()
Defines what happens when window is resized
rmsd_at_com(locs)
Calculates root mean square displacement at center of mass
save_channel()
Opens an input dialog asking which channel of locs to save
save_channel_pickprops()
Opens an input dialog asking which channel to use in saving
pick properties
save_pick_properties(path, channel)
Saves picks' properties in a given channel to path
save_picked_locs(path, channel)
Saves picked locs from channel to path
save_picked_locs_multi(path)
Saves picked locs combined from all channels to path
save_picks(path)
Saves picked regions in .yaml format
scale_contrast(image)
Scales image based on contrast value from Display Settings
Dialog
select_traces()
Lets user to select picks based on their traces
set_mode()
Sets self._mode for QMouseEvents
set_property()
Activates rendering by property
set_zoom(zoom)
Zooms in/out to the given value
shifts_from_picked_coordinate(locs, coordinate)
Calculates shifts between channels along a given coordinate
shift_from_picked()
For each pick, calculate the center of mass and rcc based on
shifts
shift_from_rcc()
Estimates image shifts based on whole images' rcc
show_drift()
Plots current drift
show_legend()
Displays legend for rendering by property
show_pick()
Lets user select picks based on their 2D scatter
show_pick_3d
Lets user select picks based on their 3D scatter
show_pick_3d_iso
Lets user select picks based on their 3D scatter and
projections
show_trace()
Displays x and y coordinates of locs in picks in time
sizeHint()
Returns recommended window size
smlm_clusterer()
Gets channel, parameters and path for SMLM clustering
_smlm_clusterer(channel, path, params)
Performs SMLM clustering in a given channel with user-defined
parameters
subtract_picks(path)
Clears current picks that cover the picks loaded from path
to_8bit(image)
Converts a normalised image to 8 bit
to_down()
Called on pressing down arrow; moves FOV
to_left()
Called on pressing left arrow; moves FOV
to_right()
Called on pressing right arrow; moves FOV
to_up()
Called on pressing up arrow; moves FOV
undrift()
Undrifts with RCC
undrift_from_picked
Gets channel for undrifting from picked locs
_undrift_from_picked
Undrifts based on picked locs in a given channel
_undrift_from_picked_coordinate
Calculates drift in a given coordinate
undrift_from_picked2d
Gets channel for undrifting from picked locs in 2D
_undrift_from_picked2d
Undrifts in x and y based on picked locs in a given channel
undo_drift
Gets channel for undoing drift
_undo_drift
Deletes the latest drift in a given channel
unfold_groups()
Shifts grouped locs across x axis
unfold_groups_square()
Shifts grouped locs onto a rectangular grid of chosen length
update_cursor()
Changes cursor according to self._mode
update_pick_info_long()
Called when evaluating picks statistics in Info Dialog
update_pick_info_short()
Updates number of picks in Info Dialog
update_scene()
Updates the view of rendered locs as well as cursor
update_scene_slicer()
Updates the view of rendered locs if they are sliced
viewport_center()
Finds viewport's center (pixels)
viewport_height()
Finds viewport's height (pixels)
viewport_size()
Finds viewport's height and width (pixels)
viewport_width()
Finds viewport's width (pixels)
wheelEvent(QWheelEvent)
Defines what happens when mouse wheel is used
zoom(factor)
Changes zoom relatively to factor
zoom_in()
Zooms in by a constant factor
zoom_out()
Zooms out by a constant factor
"""
def __init__(self, window):
super().__init__()
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "render.ico")
icon = QtGui.QIcon(icon_path)
self.icon = icon
self.setAcceptDrops(True)
self.setSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
)
self.rubberband = QtWidgets.QRubberBand(
QtWidgets.QRubberBand.Rectangle, self
)
self.rubberband.setStyleSheet("selection-background-color: white")
self.window = window
self._pixmap = None
self.all_locs = [] # for fast render
self.locs = []
self.infos = []
self.locs_paths = []
self.group_color = []
self._mode = "Zoom"
self._pan = False
self._rectangle_pick_ongoing = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self._points = []
self.index_blocks = []
self._drift = []
self._driftfiles = []
self.currentdrift = []
self.x_render_cache = []
self.x_render_state = False
def get_group_color(self, locs):
"""
Finds group color for each localization in single channel data
with group info.
Parameters
----------
locs : np.recarray
Array with all localizations
Returns
-------
np.array
Array with int group color index for each loc
"""
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
# check if groups are consecutive
if set(groups) == set(range(min(groups), max(groups) + 1)):
if len(groups) > 5000:
choice = QtWidgets.QMessageBox.question(
self,
"Group question",
(
"Groups are not consecutive"
" and more than 5000 groups detected."
" Re-Index groups? This may take a while."
),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
)
if choice == QtWidgets.QMessageBox.Yes:
pb = lib.ProgressDialog(
"Re-Indexing groups", 0, len(groups), self
)
pb.set_value(0)
for i in tqdm(range(len(groups))):
groupcopy[locs.group == groups[i]] = i
pb.set_value(i)
pb.close()
else:
for i in tqdm(range(len(groups))):
groupcopy[locs.group == groups[i]] = i
else:
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
return groups[groupcopy]
    def add(self, path, render=True):
        """
        Loads a .hdf5 localizations and the associated .yaml metadata
        files.

        Appends the new channel to all per-channel containers, tries to
        load an associated drift file, updates group colors for the
        first file and wires the new channel into the various dialogs.

        Parameters
        ----------
        path : str
            String specifying the path to the .hdf5 file
        render : boolean, optional
            Specifies if the loaded files should be rendered
            (default True)
        """
        # read .hdf5 and .yaml files
        try:
            locs, info = io.load_locs(path, qt_parent=self)
        except io.NoMetadataFileError:
            # abort silently when no metadata file is found
            return
        locs = lib.ensure_sanity(locs, info)
        # update pixelsize from the metadata written by Picasso Localize
        for element in info:
            if "Picasso Localize" in element.values():
                if "Pixelsize" in element:
                    self.window.display_settings_dlg.pixelsize.setValue(
                        element["Pixelsize"]
                    )
        # append loaded data (one entry per channel in each container)
        self.locs.append(locs)
        self.all_locs.append(copy.copy(locs))  # for fast rendering
        self.infos.append(info)
        self.locs_paths.append(path)
        self.index_blocks.append(None)
        # try to load a drift .txt file:
        drift = None
        if "Last driftfile" in info[-1]:
            driftpath = info[-1]["Last driftfile"]
            if driftpath is not None:
                try:
                    # columns: x, y and optionally z drift per frame
                    with open(driftpath, "r") as f:
                        drifttxt = np.loadtxt(f)
                    drift_x = drifttxt[:, 0]
                    drift_y = drifttxt[:, 1]
                    if drifttxt.shape[1] == 3:
                        drift_z = drifttxt[:, 2]
                        drift = (drift_x, drift_y, drift_z)
                        drift = np.rec.array(
                            drift, dtype=[("x", "f"), ("y", "f"), ("z", "f")]
                        )
                    else:
                        drift = (drift_x, drift_y)
                        drift = np.rec.array(
                            drift, dtype=[("x", "f"), ("y", "f")]
                        )
                except Exception as e:
                    # NOTE(review): best-effort load -- any failure is
                    # printed and leaves drift = None
                    print(e)
                    # drift already initialized before
                    pass
        # append drift info
        self._drift.append(drift)
        self._driftfiles.append(None)
        self.currentdrift.append(None)
        # if this is the first loc file, find the median localization
        # precision and set group colors, if needed
        if len(self.locs) == 1:
            # NOTE(review): this is the mean of the x and y medians, not
            # a true median over both axes
            self.median_lp = np.mean(
                [np.median(locs.lpx), np.median(locs.lpy)]
            )
            if hasattr(locs, "group"):
                if len(self.group_color) == 0 and locs.group.size:
                    self.group_color = self.get_group_color(self.locs[0])
        # render the loaded file
        if render:
            self.fit_in_view(autoscale=True)
            self.update_scene()
        # add options to rendering by parameter
        self.window.display_settings_dlg.parameter.addItems(locs.dtype.names)
        if hasattr(locs, "z"):
            # append z coordinates for slicing
            self.window.slicer_dialog.zcoord.append(locs.z)
            # unlock 3D settings
            for action in self.window.actions_3d:
                action.setVisible(True)
        # allow using View, Tools and Postprocess menus
        for menu in self.window.menus:
            menu.setDisabled(False)
        # change current working directory
        os.chdir(os.path.dirname(path))
        # add the locs to the dataset dialog
        self.window.dataset_dialog.add_entry(path)
        self.window.setWindowTitle(
            "Picasso: Render. File: {}".format(os.path.basename(path))
        )
        # fast rendering add channel
        self.window.fast_render_dialog.on_file_added()
def add_multiple(self, paths):
""" Loads several .hdf5 and .yaml files.
Parameters
----------
paths: list
Contains the paths to the files to be loaded
"""
if len(paths):
fit_in_view = len(self.locs) == 0
paths = sorted(paths)
pd = lib.ProgressDialog(
"Loading channels", 0, len(paths), self
)
pd.set_value(0)
pd.setModal(False)
for i, path in enumerate(paths):
self.add(path, render=False)
pd.set_value(i+1)
if len(self.locs): # if loading was successful
if fit_in_view:
self.fit_in_view(autoscale=True)
else:
self.update_scene()
def add_pick(self, position, update_scene=True):
""" Adds a pick at a given position. """
self._picks.append(position)
self.update_pick_info_short()
if update_scene:
self.update_scene(picks_only=True)
def add_picks(self, positions):
""" Adds several picks. """
for position in positions:
self.add_pick(position, update_scene=False)
self.update_scene(picks_only=True)
def add_point(self, position, update_scene=True):
"""
Adds a point at a given position for measuring distances.
"""
self._points.append(position)
if update_scene:
self.update_scene()
def adjust_viewport_to_view(self, viewport):
"""
Adds space to a desired viewport, such that it matches the
window aspect ratio. Returns a viewport.
"""
viewport_height = viewport[1][0] - viewport[0][0]
viewport_width = viewport[1][1] - viewport[0][1]
view_height = self.height()
view_width = self.width()
viewport_aspect = viewport_width / viewport_height
view_aspect = view_width / view_height
if view_aspect >= viewport_aspect:
y_min = viewport[0][0]
y_max = viewport[1][0]
x_range = viewport_height * view_aspect
x_margin = (x_range - viewport_width) / 2
x_min = viewport[0][1] - x_margin
x_max = viewport[1][1] + x_margin
else:
x_min = viewport[0][1]
x_max = viewport[1][1]
y_range = viewport_width / view_aspect
y_margin = (y_range - viewport_height) / 2
y_min = viewport[0][0] - y_margin
y_max = viewport[1][0] + y_margin
return [(y_min, x_min), (y_max, x_max)]
    def align(self):
        """ Align channels by RCC or from picked localizations.

        If picks exist, one shift per channel is computed from the
        picked locs and applied once. Otherwise, whole-image RCC is
        iterated (up to 5 times) until the shifts fall below the
        convergence threshold.
        """
        if len(self._picks) > 0:  # shift from picked
            # find shift between channels
            shift = self.shift_from_picked()
            print("Shift {}".format(shift))
            sp = lib.ProgressDialog(
                "Shifting channels", 0, len(self.locs), self
            )
            sp.set_value(0)
            # align each channel
            for i, locs_ in enumerate(self.locs):
                # shift[0] is y, shift[1] is x, optional shift[2] is z
                locs_.y -= shift[0][i]
                locs_.x -= shift[1][i]
                if len(shift) == 3:
                    locs_.z -= shift[2][i]
                self.all_locs[i] = copy.copy(locs_)
                # Cleanup: index blocks are stale after shifting
                self.index_blocks[i] = None
                sp.set_value(i + 1)
            self.update_scene()
        else:  # align using whole images
            max_iterations = 5
            iteration = 0
            convergence = 0.001  # (pixels), around 0.1 nm
            shift_x = []
            shift_y = []
            shift_z = []
            # NOTE(review): display is always False here, so the
            # plotting block at the end is dead code
            display = False
            progress = lib.ProgressDialog(
                "Aligning images..", 0, max_iterations, self
            )
            progress.show()
            progress.set_value(0)
            for iteration in range(max_iterations):
                completed = True
                progress.set_value(iteration)
                # find shift between channels
                shift = self.shift_from_rcc()
                sp = lib.ProgressDialog(
                    "Shifting channels", 0, len(self.locs), self
                )
                sp.set_value(0)
                temp_shift_x = []
                temp_shift_y = []
                temp_shift_z = []
                for i, locs_ in enumerate(self.locs):
                    # not converged if any channel still moves more
                    # than the threshold (x + y magnitude)
                    if (
                        np.absolute(shift[0][i]) + np.absolute(shift[1][i])
                        > convergence
                    ):
                        completed = False
                    # shift each channel
                    locs_.y -= shift[0][i]
                    locs_.x -= shift[1][i]
                    temp_shift_x.append(shift[1][i])
                    temp_shift_y.append(shift[0][i])
                    if len(shift) == 3:
                        locs_.z -= shift[2][i]
                        temp_shift_z.append(shift[2][i])
                    sp.set_value(i + 1)
                # NOTE(review): shallow list copy -- all_locs entries
                # still reference the same recarrays as self.locs,
                # unlike the per-channel copy in the picked branch;
                # confirm this is intended
                self.all_locs = copy.copy(self.locs)
                shift_x.append(np.mean(temp_shift_x))
                shift_y.append(np.mean(temp_shift_y))
                if len(shift) == 3:
                    shift_z.append(np.mean(temp_shift_z))
                # NOTE(review): redundant -- iteration is the loop
                # variable and is reassigned by the for statement
                iteration += 1
                self.update_scene()
                # Skip when converged:
                if completed:
                    break
            progress.close()
            # Plot shift (dead code while display is hard-coded False)
            if display:
                fig1 = plt.figure(figsize=(8, 8))
                plt.suptitle("Shift")
                plt.subplot(1, 1, 1)
                plt.plot(shift_x, "o-", label="x shift")
                plt.plot(shift_y, "o-", label="y shift")
                plt.xlabel("Iteration")
                plt.ylabel("Mean Shift per Iteration (Px)")
                plt.legend(loc="best")
                fig1.show()
    @check_pick
    def combine(self):
        """
        Combines locs in picks.

        Works by linking all locs in each pick region, leading to only
        one loc per pick.

        See View.link for more info.
        """
        channel = self.get_channel()
        picked_locs = self.picked_locs(channel, add_group=False)
        out_locs = []
        # use very large values for linking localizations so that every
        # loc within a pick links into a single one
        r_max = 2 * max(
            self.infos[channel][0]["Height"], self.infos[channel][0]["Width"]
        )
        max_dark = self.infos[channel][0]["Frames"]
        progress = lib.ProgressDialog(
            "Combining localizations in picks", 0, len(picked_locs), self
        )
        # link every localization in each pick
        for i, pick_locs in enumerate(picked_locs):
            pick_locs_out = postprocess.link(
                pick_locs,
                self.infos[channel],
                r_max=r_max,
                max_dark_time=max_dark,
                remove_ambiguous_lengths=False,
            )
            # NOTE(review): truthiness of a recarray -- relies on
            # postprocess.link returning an empty/falsy result for
            # empty picks; confirm for multi-element arrays
            if not pick_locs_out:
                print("no locs in pick - skipped")
            else:
                out_locs.append(pick_locs_out)
            progress.set_value(i + 1)
        self.all_locs[channel] = stack_arrays(
            out_locs, asrecarray=True, usemask=False
        )
        self.locs[channel] = copy.copy(self.all_locs[channel])
        # refresh group colors if group info survived the linking
        if hasattr(self.all_locs[channel], "group"):
            groups = np.unique(self.all_locs[channel].group)
            # In case a group is missing
            groups = np.arange(np.max(groups) + 1)
            np.random.shuffle(groups)
            groups %= N_GROUP_COLORS
            self.group_color = groups[self.all_locs[channel].group]
        self.update_scene()
def link(self):
"""
Link localizations
"""
channel = self.get_channel()
if hasattr(self.all_locs[channel], "len"):
QtWidgets.QMessageBox.information(
self, "Link", "Localizations are already linked. Aborting..."
)
return
else:
r_max, max_dark, ok = LinkDialog.getParams()
if ok:
status = lib.StatusDialog("Linking localizations...", self)
self.all_locs[channel] = postprocess.link(
self.all_locs[channel],
self.infos[channel],
r_max=r_max,
max_dark_time=max_dark,
)
status.close()
if hasattr(self.all_locs[channel], "group"):
groups = np.unique(self.all_locs[channel].group)
groups = np.arange(np.max(groups) + 1)
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[self.all_locs[channel].group]
self.locs[channel] = copy.copy(self.all_locs[channel])
self.update_scene()
def dbscan(self):
"""
Gets channel, parameters and path for DBSCAN.
"""
channel = self.get_channel_all_seq("Cluster")
# get DBSCAN parameters
params = DbscanDialog.getParams()
ok = params[-1] # true if parameters were given
if ok:
if channel == len(self.locs_paths): # apply to all channels
# get saving name suffix
suffix, ok = QtWidgets.QInputDialog.getText(
self,
"Input Dialog",
"Enter suffix",
QtWidgets.QLineEdit.Normal,
"_clustered",
)
if ok:
for channel in range(len(self.locs_paths)):
path = self.locs_paths[channel].replace(
".hdf5", f"{suffix}.hdf5"
)
self._dbscan(channel, path, params)
else:
# get the path to save
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save clustered locs",
self.locs_paths[channel].replace(".hdf5", "_clustered.hdf5"),
filter="*.hdf5",
)
if path:
self._dbscan(channel, path, params)
def _dbscan(self, channel, path, params):
"""
Performs DBSCAN in a given channel with user-defined parameters
and saves the result.
Parameters
----------
channel : int
Index of the channel were clustering is performed
path : str
Path to save clustered localizations
params : list
DBSCAN parameters
"""
radius, min_density, save_centers, _ = params
status = lib.StatusDialog(
"Applying DBSCAN. This may take a while.", self
)
# keep group info if already present
if hasattr(self.all_locs[channel], "group"):
locs = lib.append_to_rec(
self.all_locs[channel],
self.all_locs[channel].group,
"group_input",
)
else:
locs = self.all_locs[channel]
# perform DBSCAN in a channel
locs = postprocess.dbscan(
locs,
radius,
min_density,
self.window.display_settings_dlg.pixelsize.value(),
)
dbscan_info = {
"Generated by": "Picasso DBSCAN",
"Number of clusters": len(np.unique(locs.group)),
"Radius [cam. px]": radius,
"Minimum local density": min_density,
}
io.save_locs(path, locs, self.infos[channel] + [dbscan_info])
status.close()
if save_centers:
status = lib.StatusDialog("Calculating cluster centers", self)
path = path.replace(".hdf5", "_cluster_centers.hdf5")
centers = clusterer.find_cluster_centers(locs)
io.save_locs(path, centers, self.infos[channel] + [dbscan_info])
status.close()
def hdbscan(self):
"""
Gets channel, parameters and path for HDBSCAN.
"""
if not HDBSCAN_IMPORTED: # no hdbscan package found
message = (
"No HDBSCAN detected. Please install\n"
"the python package HDBSCAN*."
)
QtWidgets.QMessageBox.information(
self,
"No HDBSCAN",
message,
)
return
channel = self.get_channel_all_seq("Cluster")
# get HDBSCAN parameters
params = HdbscanDialog.getParams()
ok = params[-1] # true if parameters were given
if ok:
if channel == len(self.locs_paths): # apply to all channels
# get saving name suffix
suffix, ok = QtWidgets.QInputDialog.getText(
self,
"Input Dialog",
"Enter suffix",
QtWidgets.QLineEdit.Normal,
"_clustered",
)
if ok:
for channel in range(len(self.locs_paths)):
path = self.locs_paths[channel].replace(
".hdf5", f"{suffix}.hdf5"
)
self._hdbscan(channel, path, params)
else:
# get the path to save
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save clustered locs",
self.locs_paths[channel].replace(
".hdf5",
"_clustered.hdf5"
),
filter="*.hdf5",
)
if path:
self._hdbscan(channel, path, params)
def _hdbscan(self, channel, path, params):
"""
Performs HDBSCAN in a given channel with user-defined
parameters and saves the result.
Parameters
----------
channel : int
Index of the channel were clustering is performed
path : str
Path to save clustered localizations
params : list
HDBSCAN parameters
"""
min_cluster, min_samples, cluster_eps, save_centers, _ = params
status = lib.StatusDialog(
"Applying HDBSCAN. This may take a while.", self
)
# keep group info if already present
if hasattr(self.all_locs[channel], "group"):
locs = lib.append_to_rec(
self.all_locs[channel],
self.all_locs[channel].group,
"group_input",
)
else:
locs = self.all_locs[channel]
# perform HDBSCAN for each channel
locs = postprocess.hdbscan(
locs,
min_cluster,
min_samples,
cluster_eps,
self.window.display_settings_dlg.pixelsize.value(),
)
hdbscan_info = {
"Generated by": "Picasso HDBSCAN",
"Number of clusters": len(np.unique(locs.group)),
"Min. cluster": min_cluster,
"Min. samples": min_samples,
"Intercluster distance": cluster_eps,
}
io.save_locs(path, locs, self.infos[channel] + [hdbscan_info])
status.close()
if save_centers:
status = lib.StatusDialog("Calculating cluster centers", self)
path = path.replace(".hdf5", "_cluster_centers.hdf5")
centers = clusterer.find_cluster_centers(locs)
io.save_locs(path, centers, self.infos[channel] + [hdbscan_info])
status.close()
def smlm_clusterer(self):
"""
Gets channel, parameters and path for SMLM clustering
"""
channel = self.get_channel_all_seq("Cluster")
# get clustering parameters
if any([hasattr(_, "z") for _ in self.all_locs]):
params = SMLMDialog3D.getParams()
else:
params = SMLMDialog2D.getParams()
ok = params[-1] # true if parameters were given
if ok:
if channel == len(self.locs_paths): # apply to all
# get saving name suffix
suffix, ok = QtWidgets.QInputDialog.getText(
self,
"Input Dialog",
"Enter suffix",
QtWidgets.QLineEdit.Normal,
"_clustered",
)
if ok:
for channel in range(len(self.locs_paths)):
path = self.locs_paths[channel].replace(
".hdf5", f"{suffix}.hdf5"
) # add the suffix to the current path
self._smlm_clusterer(channel, path, params)
else:
# get the path to save
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save clustered locs",
self.locs_paths[channel].replace(
".hdf5", "_clustered.hdf5"
),
filter="*.hdf5",
)
if path:
self._smlm_clusterer(channel, path, params)
    def _smlm_clusterer(self, channel, path, params):
        """
        Performs SMLM clustering in a given channel with user-defined
        parameters and saves the result.

        If picks exist, only picked localizations are clustered (pick
        by pick, with cluster ids offset so they stay unique across
        picks); otherwise all localizations of the channel are
        clustered at once.

        Parameters
        ----------
        channel : int
            Index of the channel where clustering is performed
        path : str
            Path to save clustered localizations
        params : list
            SMLM clustering parameters
        """
        # for converting z coordinates
        pixelsize = self.window.display_settings_dlg.pixelsize.value()
        if len(self._picks):  # cluster only picked localizations
            clustered_locs = []  # list with picked locs after clustering
            picked_locs = self.picked_locs(channel, add_group=False)
            # cluster ids start at 1 so that no cluster keeps id 0
            group_offset = 1
            pd = lib.ProgressDialog(
                "Clustering in picks", 0, len(picked_locs), self
            )
            pd.set_value(0)
            for i in range(len(picked_locs)):
                locs = picked_locs[i]
                # save pick index as group_input
                locs = lib.append_to_rec(
                    locs,
                    i * np.ones(len(locs), dtype=np.int32),
                    "group_input",
                )
                if len(locs) > 0:
                    labels = clusterer.cluster(locs, params, pixelsize)
                    temp_locs = lib.append_to_rec(
                        locs, labels, "group"
                    )  # add cluster id to locs
                    # -1 means no cluster assigned to a loc
                    temp_locs = temp_locs[temp_locs.group != -1]
                    if len(temp_locs) > 0:
                        # make sure each picks produces unique cluster ids
                        temp_locs.group += group_offset
                        clustered_locs.append(temp_locs)
                        group_offset += np.max(labels) + 1
                pd.set_value(i + 1)
            clustered_locs = stack_arrays(
                clustered_locs, asrecarray=True, usemask=False
            )  # np.recarray with all clustered locs to be saved
        else:  # cluster all locs
            status = lib.StatusDialog("Clustering localizations", self)
            # keep group info if already present
            if hasattr(self.all_locs[channel], "group"):
                locs = lib.append_to_rec(
                    self.all_locs[channel],
                    self.all_locs[channel].group,
                    "group_input",
                )
            else:
                locs = self.all_locs[channel]
            labels = clusterer.cluster(locs, params, pixelsize)
            clustered_locs = lib.append_to_rec(
                locs, labels, "group"
            )  # add cluster id to locs
            # -1 means no cluster assigned to a loc
            clustered_locs = clustered_locs[clustered_locs.group != -1]
            status.close()
        # saving; metadata depends on whether the data is 3D
        if hasattr(self.all_locs[channel], "z"):
            new_info = {
                "Generated by": "Picasso Render SMLM clusterer 3D",
                "Number of clusters": len(np.unique(clustered_locs.group)),
                "Clustering radius xy [cam. px]": params[0],
                "Clustering radius z [cam. px]": params[1],
                "Min. cluster size": params[2],
                "Performed basic frame analysis": params[-2],
            }
        else:
            new_info = {
                "Generated by": "Picasso Render SMLM clusterer 2D",
                "Number of clusters": len(np.unique(clustered_locs.group)),
                "Clustering radius [cam. px]": params[0],
                "Min. cluster size": params[1],
                "Performed basic frame analysis": params[-2],
            }
        info = self.infos[channel] + [new_info]
        # save locs
        io.save_locs(path, clustered_locs, info)
        # save cluster centers (params[-3] is the save-centers flag)
        if params[-3]:
            status = lib.StatusDialog("Calculating cluster centers", self)
            path = path.replace(".hdf5", "_cluster_centers.hdf5")
            centers = clusterer.find_cluster_centers(clustered_locs)
            io.save_locs(path, centers, info)
            status.close()
def shifts_from_picked_coordinate(self, locs, coordinate):
"""
Calculates shifts between channels along a given coordinate.
Parameters
----------
locs : np.recarray
Picked locs from all channels
coordinate : str
Specifies which coordinate should be used (x, y, z)
Returns
-------
np.array
Array of shape (n_channels, n_channels) with shifts between
all channels
"""
n_channels = len(locs)
# Calculating center of mass for each channel and pick
coms = []
for channel_locs in locs:
coms.append([])
for group_locs in channel_locs:
group_com = np.mean(getattr(group_locs, coordinate))
coms[-1].append(group_com)
# Calculating image shifts
d = np.zeros((n_channels, n_channels))
for i in range(n_channels - 1):
for j in range(i + 1, n_channels):
d[i, j] = np.nanmean(
[cj - ci for ci, cj in zip(coms[i], coms[j])]
)
return d
def shift_from_picked(self):
"""
Used by align. For each pick, calculate the center of mass and
rcc based on shifts.
Returns
-------
tuple
With shifts; shape (2,) or (3,) (if z coordinate present)
"""
n_channels = len(self.locs)
locs = [self.picked_locs(_) for _ in range(n_channels)]
dy = self.shifts_from_picked_coordinate(locs, "y")
dx = self.shifts_from_picked_coordinate(locs, "x")
if all([hasattr(_[0], "z") for _ in locs]):
dz = self.shifts_from_picked_coordinate(locs, "z")
else:
dz = None
return lib.minimize_shifts(dx, dy, shifts_z=dz)
    def shift_from_rcc(self):
        """
        Used by align. Estimates image shifts based on whole images'
        rcc (redundant cross-correlation).
        Returns
        -------
        tuple
            With shifts; shape (2,) or (3,) (if z coordinate present)
        """
        n_channels = len(self.locs)
        rp = lib.ProgressDialog("Rendering images", 0, n_channels, self)
        rp.set_value(0)
        images = []
        # render each channel and save it in images
        for i, (locs_, info_) in enumerate(zip(self.locs, self.infos)):
            _, image = render.render(locs_, info_, blur_method="smooth")
            images.append(image)
            rp.set_value(i + 1)
        # number of unique channel pairs to correlate
        n_pairs = int(n_channels * (n_channels - 1) / 2)
        rc = lib.ProgressDialog("Correlating image pairs", 0, n_pairs, self)
        return imageprocess.rcc(images, callback=rc.set_value)
@check_pick
def clear_picks(self):
""" Deletes all current picks. """
self._picks = []
self.window.info_dialog.n_picks.setText(str(len(self._picks)))
self.update_scene(picks_only=True)
def dragEnterEvent(self, event):
"""
Defines what happens when a file is dragged onto the window.
"""
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def get_pick_rectangle_corners(
self, start_x, start_y, end_x, end_y, width
):
"""
Finds the positions of corners of a rectangular pick.
Rectangular pick is defined by:
[(start_x, start_y), (end_x, end_y)]
and its width. (all values in pixels)
Returns
-------
tuple
Contains corners' x and y coordinates in two lists
"""
if end_x == start_x:
alpha = np.pi / 2
else:
alpha = np.arctan((end_y - start_y) / (end_x - start_x))
dx = width * np.sin(alpha) / 2
dy = width * np.cos(alpha) / 2
x1 = start_x - dx
x2 = start_x + dx
x4 = end_x - dx
x3 = end_x + dx
y1 = start_y + dy
y2 = start_y - dy
y4 = end_y + dy
y3 = end_y - dy
return [x1, x2, x3, x4], [y1, y2, y3, y4]
def get_pick_rectangle_polygon(
self, start_x, start_y, end_x, end_y, width, return_most_right=False
):
"""
Finds QtGui.QPolygonF object used for drawing a rectangular
pick.
Returns
-------
QtGui.QPolygonF
"""
X, Y = self.get_pick_rectangle_corners(
start_x, start_y, end_x, end_y, width
)
p = QtGui.QPolygonF()
for x, y in zip(X, Y):
p.append(QtCore.QPointF(x, y))
if return_most_right:
ix_most_right = np.argmax(X)
x_most_right = X[ix_most_right]
y_most_right = Y[ix_most_right]
return p, (x_most_right, y_most_right)
return p
    def draw_picks(self, image):
        """
        Draws all current picks onto rendered locs.
        Parameters
        ----------
        image : QImage
            Image containing rendered localizations
        Returns
        -------
        QImage
            Image with the drawn picks
        """
        # work on a copy so the cached rendered image stays untouched
        image = image.copy()
        t_dialog = self.window.tools_settings_dialog
        # draw circular picks
        if self._pick_shape == "Circle":
            # draw circular picks as points
            if t_dialog.point_picks.isChecked():
                painter = QtGui.QPainter(image)
                painter.setBrush(QtGui.QBrush(QtGui.QColor("yellow")))
                painter.setPen(QtGui.QColor("yellow"))
                # yellow is barely visible on white background
                if self.window.dataset_dialog.wbackground.isChecked():
                    painter.setBrush(QtGui.QBrush(QtGui.QColor("red")))
                    painter.setPen(QtGui.QColor("red"))
                for i, pick in enumerate(self._picks):
                    # convert from camera units to display units
                    cx, cy = self.map_to_view(*pick)
                    # fixed 3 px radius dot, independent of zoom level
                    painter.drawEllipse(QtCore.QPoint(cx, cy), 3, 3)
                    # annotate picks with their index
                    if t_dialog.pick_annotation.isChecked():
                        painter.drawText(cx + 20, cy + 20, str(i))
                painter.end()
            # draw circles
            else:
                # pick diameter converted from camera to display pixels
                d = t_dialog.pick_diameter.value()
                d *= self.width() / self.viewport_width()
                painter = QtGui.QPainter(image)
                painter.setPen(QtGui.QColor("yellow"))
                # yellow is barely visible on white background
                if self.window.dataset_dialog.wbackground.isChecked():
                    painter.setPen(QtGui.QColor("red"))
                for i, pick in enumerate(self._picks):
                    # convert from camera units to display units
                    cx, cy = self.map_to_view(*pick)
                    painter.drawEllipse(cx - d / 2, cy - d / 2, d, d)
                    # annotate picks with their index
                    if t_dialog.pick_annotation.isChecked():
                        painter.drawText(cx + d / 2, cy + d / 2, str(i))
                painter.end()
        # draw rectangular picks
        elif self._pick_shape == "Rectangle":
            # pick width converted from camera to display pixels
            w = t_dialog.pick_width.value()
            w *= self.width() / self.viewport_width()
            painter = QtGui.QPainter(image)
            painter.setPen(QtGui.QColor("yellow"))
            # yellow is barely visible on white background
            if self.window.dataset_dialog.wbackground.isChecked():
                painter.setPen(QtGui.QColor("red"))
            for i, pick in enumerate(self._picks):
                # convert from camera units to display units
                start_x, start_y = self.map_to_view(*pick[0])
                end_x, end_y = self.map_to_view(*pick[1])
                # draw a straight line across the pick
                painter.drawLine(start_x, start_y, end_x, end_y)
                # draw a rectangle
                polygon, most_right = self.get_pick_rectangle_polygon(
                    start_x, start_y, end_x, end_y, w, return_most_right=True
                )
                painter.drawPolygon(polygon)
                # annotate picks next to the rightmost corner
                if t_dialog.pick_annotation.isChecked():
                    painter.drawText(*most_right, str(i))
            painter.end()
        return image
    def draw_rectangle_pick_ongoing(self, image):
        """
        Draws an ongoing rectangular pick onto image.
        Called while the user is dragging out a rectangular pick; all
        rectangle coordinates here are in display (widget) pixels.
        Parameters
        ----------
        image : QImage
            Image containing rendered localizations
        Returns
        -------
        QImage
            Image with the drawn pick
        """
        # work on a copy so the cached rendered image stays untouched
        image = image.copy()
        painter = QtGui.QPainter(image)
        painter.setPen(QtGui.QColor("green"))
        # draw a line across the pick
        painter.drawLine(
            self.rectangle_pick_start_x,
            self.rectangle_pick_start_y,
            self.rectangle_pick_current_x,
            self.rectangle_pick_current_y,
        )
        w = self.window.tools_settings_dialog.pick_width.value()
        # convert from camera units to display units
        w *= self.width() / self.viewport_width()
        polygon = self.get_pick_rectangle_polygon(
            self.rectangle_pick_start_x,
            self.rectangle_pick_start_y,
            self.rectangle_pick_current_x,
            self.rectangle_pick_current_y,
            w,
        )
        # draw a rectangle
        painter.drawPolygon(polygon)
        painter.end()
        return image
    def draw_points(self, image):
        """
        Draws points and lines and distances between them onto image.
        Each point is drawn as a cross; consecutive points are joined
        by a line annotated with their distance in nm.
        Parameters
        ----------
        image : QImage
            Image containing rendered localizations
        Returns
        -------
        QImage
            Image with the drawn points
        """
        # work on a copy so the cached rendered image stays untouched
        image = image.copy()
        d = 20  # width of the drawn crosses (window pixels)
        painter = QtGui.QPainter(image)
        painter.setPen(QtGui.QColor("yellow"))
        # yellow is barely visible on white background
        if self.window.dataset_dialog.wbackground.isChecked():
            painter.setPen(QtGui.QColor("red"))
        # NOTE(review): the list initializers below are placeholders;
        # all four names are rebound to scalars inside the loop
        cx = []
        cy = []
        ox = []  # together with oldpoint used for drawing
        oy = []  # lines between points
        oldpoint = []
        pixelsize = self.window.display_settings_dlg.pixelsize.value()
        for point in self._points:
            if oldpoint != []:
                ox, oy = self.map_to_view(*oldpoint)  # convert to display units
            cx, cy = self.map_to_view(*point)  # convert to display units
            # draw a cross
            painter.drawPoint(cx, cy)
            painter.drawLine(cx, cy, cx + d / 2, cy)
            painter.drawLine(cx, cy, cx, cy + d / 2)
            painter.drawLine(cx, cy, cx - d / 2, cy)
            painter.drawLine(cx, cy, cx, cy - d / 2)
            # draw a line between points and show distance
            if oldpoint != []:
                painter.drawLine(cx, cy, ox, oy)
                font = painter.font()
                font.setPixelSize(20)
                painter.setFont(font)
                # distance in nm, truncated to 2 decimal places
                distance = (
                    float(
                        int(
                            np.sqrt(
                                (
                                    (oldpoint[0] - point[0]) ** 2
                                    + (oldpoint[1] - point[1]) ** 2
                                )
                            )
                            * pixelsize
                            * 100
                        )
                    )
                    / 100
                )
                # label the midpoint of the connecting line
                painter.drawText(
                    (cx + ox) / 2 + d, (cy + oy) / 2 + d, str(distance) + " nm"
                )
            oldpoint = point
        painter.end()
        return image
    def draw_scalebar(self, image):
        """
        Draws a scalebar in the lower right corner, if enabled in the
        Display Settings Dialog.
        Parameters
        ----------
        image : QImage
            Image containing rendered localizations
        Returns
        -------
        QImage
            Image with the drawn scalebar
        """
        if self.window.display_settings_dlg.scalebar_groupbox.isChecked():
            pixelsize = self.window.display_settings_dlg.pixelsize.value()
            # length (nm)
            scalebar = self.window.display_settings_dlg.scalebar.value()
            # convert requested length from nm to display pixels
            length_camerapxl = scalebar / pixelsize
            length_displaypxl = int(
                round(self.width() * length_camerapxl / self.viewport_width())
            )
            height = 10  # display pixels
            painter = QtGui.QPainter(image)
            painter.setPen(QtGui.QPen(QtCore.Qt.NoPen))
            painter.setBrush(QtGui.QBrush(QtGui.QColor("white")))
            # white scalebar not visible on white background
            if self.window.dataset_dialog.wbackground.isChecked():
                painter.setBrush(QtGui.QBrush(QtGui.QColor("black")))
            # draw a rectangle in the lower right corner
            x = self.width() - length_displaypxl - 35
            y = self.height() - height - 20
            painter.drawRect(x, y, length_displaypxl + 0, height + 0)
            # display scalebar's length
            if self.window.display_settings_dlg.scalebar_text.isChecked():
                font = painter.font()
                font.setPixelSize(20)
                painter.setFont(font)
                painter.setPen(QtGui.QColor("white"))
                # white scalebar not visible on white background
                if self.window.dataset_dialog.wbackground.isChecked():
                    painter.setPen(QtGui.QColor("black"))
                text_spacer = 40
                text_width = length_displaypxl + 2 * text_spacer
                text_height = text_spacer
                # center the label horizontally above the bar
                painter.drawText(
                    x - text_spacer,
                    y - 25,
                    text_width,
                    text_height,
                    QtCore.Qt.AlignHCenter,
                    str(scalebar) + " nm",
                )
        return image
    def draw_legend(self, image):
        """
        Draws a legend for multichannel data.
        Displayed in the top left corner, shows the color and the name
        of each channel that is currently checked in the Dataset
        Dialog.
        Parameters
        ----------
        image : QImage
            Image containing rendered localizations
        Returns
        -------
        QImage
            Image with the drawn legend
        """
        if self.window.dataset_dialog.legend.isChecked():
            n_channels = len(self.locs_paths)
            painter = QtGui.QPainter(image)
            # initial positions
            x = 12
            y = 20
            dy = 20  # space between names
            for i in range(n_channels):
                if self.window.dataset_dialog.checks[i].isChecked():
                    # NOTE(review): this NoPen is immediately overridden
                    # by the channel-color pen two lines below
                    painter.setPen(QtGui.QPen(QtCore.Qt.NoPen))
                    # use the channel's display color for its label
                    colordisp = self.window.dataset_dialog.colordisp_all[i]
                    color = colordisp.palette().color(QtGui.QPalette.Window)
                    painter.setPen(QtGui.QPen(color))
                    font = painter.font()
                    font.setPixelSize(12)
                    painter.setFont(font)
                    text = self.window.dataset_dialog.checks[i].text()
                    painter.drawText(QtCore.QPoint(x, y), text)
                    y += dy
        return image
def draw_minimap(self, image):
"""
Draw a minimap showing the position of current viewport.
Parameters
----------
image : QImage
Image containing rendered localizations
Returns
-------
QImage
Image with the drawn minimap
"""
if self.window.display_settings_dlg.minimap.isChecked():
movie_height, movie_width = self.movie_size()
length_minimap = 100
height_minimap = movie_height / movie_width * 100
# draw in the upper right corner, overview rectangle
x = self.width() - length_minimap - 20
y = 20
painter = QtGui.QPainter(image)
painter.setPen(QtGui.QColor("white"))
if self.window.dataset_dialog.wbackground.isChecked():
painter.setPen(QtGui.QColor("black"))
painter.drawRect(x, y, length_minimap + 0, height_minimap + 0)
painter.setPen(QtGui.QColor("yellow"))
if self.window.dataset_dialog.wbackground.isChecked():
painter.setPen(QtGui.QColor("red"))
length = self.viewport_width() / movie_width * length_minimap
height = self.viewport_height() / movie_height * height_minimap
x_vp = self.viewport[0][1] / movie_width * length_minimap
y_vp = self.viewport[0][0] / movie_height * length_minimap
painter.drawRect(x + x_vp, y + y_vp, length + 0, height + 0)
return image
    def draw_scene(
        self,
        viewport,
        autoscale=False,
        use_cache=False,
        picks_only=False,
    ):
        """
        Renders localizations in the given viewport and draws picks,
        legend, etc.
        Parameters
        ----------
        viewport : tuple
            Viewport defining the rendered FOV
        autoscale : boolean (default=False)
            True if contrast should be optimally adjusted
        use_cache : boolean (default=False)
            True if saved QImage of rendered locs is to be used
        picks_only : boolean (default=False)
            True if only picks and points are to be rendered
        """
        if not picks_only:
            # make sure viewport has the same shape as the main window
            self.viewport = self.adjust_viewport_to_view(viewport)
            # render locs
            qimage = self.render_scene(
                autoscale=autoscale, use_cache=use_cache
            )
            # scale image's size to the window
            qimage = qimage.scaled(
                self.width(),
                self.height(),
                QtCore.Qt.KeepAspectRatioByExpanding,
            )
            # draw scalebar, minimap and legend; cache the result so
            # pick-only updates can reuse it
            self.qimage_no_picks = self.draw_scalebar(qimage)
            self.qimage_no_picks = self.draw_minimap(self.qimage_no_picks)
            self.qimage_no_picks = self.draw_legend(self.qimage_no_picks)
            # adjust zoom in Display Settings Dialog
            dppvp = self.display_pixels_per_viewport_pixels()
            self.window.display_settings_dlg.set_zoom_silently(dppvp)
        # draw picks and points on top of the cached scene
        self.qimage = self.draw_picks(self.qimage_no_picks)
        self.qimage = self.draw_points(self.qimage)
        if self._rectangle_pick_ongoing:
            self.qimage = self.draw_rectangle_pick_ongoing(self.qimage)
        # convert to pixmap and show it
        self.pixmap = QtGui.QPixmap.fromImage(self.qimage)
        self.setPixmap(self.pixmap)
        self.window.update_info()
    def draw_scene_slicer(
        self,
        viewport,
        autoscale=False,
        use_cache=False,
        picks_only=False,
    ):
        """
        Renders sliced localizations in the given viewport and draws
        picks, legend, etc.
        Rendered pixmaps are cached per slicer position to avoid
        re-rendering when the user scrubs back and forth.
        Parameters
        ----------
        viewport : tuple
            Viewport defining the current FOV
        autoscale : boolean (default=False)
            True if contrast should be optimally adjusted
        use_cache : boolean (default=False)
            True if saved QImage of rendered locs is to be used
        picks_only : boolean (default=False)
            True if only picks and points are to be rendered
        """
        # try to get a saved pixmap for the current slicer position
        slicerposition = self.window.slicer_dialog.slicerposition
        pixmap = self.window.slicer_dialog.slicer_cache.get(slicerposition)
        if pixmap is None:  # if no pixmap found, render and cache it
            self.draw_scene(
                viewport,
                autoscale=autoscale,
                use_cache=use_cache,
                picks_only=picks_only,
            )
            self.window.slicer_dialog.slicer_cache[slicerposition] = (
                self.pixmap
            )
        else:
            self.setPixmap(pixmap)
def dropEvent(self, event):
"""
Defines what happens when a file is dropped onto the window.
If the file has ending .hdf5, attempts to load locs.
"""
urls = event.mimeData().urls()
paths = [_.toLocalFile() for _ in urls]
extensions = [os.path.splitext(_)[1].lower() for _ in paths]
if extensions == [".txt"]: # just one txt dropped
self.load_fov_drop(paths[0])
else:
paths = [
path
for path, ext in zip(paths, extensions)
if ext == ".hdf5"
]
self.add_multiple(paths)
def fit_in_view(self, autoscale=False):
""" Updates scene with all locs shown. """
movie_height, movie_width = self.movie_size()
viewport = [(0, 0), (movie_height, movie_width)]
self.update_scene(viewport=viewport, autoscale=autoscale)
    def move_to_pick(self):
        """ Adjust viewport to show a pick identified by its id. """
        # raise error when no picks found
        if len(self._picks) == 0:
            raise ValueError("No picks detected")
        # get pick id
        pick_no, ok = QtWidgets.QInputDialog.getInt(
            self, "", "Input pick number: ", 0, 0
        )
        if ok:
            # raise error when pick id too high
            if pick_no >= len(self._picks):
                raise ValueError("Pick number provided too high")
            else:  # calculate new viewport
                if self._pick_shape == "Circle":
                    r = (
                        self.window.tools_settings_dialog.pick_diameter.value()
                        / 2
                    )
                    # show the pick circle with a 40 % margin
                    x, y = self._picks[pick_no]
                    x_min = x - 1.4 * r
                    x_max = x + 1.4 * r
                    y_min = y - 1.4 * r
                    y_max = y + 1.4 * r
                else:
                    # rectangular pick: frame its corners with a 20 %
                    # margin around the pick's center
                    (xs, ys), (xe, ye) = self._picks[pick_no]
                    xc = np.mean([xs, xe])
                    yc = np.mean([ys, ye])
                    w = self.window.tools_settings_dialog.pick_width.value()
                    X, Y = self.get_pick_rectangle_corners(xs, ys, xe, ye, w)
                    x_min = min(X) - (0.2 * (xc - min(X)))
                    x_max = max(X) + (0.2 * (max(X) - xc))
                    y_min = min(Y) - (0.2 * (yc - min(Y)))
                    y_max = max(Y) + (0.2 * (max(Y) - yc))
                viewport = [(y_min, x_min), (y_max, x_max)]
                self.update_scene(viewport=viewport)
    def get_channel(self, title="Choose a channel"):
        """
        Opens an input dialog to ask for a channel.
        Returns a channel index or None if no locs loaded.
        Parameters
        ----------
        title : str
            Title of the channel-selection dialog
        Returns
        -------
        None if no locs loaded or channel picked, int otherwise
            Index of the chosen channel
        """
        n_channels = len(self.locs_paths)
        if n_channels == 0:
            return None
        elif n_channels == 1:
            # only one channel loaded; no need to ask
            return 0
        elif len(self.locs_paths) > 1:
            pathlist = list(self.locs_paths)
            index, ok = QtWidgets.QInputDialog.getItem(
                self, title, "Channel:", pathlist, editable=False
            )
            if ok:
                # NOTE(review): index() returns the FIRST occurrence,
                # which is ambiguous if the same path is loaded twice
                return pathlist.index(index)
            else:
                return None
    def save_channel(self, title="Choose a channel"):
        """
        Opens an input dialog to ask which channel to save.
        There is an option to save all channels separately or merge
        them together. Choosing "Apply to all sequentially" returns
        n_channels; "Combine all channels" returns n_channels + 1.
        Parameters
        ----------
        title : str
            Currently unused; the dialog title is hard-coded below.
            TODO confirm whether it should be passed to getItem.
        Returns
        -------
        None if no locs found or channel picked, int otherwise
            Index of the chosen channel
        """
        n_channels = len(self.locs_paths)
        if n_channels == 0:
            return None
        elif n_channels == 1:
            # only one channel loaded; no need to ask
            return 0
        elif len(self.locs_paths) > 1:
            pathlist = list(self.locs_paths)
            pathlist.append("Apply to all sequentially")
            pathlist.append("Combine all channels")
            index, ok = QtWidgets.QInputDialog.getItem(
                self,
                "Save localizations",
                "Channel:",
                pathlist,
                editable=False,
            )
            if ok:
                return pathlist.index(index)
            else:
                return None
    def get_channel_all_seq(self, title="Choose a channel"):
        """
        Opens an input dialog to ask for a channel.
        Returns a channel index or None if no locs loaded.
        If apply to all at once is chosen, the index is equal to the
        number of channels loaded.
        Parameters
        ----------
        title : str
            Currently unused; the dialog title is hard-coded below.
            TODO confirm whether it should be passed to getItem.
        Returns
        -------
        None if no locs loaded or channel picked, int otherwise
            Index of the chosen channel
        """
        n_channels = len(self.locs_paths)
        if n_channels == 0:
            return None
        elif n_channels == 1:
            # only one channel loaded; no need to ask
            return 0
        elif len(self.locs_paths) > 1:
            pathlist = list(self.locs_paths)
            pathlist.append("Apply to all sequentially")
            index, ok = QtWidgets.QInputDialog.getItem(
                self,
                "Save localizations",
                "Channel:",
                pathlist,
                editable=False,
            )
            if ok:
                return pathlist.index(index)
            else:
                return None
    def get_channel3d(self, title="Choose a channel"):
        """
        Similar to View.get_channel, used in selecting 3D picks.
        Adds an option to show all channels simultaneously, in which
        case the returned index equals the number of channels loaded.
        Parameters
        ----------
        title : str
            Currently unused; the dialog title is hard-coded below.
            TODO confirm whether it should be passed to getItem.
        """
        n_channels = len(self.locs_paths)
        if n_channels == 0:
            return None
        elif n_channels == 1:
            # only one channel loaded; no need to ask
            return 0
        elif len(self.locs_paths) > 1:
            pathlist = list(self.locs_paths)
            pathlist.append("Show all channels")
            index, ok = QtWidgets.QInputDialog.getItem(
                self, "Select channel", "Channel:", pathlist, editable=False
            )
            if ok:
                return pathlist.index(index)
            else:
                return None
    def get_render_kwargs(self, viewport=None):
        """
        Returns a dictionary to be used for the keyword arguments of
        render.render.
        Parameters
        ----------
        viewport : list (default=None)
            Specifies the FOV to be rendered. If None, the current
            viewport is taken.
        Returns
        -------
        dict
            Contains blur method, oversampling, viewport and min blur
            width
        """
        # blur method
        blur_button = (
            self.window.display_settings_dlg.blur_buttongroup.checkedButton()
        )
        # oversampling: display pixels per camera pixel that exactly
        # matches the window's pixel density
        optimal_oversampling = (
            self.display_pixels_per_viewport_pixels()
        )
        if self.window.display_settings_dlg.dynamic_disp_px.isChecked():
            # dynamic mode: always render at the optimal oversampling
            # and report the resulting display pixel size back to the
            # dialog without triggering its change signal
            oversampling = optimal_oversampling
            self.window.display_settings_dlg.set_disp_px_silently(
                self.window.display_settings_dlg.pixelsize.value()
                / optimal_oversampling
            )
        else:
            # fixed mode: derive oversampling from the user-chosen
            # display pixel size
            oversampling = float(
                self.window.display_settings_dlg.pixelsize.value()
                / self.window.display_settings_dlg.disp_px_size.value()
            )
            # cap the oversampling; finer than the screen's pixel
            # density would be wasted work
            if oversampling > optimal_oversampling:
                QtWidgets.QMessageBox.information(
                    self,
                    "Display pixel size too low",
                    (
                        "Oversampling will be adjusted to"
                        " match the display pixel density."
                    ),
                )
                oversampling = optimal_oversampling
                self.window.display_settings_dlg.set_disp_px_silently(
                    self.window.display_settings_dlg.pixelsize.value()
                    / optimal_oversampling
                )
        # viewport
        if viewport is None:
            viewport = self.viewport
        return {
            "oversampling": oversampling,
            "viewport": viewport,
            "blur_method": self.window.display_settings_dlg.blur_methods[
                blur_button
            ],
            "min_blur_width": float(
                self.window.display_settings_dlg.min_blur_width.value()
            ),
        }
def load_fov_drop(self, path):
"""
Checks if path is a fov .txt file (4 coordinates) and loads FOV.
Parameters
----------
path : str
Path specifiying .txt file
"""
try:
file = np.loadtxt(path)
except: # not a np array
return
if file.shape == (4,):
(x, y, w, h) = file
if w > 0 and h > 0:
viewport = [(y, x), (y + h, x + w)]
self.update_scene(viewport=viewport)
self.window.info_dialog.xy_label.setText(
"{:.2f} / {:.2f} ".format(x, y)
)
self.window.info_dialog.wh_label.setText(
"{:.2f} / {:.2f} pixel".format(w, h)
)
    def load_picks(self, path):
        """
        Loads picks from .yaml file.
        Parameters
        ----------
        path : str
            Path specifiying .yaml file
        Raises
        ------
        ValueError
            If .yaml file is not recognized
        """
        # load the file
        with open(path, "r") as f:
            regions = yaml.full_load(f)
        # Backwards compatibility for old picked region files, which
        # stored circular picks without an explicit "Shape" key
        if "Shape" in regions:
            loaded_shape = regions["Shape"]
        elif "Centers" in regions and "Diameter" in regions:
            loaded_shape = "Circle"
        else:
            raise ValueError("Unrecognized picks file")
        # change pick shape in Tools Settings Dialog
        shape_index = self.window.tools_settings_dialog.pick_shape.findText(
            loaded_shape
        )
        self.window.tools_settings_dialog.pick_shape.setCurrentIndex(
            shape_index
        )
        # assign loaded picks and pick size
        if loaded_shape == "Circle":
            self._picks = regions["Centers"]
            self.window.tools_settings_dialog.pick_diameter.setValue(
                regions["Diameter"]
            )
        elif loaded_shape == "Rectangle":
            self._picks = regions["Center-Axis-Points"]
            self.window.tools_settings_dialog.pick_width.setValue(
                regions["Width"]
            )
        else:
            raise ValueError("Unrecognized pick shape")
        # update Info Dialog
        self.update_pick_info_short()
        self.update_scene(picks_only=True)
    def subtract_picks(self, path):
        """
        Clears current picks that cover the picks loaded from path.
        A current pick is removed if its center lies within half the
        loaded diameter of ANY loaded pick center.
        Parameters
        ----------
        path : str
            Path specifiying .yaml file with picks
        Raises
        ------
        ValueError
            If .yaml file is not recognized
        NotImplementedError
            Rectangular picks have not been implemented yet
        """
        if self._pick_shape == "Rectangle":
            raise NotImplementedError(
                "Subtracting picks not implemented for rectangle picks"
            )
        oldpicks = self._picks.copy()
        # load .yaml
        with open(path, "r") as f:
            regions = yaml.full_load(f)
        self._picks = regions["Centers"]
        diameter = regions["Diameter"]
        # for each old pick: True if it is close to at least one
        # loaded pick (i.e. it should be removed)
        distances = (
            np.sum(
                (euclidean_distances(oldpicks, self._picks) < diameter / 2)
                * 1,
                axis=1,
            )
            >= 1
        )
        # keep only the old picks that were not matched
        filtered_list = [i for (i, v) in zip(oldpicks, distances) if not v]
        self._picks = filtered_list
        self.update_pick_info_short()
        self.window.tools_settings_dialog.pick_diameter.setValue(
            regions["Diameter"]
        )
        self.update_scene(picks_only=True)
def map_to_movie(self, position):
""" Converts coordinates from display units to camera units. """
x_rel = position.x() / self.width()
x_movie = x_rel * self.viewport_width() + self.viewport[0][1]
y_rel = position.y() / self.height()
y_movie = y_rel * self.viewport_height() + self.viewport[0][0]
return x_movie, y_movie
def map_to_view(self, x, y):
""" Converts coordinates from camera units to display units. """
cx = self.width() * (x - self.viewport[0][1]) / self.viewport_width()
cy = self.height() * (y - self.viewport[0][0]) / self.viewport_height()
return cx, cy
def max_movie_height(self):
""" Returns maximum height of all loaded images. """
return max(info[0]["Height"] for info in self.infos)
def max_movie_width(self):
""" Returns maximum width of all loaded images. """
return max([info[0]["Width"] for info in self.infos])
    def mouseMoveEvent(self, event):
        """
        Defines actions taken when moving mouse.
        Drawing zoom-in rectangle, panning or drawing a rectangular
        pick.
        Parameters
        ----------
        event : QMouseEvent
        """
        if self._mode == "Zoom":
            # if zooming in, resize the rubber band to follow the cursor
            if self.rubberband.isVisible():
                self.rubberband.setGeometry(
                    QtCore.QRect(self.origin, event.pos())
                )
            # if panning, shift the viewport by the relative mouse move
            if self._pan:
                rel_x_move = (event.x() - self.pan_start_x) / self.width()
                rel_y_move = (event.y() - self.pan_start_y) / self.height()
                self.pan_relative(rel_y_move, rel_x_move)
                # reset the anchor so the next move is relative again
                self.pan_start_x = event.x()
                self.pan_start_y = event.y()
        # if drawing a rectangular pick, track the current corner
        elif self._mode == "Pick":
            if self._pick_shape == "Rectangle":
                if self._rectangle_pick_ongoing:
                    self.rectangle_pick_current_x = event.x()
                    self.rectangle_pick_current_y = event.y()
                    self.update_scene(picks_only=True)
    def mousePressEvent(self, event):
        """
        Defines actions taken when pressing mouse button.
        Start drawing a zoom-in rectangle, start panning, start
        drawing a pick rectangle.
        Parameters
        ----------
        event : QMouseEvent
        """
        if self._mode == "Zoom":
            # start drawing a zoom-in rectangle
            if event.button() == QtCore.Qt.LeftButton:
                if len(self.locs) > 0:  # locs are loaded already
                    if not self.rubberband.isVisible():
                        self.origin = QtCore.QPoint(event.pos())
                        self.rubberband.setGeometry(
                            QtCore.QRect(self.origin, QtCore.QSize())
                        )
                        self.rubberband.show()
            # start panning
            elif event.button() == QtCore.Qt.RightButton:
                self._pan = True
                self.pan_start_x = event.x()
                self.pan_start_y = event.y()
                self.setCursor(QtCore.Qt.ClosedHandCursor)
                event.accept()
            else:
                event.ignore()
        # start drawing rectangular pick
        elif self._mode == "Pick":
            if event.button() == QtCore.Qt.LeftButton:
                if self._pick_shape == "Rectangle":
                    self._rectangle_pick_ongoing = True
                    # start corner in display units...
                    self.rectangle_pick_start_x = event.x()
                    self.rectangle_pick_start_y = event.y()
                    # ...and in camera units (used to store the pick)
                    self.rectangle_pick_start = self.map_to_movie(event.pos())
    def mouseReleaseEvent(self, event):
        """
        Defines actions taken when releasing mouse button.
        Zoom in, stop panning, add and remove picks, add and remove
        measure points.
        Parameters
        ----------
        event : QMouseEvent
        """
        if self._mode == "Zoom":
            if (
                event.button() == QtCore.Qt.LeftButton
                and self.rubberband.isVisible()
            ):  # zoom in if the zoom-in rectangle is visible
                end = QtCore.QPoint(event.pos())
                # only a down-right drag (end past origin) zooms in
                if end.x() > self.origin.x() and end.y() > self.origin.y():
                    x_min_rel = self.origin.x() / self.width()
                    x_max_rel = end.x() / self.width()
                    y_min_rel = self.origin.y() / self.height()
                    y_max_rel = end.y() / self.height()
                    viewport_height, viewport_width = self.viewport_size()
                    # convert relative window coords to camera units
                    x_min = self.viewport[0][1] + x_min_rel * viewport_width
                    x_max = self.viewport[0][1] + x_max_rel * viewport_width
                    y_min = self.viewport[0][0] + y_min_rel * viewport_height
                    y_max = self.viewport[0][0] + y_max_rel * viewport_height
                    viewport = [(y_min, x_min), (y_max, x_max)]
                    self.update_scene(viewport)
                self.rubberband.hide()
            # stop panning
            elif event.button() == QtCore.Qt.RightButton:
                self._pan = False
                self.setCursor(QtCore.Qt.ArrowCursor)
                event.accept()
            else:
                event.ignore()
        elif self._mode == "Pick":
            if self._pick_shape == "Circle":
                # add pick
                if event.button() == QtCore.Qt.LeftButton:
                    x, y = self.map_to_movie(event.pos())
                    self.add_pick((x, y))
                    event.accept()
                # remove pick
                elif event.button() == QtCore.Qt.RightButton:
                    x, y = self.map_to_movie(event.pos())
                    self.remove_picks((x, y))
                    event.accept()
                else:
                    event.ignore()
            elif self._pick_shape == "Rectangle":
                if event.button() == QtCore.Qt.LeftButton:
                    # finish drawing rectangular pick and add it
                    rectangle_pick_end = self.map_to_movie(event.pos())
                    self._rectangle_pick_ongoing = False
                    self.add_pick(
                        (self.rectangle_pick_start, rectangle_pick_end)
                    )
                    event.accept()
                elif event.button() == QtCore.Qt.RightButton:
                    # remove pick
                    x, y = self.map_to_movie(event.pos())
                    self.remove_picks((x, y))
                    event.accept()
                else:
                    event.ignore()
        elif self._mode == "Measure":
            if event.button() == QtCore.Qt.LeftButton:
                # add measure point
                x, y = self.map_to_movie(event.pos())
                self.add_point((x, y))
                event.accept()
            elif event.button() == QtCore.Qt.RightButton:
                # remove ALL measure points (the click position is
                # converted but not used by remove_points)
                x, y = self.map_to_movie(event.pos())
                self.remove_points()
                event.accept()
            else:
                event.ignore()
def movie_size(self):
""" Returns tuple with movie height and width. """
movie_height = self.max_movie_height()
movie_width = self.max_movie_width()
return (movie_height, movie_width)
def nearest_neighbor(self):
""" Gets channels for nearest neighbor analysis. """
# choose both channels
channel1 = self.get_channel("Nearest Neighbor Analysis")
channel2 = self.get_channel("Nearest Neighbor Analysis")
self._nearest_neighbor(channel1, channel2)
def _nearest_neighbor(self, channel1, channel2):
"""
Calculates and saves distances of the nearest neighbors between
localizations in channels 1 and 2
Saves calculated distances in .csv format.
Parameters
----------
channel1 : int
Channel to calculate nearest neighbors distances
channel2 : int
Second channel to calculate nearest neighbor distances
"""
# ask how many nearest neighbors
nn_count, ok = QtWidgets.QInputDialog.getInt(
self, "Input Dialog", "Number of nearest neighbors: ", 0, 1, 100
)
if ok:
pixelsize = self.window.display_settings_dlg.pixelsize.value()
# extract x, y and z from both channels
x1 = self.locs[channel1].x * pixelsize
x2 = self.locs[channel2].x * pixelsize
y1 = self.locs[channel1].y * pixelsize
y2 = self.locs[channel2].y * pixelsize
if (
hasattr(self.locs[channel1], "z")
and hasattr(self.locs[channel2], "z")
):
z1 = self.locs[channel1].z
z2 = self.locs[channel2].z
else:
z1 = None
z2 = None
# used for avoiding zero distances (to self)
same_channel = channel1 == channel2
# get saved file name
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save nearest neighbor distances",
self.locs_paths[channel1].replace(".hdf5", "_nn.csv"),
filter="*.csv",
)
nn = postprocess.nn_analysis(
x1, x2,
y1, y2,
z1, z2,
nn_count,
same_channel,
)
# save as .csv
np.savetxt(path, nn, delimiter=',')
def display_pixels_per_viewport_pixels(self):
""" Returns optimal oversampling. """
os_horizontal = self.width() / self.viewport_width()
os_vertical = self.height() / self.viewport_height()
# The values are almost the same and we choose max
return max(os_horizontal, os_vertical)
def pan_relative(self, dy, dx):
"""
Moves viewport by a given relative distance.
Parameters
----------
dy : float
Relative displacement of the viewport in y axis
dx : float
Relative displacement of the viewport in x axis
"""
viewport_height, viewport_width = self.viewport_size()
x_move = dx * viewport_width
y_move = dy * viewport_height
x_min = self.viewport[0][1] - x_move
x_max = self.viewport[1][1] - x_move
y_min = self.viewport[0][0] - y_move
y_max = self.viewport[1][0] - y_move
viewport = [(y_min, x_min), (y_max, x_max)]
self.update_scene(viewport)
    @check_pick
    def show_trace(self):
        """ Displays x and y coordinates of locs in picks in time. """
        self.current_trace_x = 0  # used for exporting
        self.current_trace_y = 0
        channel = self.get_channel("Show trace")
        if channel is not None:
            locs = self.picked_locs(channel)
            # merge the per-pick recarrays into one
            locs = stack_arrays(locs, asrecarray=True, usemask=False)
            n_frames = self.infos[channel][0]["Frames"]
            # binary ON/OFF trace: 1 in every frame with a localization
            xvec = np.arange(n_frames)
            yvec = xvec[:] * 0
            yvec[locs["frame"]] = 1
            self.current_trace_x = xvec
            self.current_trace_y = yvec
            # remember the channel for export_trace
            self.channel = channel
            self.canvas = GenericPlotWindow("Trace")
            self.canvas.figure.clear()
            # Three subplots sharing x axes
            ax1, ax2, ax3 = self.canvas.figure.subplots(3, sharex=True)
            # frame vs x
            ax1.scatter(locs["frame"], locs["x"], s=2)
            ax1.set_title("X-pos vs frame")
            ax1.set_xlim(0, n_frames)
            ax1.set_ylabel("X-pos [Px]")
            # frame vs y
            ax2.scatter(locs["frame"], locs["y"], s=2)
            ax2.set_title("Y-pos vs frame")
            ax2.set_ylabel("Y-pos [Px]")
            # locs in time
            ax3.plot(xvec, yvec, linewidth=1)
            ax3.fill_between(xvec, 0, yvec, facecolor="red")
            ax3.set_title("Localizations")
            ax3.set_xlabel("Frames")
            ax3.set_ylabel("ON")
            ax3.set_ylim([-0.1, 1.1])
            # add an export button to the plot window's toolbar
            self.export_trace_button = QtWidgets.QPushButton("Export (*.csv)")
            self.canvas.toolbar.addWidget(self.export_trace_button)
            self.export_trace_button.clicked.connect(self.export_trace)
            self.canvas.canvas.draw()
            self.canvas.show()
    def export_trace(self):
        """
        Saves the trace currently shown by show_trace as a
        comma-delimited .trace.txt file.
        """
        # trace data was stored on self by show_trace
        trace = np.array([self.current_trace_x, self.current_trace_y])
        base, ext = os.path.splitext(self.locs_paths[self.channel])
        out_path = base + ".trace.txt"
        # get the name for saving; path is "" if the user cancels
        path, ext = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save trace as txt", out_path, filter="*.trace.txt"
        )
        if path:
            np.savetxt(path, trace, fmt="%i", delimiter=",")
    def pick_message_box(self, params):
        """
        Returns a message box for selecting picks.
        Displays number of picks selected, removed, the ratio and time
        elapsed. Contains 4 buttons for manipulating picks.
        Parameters
        ----------
        params : dict
            Stores info about picks selected; expected keys (as set by
            the callers in this file): "i", "n_kept", "n_removed",
            "n_total", "t0"
        Returns
        -------
        QMessageBox
            With buttons for selecting picks
        """
        msgBox = QtWidgets.QMessageBox(self)
        msgBox.setWindowTitle("Select picks")
        msgBox.setWindowIcon(self.icon)
        # avoid division by zero for the very first pick
        if params["i"] == 0:
            keep_ratio = 0
        else:
            keep_ratio = params["n_kept"] / (params["i"])
        # seconds since selection started
        dt = time.time() - params["t0"]
        msgBox.setText(
            (
                "Keep pick No: {} of {} ?\n"
                "Picks removed: {} Picks kept: {} Keep Ratio: {:.2f} % \n"
                "Time elapsed: {:.2f} Minutes, "
                "Picks per Minute: {:.2f}"
            ).format(
                params["i"] + 1,
                params["n_total"],
                params["n_removed"],
                params["n_kept"],
                keep_ratio * 100,
                dt / 60,
                params["i"] / dt * 60,
            )
        )
        msgBox.addButton(
            QtWidgets.QPushButton("Accept"), QtWidgets.QMessageBox.YesRole
        )  # keep the pick
        msgBox.addButton(
            QtWidgets.QPushButton("Reject"), QtWidgets.QMessageBox.NoRole
        )  # remove the pick
        msgBox.addButton(
            QtWidgets.QPushButton("Back"), QtWidgets.QMessageBox.ResetRole
        )  # go one pick back
        msgBox.addButton(
            QtWidgets.QPushButton("Cancel"), QtWidgets.QMessageBox.RejectRole
        )  # leave selecting picks
        # center the message box on the screen
        qr = self.frameGeometry()
        cp = QtWidgets.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        msgBox.move(qr.topLeft())
        return msgBox
def select_traces(self):
"""
Lets user to select picks based on their traces.
Opens self.pick_message_box to display information.
"""
removelist = [] # picks to be removed
channel = self.get_channel("Select traces")
if channel is not None:
if self._picks: # if there are picks present
params = {} # stores info about selecting picks
params["t0"] = time.time()
all_picked_locs = self.picked_locs(channel)
i = 0 # index of the currently shown pick
n_frames = self.infos[channel][0]["Frames"]
while i < len(self._picks):
fig = plt.figure(figsize=(5, 5), constrained_layout=True)
fig.canvas.set_window_title("Trace")
pick = self._picks[i]
locs = all_picked_locs[i]
locs = stack_arrays(locs, asrecarray=True, usemask=False)
# essentialy the same plotting as in self.show_trace
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)
xvec = np.arange(n_frames)
yvec = xvec[:] * 0
yvec[locs["frame"]] = 1
ax1.set_title(
"Scatterplot of Pick "
+ str(i + 1)
+ " of: "
+ str(len(self._picks))
+ "."
)
ax1.set_title(
"Scatterplot of Pick "
+ str(i + 1)
+ " of: "
+ str(len(self._picks))
+ "."
)
ax1.scatter(locs["frame"], locs["x"], s=2)
ax1.set_ylabel("X-pos [Px]")
ax1.set_title("X-pos vs frame")
ax1.set_xlim(0, n_frames)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2.scatter(locs["frame"], locs["y"], s=2)
ax2.set_title("Y-pos vs frame")
ax2.set_ylabel("Y-pos [Px]")
plt.setp(ax2.get_xticklabels(), visible=False)
ax3.plot(xvec, yvec)
ax3.set_title("Localizations")
ax3.set_xlabel("Frames")
ax3.set_ylabel("ON")
fig.canvas.draw()
width, height = fig.canvas.get_width_height()
# View will display traces instead of rendered locs
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.setPixmap((QtGui.QPixmap(im)))
self.setAlignment(QtCore.Qt.AlignCenter)
# update info
params["n_removed"] = len(removelist)
params["n_kept"] = i - params["n_removed"]
params["n_total"] = len(self._picks)
params["i"] = i
# message box with buttons
msgBox = self.pick_message_box(params)
reply = msgBox.exec()
if reply == 0:
# accepted
if pick in removelist:
removelist.remove(pick)
elif reply == 3:
# cancel
break
elif reply == 2:
# back
if i >= 2:
i -= 2
else:
i = -1
else:
# discard
removelist.append(pick)
i += 1
plt.close()
# remove picks
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
self.update_scene()
@check_pick
def show_pick(self):
"""
Lets user select picks based on their 2D scatter.
Opens self.pick_message_box to display information.
"""
if self._pick_shape == "Rectangle":
raise NotImplementedError(
"Not implemented for rectangular picks"
)
print("Showing picks...")
channel = self.get_channel3d("Select Channel")
removelist = [] # picks to be removed
if channel is not None:
n_channels = len(self.locs_paths)
colors = get_colors(n_channels)
tools_dialog = self.window.tools_settings_dialog
r = tools_dialog.pick_diameter.value() / 2
if channel is (len(self.locs_paths)):
all_picked_locs = []
for k in range(len(self.locs_paths)):
all_picked_locs.append(self.picked_locs(k))
if self._picks:
params = {} # info about selecting
params["t0"] = time.time()
i = 0
while i < len(self._picks):
fig = plt.figure(figsize=(5, 5))
fig.canvas.set_window_title("Scatterplot of Pick")
pick = self._picks[i]
# plot scatter
ax = fig.add_subplot(111)
ax.set_title(
"Scatterplot of Pick "
+ str(i + 1)
+ " of: "
+ str(len(self._picks))
+ "."
)
for l in range(len(self.locs_paths)):
locs = all_picked_locs[l][i]
# locs = stack_arrays(
# locs, asrecarray=True, usemask=False
# )
ax.scatter(locs["x"], locs["y"], c=colors[l], s=2)
# adjust x and y lim
x_min = pick[0] - r
x_max = pick[0] + r
y_min = pick[1] - r
y_max = pick[1] + r
ax.set_xlabel("X [Px]")
ax.set_ylabel("Y [Px]")
ax.set_xlim([x_min, x_max])
ax.set_ylim([y_min, y_max])
plt.axis("equal")
fig.canvas.draw()
width, height = fig.canvas.get_width_height()
# scatter will be displayed instead of
# rendered locs
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.setPixmap((QtGui.QPixmap(im)))
self.setAlignment(QtCore.Qt.AlignCenter)
# update selection info
params["n_removed"] = len(removelist)
params["n_kept"] = i - params["n_removed"]
params["n_total"] = len(self._picks)
params["i"] = i
msgBox = self.pick_message_box(params)
reply = msgBox.exec()
if reply == 0:
# acepted
if pick in removelist:
removelist.remove(pick)
elif reply == 3:
# cancel
break
elif reply == 2:
# back
if i >= 2:
i -= 2
else:
i = -1
else:
# discard
removelist.append(pick)
i += 1
plt.close()
else:
all_picked_locs = self.picked_locs(channel)
if self._picks:
params = {}
params["t0"] = time.time()
i = 0
while i < len(self._picks):
pick = self._picks[i]
fig = plt.figure(figsize=(5, 5))
fig.canvas.set_window_title("Scatterplot of Pick")
ax = fig.add_subplot(111)
ax.set_title(
"Scatterplot of Pick "
+ str(i + 1)
+ " of: "
+ str(len(self._picks))
+ "."
)
locs = all_picked_locs[i]
locs = stack_arrays(
locs, asrecarray=True, usemask=False
)
x_min = pick[0] - r
x_max = pick[0] + r
y_min = pick[1] - r
y_max = pick[1] + r
ax.scatter(
locs["x"], locs["y"], c=colors[channel], s=2
)
ax.set_xlabel("X [Px]")
ax.set_ylabel("Y [Px]")
ax.set_xlim([x_min, x_max])
ax.set_ylim([y_min, y_max])
plt.axis("equal")
fig.canvas.draw()
width, height = fig.canvas.get_width_height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.setPixmap((QtGui.QPixmap(im)))
self.setAlignment(QtCore.Qt.AlignCenter)
params["n_removed"] = len(removelist)
params["n_kept"] = i - params["n_removed"]
params["n_total"] = len(self._picks)
params["i"] = i
msgBox = self.pick_message_box(params)
reply = msgBox.exec()
if reply == 0:
# accepted
if pick in removelist:
removelist.remove(pick)
elif reply == 3:
# cancel
break
elif reply == 2:
# back
if i >= 2:
i -= 2
else:
i = -1
else:
# discard
removelist.append(pick)
i += 1
plt.close()
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
self.update_scene()
@check_pick
def show_pick_3d(self):
"""
Lets user select picks based on their 3D scatter.
Uses PlotDialog for displaying the scatter.
"""
print("Show pick 3D")
channel = self.get_channel3d("Show Pick 3D")
pixelsize = self.window.display_settings_dlg.pixelsize.value()
removelist = []
if channel is not None:
n_channels = len(self.locs_paths)
colors = get_colors(n_channels)
if channel is (len(self.locs_paths)):
# Combined
all_picked_locs = []
for k in range(len(self.locs_paths)):
all_picked_locs.append(self.picked_locs(k))
if self._picks:
for i, pick in enumerate(self._picks):
reply = PlotDialog.getParams(
all_picked_locs, i, len(self._picks), 0, colors
)
if reply == 1:
pass # accepted
elif reply == 2:
break
else:
# discard
removelist.append(pick)
else:
all_picked_locs = self.picked_locs(channel)
if self._picks:
for i, pick in enumerate(self._picks):
reply = PlotDialog.getParams(
all_picked_locs, i, len(self._picks), 1, 1
)
if reply == 1:
pass # accepted
elif reply == 2:
break
else:
# discard
removelist.append(pick)
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
self.update_scene()
@check_pick
def show_pick_3d_iso(self):
"""
Lets user select picks based on their 3D scatter and
projections.
Uses PlotDialogIso for displaying picks.
"""
channel = self.get_channel3d("Show Pick 3D")
removelist = []
if channel is not None:
n_channels = len(self.locs_paths)
colors = get_colors(n_channels)
if channel is (len(self.locs_paths)):
# combined
all_picked_locs = []
for k in range(len(self.locs_paths)):
all_picked_locs.append(self.picked_locs(k))
if self._picks:
for i, pick in enumerate(self._picks):
reply = PlotDialogIso.getParams(
all_picked_locs,
i,
len(self._picks),
0,
colors,
)
if reply == 1:
pass # accepted
elif reply == 2:
break
else:
# discard
removelist.append(pick)
else:
all_picked_locs = self.picked_locs(channel)
if self._picks:
for i, pick in enumerate(self._picks):
reply = PlotDialogIso.getParams(
all_picked_locs,
i,
len(self._picks),
1,
1,
)
if reply == 1:
pass
# accepted
elif reply == 2:
break
else:
# discard
removelist.append(pick)
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
self.update_scene()
@check_pick
def analyze_cluster(self):
""" Clusters picked locs using k-means clustering. """
print("Analyzing clusters...")
channel = self.get_channel3d("Show Pick 3D")
removelist = []
saved_locs = []
clustered_locs = []
pixelsize = self.window.display_settings_dlg.pixelsize.value()
if channel is not None:
n_channels = len(self.locs_paths)
colors = get_colors(n_channels)
# combined locs
if channel is (len(self.locs_paths)):
all_picked_locs = []
for k in range(len(self.locs_paths)):
all_picked_locs.append(self.picked_locs(k))
if self._picks:
for i, pick in enumerate(self._picks):
# 3D
if hasattr(all_picked_locs[0], "z"):
# k-means clustering
reply = ClsDlg3D.getParams(
all_picked_locs,
i,
len(self._picks),
0,
colors,
pixelsize,
)
# 2D
else:
# k-means clustering
reply = ClsDlg2D.getParams(
all_picked_locs,
i,
len(self._picks),
0,
colors,
)
if reply == 1:
# accepted
pass
elif reply == 2:
# canceled
break
else:
# discard
removelist.append(pick)
# one channel
else:
all_picked_locs = self.picked_locs(channel)
if self._picks:
n_clusters, ok = QtWidgets.QInputDialog.getInt(
self,
"Input Dialog",
"Enter inital number of clusters:",
10,
)
for i, pick in enumerate(self._picks):
reply = 3
while reply == 3:
# 3D
if hasattr(all_picked_locs[0], "z"):
# k-means clustering
reply, nc, l_locs, c_locs = ClsDlg3D.getParams(
all_picked_locs,
i,
len(self._picks),
n_clusters,
1,
pixelsize,
)
# 2D
else:
# k-means clustering
reply, nc, l_locs, c_locs = ClsDlg2D.getParams(
all_picked_locs,
i,
len(self._picks),
n_clusters,
1,
)
n_clusters = nc
if reply == 1:
# accepted
saved_locs.append(l_locs)
clustered_locs.extend(c_locs)
elif reply == 2:
# canceled
break
else:
# discarded
removelist.append(pick)
# saved picked locs
if saved_locs != []:
base, ext = os.path.splitext(self.locs_paths[channel])
out_path = base + "_cluster.hdf5"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Save picked localizations", out_path, filter="*.hdf5"
)
if path:
saved_locs = stack_arrays(
saved_locs, asrecarray=True, usemask=False
)
if saved_locs is not None:
d = self.window.tools_settings_dialog.pick_diameter.value()
pick_info = {
"Generated by:": "Picasso Render",
"Pick Diameter:": d,
}
io.save_locs(
path, saved_locs, self.infos[channel] + [pick_info]
)
# save pick properties
base, ext = os.path.splitext(path)
out_path = base + "_pickprops.hdf5"
# TODO: save pick properties
r_max = 2 * max(
self.infos[channel][0]["Height"],
self.infos[channel][0]["Width"],
)
max_dark, ok = QtWidgets.QInputDialog.getInt(
self, "Input Dialog", "Enter gap size:", 3
)
out_locs = []
progress = lib.ProgressDialog(
"Calculating kinetics", 0, len(clustered_locs), self
)
progress.set_value(0)
dark = np.empty(len(clustered_locs))
datatype = clustered_locs[0].dtype
for i, pick_locs in enumerate(clustered_locs):
if not hasattr(pick_locs, "len"):
pick_locs = postprocess.link(
pick_locs,
self.infos[channel],
r_max=r_max,
max_dark_time=max_dark,
)
pick_locs = postprocess.compute_dark_times(pick_locs)
dark[i] = estimate_kinetic_rate(pick_locs.dark)
out_locs.append(pick_locs)
progress.set_value(i + 1)
out_locs = stack_arrays(out_locs, asrecarray=True, usemask=False)
n_groups = len(clustered_locs)
progress = lib.ProgressDialog(
"Calculating pick properties", 0, n_groups, self
)
pick_props = postprocess.groupprops(out_locs)
n_units = self.window.info_dialog.calculate_n_units(dark)
pick_props = lib.append_to_rec(pick_props, n_units, "n_units")
influx = self.window.info_dialog.influx_rate.value()
info = self.infos[channel] + [
{"Generated by": "Picasso: Render", "Influx rate": influx}
]
io.save_datasets(out_path, info, groups=pick_props)
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
self.update_scene()
@check_picks
def filter_picks(self):
""" Filters picks by number of locs. """
channel = self.get_channel("Filter picks by locs")
if channel is not None:
locs = self.all_locs[channel]
info = self.infos[channel]
d = self.window.tools_settings_dialog.pick_diameter.value()
r = d / 2
# index locs in a grid
index_blocks = self.get_index_blocks(channel)
if self._picks:
removelist = [] # picks to remove
loccount = [] # n_locs in picks
progress = lib.ProgressDialog(
"Counting in picks..", 0, len(self._picks) - 1, self
)
progress.set_value(0)
progress.show()
for i, pick in enumerate(self._picks):
x, y = pick
# extract locs at a given region
block_locs = postprocess.get_block_locs_at(
x, y, index_blocks
)
# extract the locs around the pick
pick_locs = lib.locs_at(x, y, block_locs, r)
locs = stack_arrays(
pick_locs, asrecarray=True, usemask=False
)
loccount.append(len(locs))
progress.set_value(i)
progress.close()
# plot histogram with n_locs in picks
fig = plt.figure()
fig.canvas.set_window_title("Localizations in Picks")
ax = fig.add_subplot(111)
ax.set_title("Localizations in Picks ")
n, bins, patches = ax.hist(
loccount,
bins='auto',
density=True,
facecolor="green",
alpha=0.75,
)
ax.set_xlabel("Number of localizations")
ax.set_ylabel("Counts")
fig.canvas.draw()
width, height = fig.canvas.get_width_height()
# display the histogram instead of the rendered locs
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.setPixmap((QtGui.QPixmap(im)))
self.setAlignment(QtCore.Qt.AlignCenter)
# filter picks by n_locs
minlocs, ok = QtWidgets.QInputDialog.getInt(
self,
"Input Dialog",
"Enter minimum number of localizations:",
)
if ok:
maxlocs, ok2 = QtWidgets.QInputDialog.getInt(
self,
"Input Dialog",
"Enter maximum number of localizations:",
max(loccount),
minlocs,
)
if ok2:
progress = lib.ProgressDialog(
"Removing picks..", 0, len(self._picks) - 1, self
)
progress.set_value(0)
progress.show()
for i, pick in enumerate(self._picks):
if loccount[i] > maxlocs:
removelist.append(pick)
elif loccount[i] < minlocs:
removelist.append(pick)
progress.set_value(i)
for pick in removelist:
self._picks.remove(pick)
self.n_picks = len(self._picks)
self.update_pick_info_short()
progress.close()
self.update_scene()
def rmsd_at_com(self, locs):
"""
Calculates root mean square displacement at center of mass.
"""
com_x = locs.x.mean()
com_y = locs.y.mean()
return np.sqrt(np.mean((locs.x - com_x) ** 2 + (locs.y - com_y) ** 2))
def index_locs(self, channel, fast_render=False):
"""
Indexes localizations from a given channel in a grid with grid
size equal to the pick radius.
"""
if fast_render:
locs = self.locs[channel]
else:
locs = self.all_locs[channel]
info = self.infos[channel]
d = self.window.tools_settings_dialog.pick_diameter.value()
size = d / 2
status = lib.StatusDialog("Indexing localizations...", self.window)
index_blocks = postprocess.get_index_blocks(
locs, info, size
)
status.close()
self.index_blocks[channel] = index_blocks
def get_index_blocks(self, channel, fast_render=False):
"""
Calls self.index_locs if not calculated earlier.
Returns indexed locs from a given channel.
"""
if self.index_blocks[channel] is None or fast_render:
self.index_locs(channel, fast_render=fast_render)
return self.index_blocks[channel]
    @check_picks
    def pick_similar(self):
        """
        Searches picks similar to the current picks.
        Focuses on the number of locs and their root mean square
        displacement from center of mass. Std is defined in Tools
        Settings Dialog.
        Raises
        ------
        NotImplementedError
            If pick shape is rectangle
        """
        if self._pick_shape == "Rectangle":
            raise NotImplementedError(
                "Pick similar not implemented for rectangle picks"
            )
        channel = self.get_channel("Pick similar")
        if channel is not None:
            info = self.infos[channel]
            d = self.window.tools_settings_dialog.pick_diameter.value()
            r = d / 2
            d2 = d ** 2
            std_range = (
                self.window.tools_settings_dialog.pick_similar_range.value()
            )
            # extract n_locs and rmsd from current picks
            index_blocks = self.get_index_blocks(channel)
            n_locs = []
            rmsd = []
            for i, pick in enumerate(self._picks):
                x, y = pick
                block_locs = postprocess.get_block_locs_at(x, y, index_blocks)
                pick_locs = lib.locs_at(x, y, block_locs, r)
                n_locs.append(len(pick_locs))
                rmsd.append(self.rmsd_at_com(pick_locs))
            # calculate min and max n_locs and rmsd for picking similar:
            # accept picks within std_range standard deviations of the
            # mean of the current picks
            mean_n_locs = np.mean(n_locs)
            mean_rmsd = np.mean(rmsd)
            std_n_locs = np.std(n_locs)
            std_rmsd = np.std(rmsd)
            min_n_locs = mean_n_locs - std_range * std_n_locs
            max_n_locs = mean_n_locs + std_range * std_n_locs
            min_rmsd = mean_rmsd - std_range * std_rmsd
            max_rmsd = mean_rmsd + std_range * std_rmsd
            # x, y coordinates of found regions:
            x_similar = np.array([_[0] for _ in self._picks])
            y_similar = np.array([_[1] for _ in self._picks])
            # preparations for grid search: hexagonal-like grid of
            # candidate centers (columns sqrt(3)*d/2 apart, alternate
            # columns shifted by d/2)
            x_range = np.arange(d / 2, info[0]["Width"], np.sqrt(3) * d / 2)
            y_range_base = np.arange(d / 2, info[0]["Height"] - d / 2, d)
            y_range_shift = y_range_base + d / 2
            # unpack the grid index; third and fourth entries are unused
            locs_temp, size, _, _, block_starts, block_ends, K, L = (
                index_blocks
            )
            locs_x = locs_temp.x
            locs_y = locs_temp.y
            locs_xy = np.stack((locs_x, locs_y))
            # candidate centers converted to grid-block indices
            x_r = np.uint64(x_range / size)
            y_r1 = np.uint64(y_range_shift / size)
            y_r2 = np.uint64(y_range_base / size)
            status = lib.StatusDialog("Picking similar...", self.window)
            # pick similar
            x_similar, y_similar = postprocess.pick_similar(
                x_range, y_range_shift, y_range_base,
                min_n_locs, max_n_locs, min_rmsd, max_rmsd,
                x_r, y_r1, y_r2,
                locs_xy, block_starts, block_ends, K, L,
                x_similar, y_similar, r, d2,
            )
            # add picks (replaces the current picks with the full
            # similar set, which includes the originals)
            similar = list(zip(x_similar, y_similar))
            self._picks = []
            self.add_picks(similar)
            status.close()
    def picked_locs(
        self,
        channel,
        add_group=True,
        fast_render=False,
    ):
        """
        Returns picked localizations in the specified channel.
        Parameters
        ----------
        channel : int
            Channel of locs to be processed
        add_group : boolean (default=True)
            True if group id should be added to locs. Each pick will be
            assigned a different id
        fast_render : boolean
            If True, takes self.locs, i.e. after randomly sampling a
            fraction of self.all_locs. If False, takes self.all_locs
        Returns
        -------
        list
            List of np.recarrays, each containing locs from one pick

        Notes
        -----
        Returns None implicitly when no picks are present.  For
        rectangular picks, rotated coordinates are stored in the
        x_pick_rot / y_pick_rot fields.
        """
        if len(self._picks):
            picked_locs = []
            progress = lib.ProgressDialog(
                "Creating localization list", 0, len(self._picks), self
            )
            progress.set_value(0)
            if self._pick_shape == "Circle":
                d = self.window.tools_settings_dialog.pick_diameter.value()
                r = d / 2
                # use the spatial grid index for fast circular lookup
                index_blocks = self.get_index_blocks(
                    channel, fast_render=fast_render
                )
                for i, pick in enumerate(self._picks):
                    x, y = pick
                    block_locs = postprocess.get_block_locs_at(
                        x, y, index_blocks
                    )
                    group_locs = lib.locs_at(x, y, block_locs, r)
                    if add_group:
                        group = i * np.ones(len(group_locs), dtype=np.int32)
                        group_locs = lib.append_to_rec(
                            group_locs, group, "group"
                        )
                    group_locs.sort(kind="mergesort", order="frame")
                    picked_locs.append(group_locs)
                    progress.set_value(i + 1)
            elif self._pick_shape == "Rectangle":
                w = self.window.tools_settings_dialog.pick_width.value()
                if fast_render:
                    channel_locs = self.locs[channel]
                else:
                    channel_locs = self.all_locs[channel]
                for i, pick in enumerate(self._picks):
                    (xs, ys), (xe, ye) = pick
                    X, Y = self.get_pick_rectangle_corners(xs, ys, xe, ye, w)
                    x_min = min(X)
                    x_max = max(X)
                    y_min = min(Y)
                    y_max = max(Y)
                    # coarse bounding-box pre-filter before the exact
                    # point-in-rectangle test
                    group_locs = channel_locs[channel_locs.x > x_min]
                    group_locs = group_locs[group_locs.x < x_max]
                    group_locs = group_locs[group_locs.y > y_min]
                    group_locs = group_locs[group_locs.y < y_max]
                    group_locs = lib.locs_in_rectangle(group_locs, X, Y)
                    # store rotated coordinates in x_rot and y_rot
                    # (rotation aligns the rectangle's long axis)
                    angle = 0.5 * np.pi - np.arctan2((ye - ys), (xe - xs))
                    x_shifted = group_locs.x - xs
                    y_shifted = group_locs.y - ys
                    x_pick_rot = x_shifted * np.cos(
                        angle
                    ) - y_shifted * np.sin(angle)
                    y_pick_rot = x_shifted * np.sin(
                        angle
                    ) + y_shifted * np.cos(angle)
                    group_locs = lib.append_to_rec(
                        group_locs, x_pick_rot, "x_pick_rot"
                    )
                    group_locs = lib.append_to_rec(
                        group_locs, y_pick_rot, "y_pick_rot"
                    )
                    if add_group:
                        group = i * np.ones(len(group_locs), dtype=np.int32)
                        group_locs = lib.append_to_rec(
                            group_locs, group, "group"
                        )
                    group_locs.sort(kind="mergesort", order="frame")
                    picked_locs.append(group_locs)
                    progress.set_value(i + 1)
            return picked_locs
def remove_picks(self, position):
"""
Deletes picks found at a given position.
Parameters
----------
position : tuple
Specifies x and y coordinates
"""
x, y = position
new_picks = [] # picks to be kept
if self._pick_shape == "Circle":
pick_diameter_2 = (
self.window.tools_settings_dialog.pick_diameter.value() ** 2
)
for x_, y_ in self._picks:
d2 = (x - x_) ** 2 + (y - y_) ** 2
if d2 > pick_diameter_2:
new_picks.append((x_, y_))
elif self._pick_shape == "Rectangle":
width = self.window.tools_settings_dialog.pick_width.value()
x = np.array([x])
y = np.array([y])
for pick in self._picks:
(start_x, start_y), (end_x, end_y) = pick
X, Y = self.get_pick_rectangle_corners(
start_x, start_y, end_x, end_y, width
)
# do not check if rectangle has no size
if not Y[0] == Y[1]:
if not lib.check_if_in_rectangle(
x, y, np.array(X), np.array(Y)
)[0]:
new_picks.append(pick)
# delete picks and add new_picks
self._picks = []
if len(new_picks) == 0: # no picks left
self.update_pick_info_short()
self.update_scene(picks_only=True)
else:
self.add_picks(new_picks)
def remove_picked_locs(self):
""" Gets channel for removing picked localizations. """
channel = self.get_channel_all_seq("Remove picked localizations")
if channel is len(self.locs_paths): # apply to all channels
for channel in range(len(self.locs)):
self._remove_picked_locs(channel)
elif channel is not None: # apply to a single channel
self._remove_picked_locs(channel)
    def _remove_picked_locs(self, channel):
        """
        Deletes localizations in picks in channel.
        Temporarily adds index to localizations to compare which
        localizations were picked.
        Parameters
        ----------
        channel : int
            Index of the channel were localizations are removed
        """
        index = np.arange(len(self.all_locs[channel]), dtype=np.int32)
        self.all_locs[channel] = lib.append_to_rec(
            self.all_locs[channel], index, "index"
        )  # used for indexing picked localizations
        # if locs were indexed before, they do not have the index
        # attribute
        if self._pick_shape == "Circle":
            # re-index so the cached grid carries the new index field
            self.index_locs(channel)
        all_picked_locs = self.picked_locs(channel, add_group=False)
        # collect the indices of every picked localization
        idx = np.array([], dtype=np.int32)
        for picked_locs in all_picked_locs:
            idx = np.concatenate((idx, picked_locs.index))
        self.all_locs[channel] = np.delete(self.all_locs[channel], idx)
        # drop the temporary index field again
        self.all_locs[channel] = lib.remove_from_rec(
            self.all_locs[channel], "index"
        )
        self.locs[channel] = self.all_locs[channel].copy()
        # fast rendering
        self.window.fast_render_dialog.sample_locs()
        self.update_scene()
def remove_points(self):
""" Removes all distance measurement points. """
self._points = []
self.update_scene()
    def render_scene(
        self, autoscale=False, use_cache=False, cache=True, viewport=None
    ):
        """
        Returns QImage with rendered localizations.
        Parameters
        ----------
        autoscale : boolean (default=False)
            True if optimally adjust contrast
        use_cache : boolean (default=False)
            True if use stored image
        cache : boolean (default=True)
            True if save image
        viewport : tuple (default=None)
            Viewport to be rendered. If None, takes current viewport
        Returns
        -------
        QImage
            Shows rendered locs; 8 bit
        """
        # get oversampling, blur method, etc
        kwargs = self.get_render_kwargs(viewport=viewport)
        n_channels = len(self.locs)
        # render single or multi channel data; both paths fill
        # self._bgra as a side effect
        if n_channels == 1:
            self.render_single_channel(
                kwargs,
                autoscale=autoscale,
                use_cache=use_cache,
                cache=cache,
            )
        else:
            self.render_multi_channel(
                kwargs,
                autoscale=autoscale,
                use_cache=use_cache,
                cache=cache,
            )
        # add alpha channel (no transparency)
        self._bgra[:, :, 3].fill(255)
        # build QImage
        Y, X = self._bgra.shape[:2]
        qimage = QtGui.QImage(
            self._bgra.data, X, Y, QtGui.QImage.Format_RGB32
        )
        return qimage
def read_colors(self, n_channels=None):
"""
Finds currently selected colors for multicolor rendering.
Parameters
----------
n_channels : int
Number of channels to be rendered. If None, it is taken
automatically as the number of locs files loaded.
Returns
-------
list
List of lists with RGB values from 0 to 1 for each channel.
"""
if n_channels is None:
n_channels = len(self.locs)
colors = get_colors(n_channels) # automatic colors
# color each channel one by one
for i in range(len(self.locs)):
# change colors if not automatic coloring
if not self.window.dataset_dialog.auto_colors.isChecked():
# get color from Dataset Dialog
color = (
self.window.dataset_dialog.colorselection[i].currentText()
)
# if default color
if color in self.window.dataset_dialog.default_colors:
colors_array = np.array(
self.window.dataset_dialog.default_colors,
dtype=object,
)
index = np.where(colors_array == color)[0][0]
# assign color
colors[i] = tuple(self.window.dataset_dialog.rgbf[index])
# if hexadecimal is given
elif is_hexadecimal(color):
colorstring = color.lstrip("#")
rgbval = tuple(
int(colorstring[i: i + 2], 16) / 255 for i in (0, 2, 4)
)
# assign color
colors[i] = rgbval
else:
warning = (
"The color selection not recognnised in the channel "
" {}. Please choose one of the options provided or "
" type the hexadecimal code for your color of choice, "
" starting with '#', e.g. '#ffcdff' for pink.".format(
self.window.dataset_dialog.checks[i].text()
)
)
QtWidgets.QMessageBox.information(self, "Warning", warning)
break
# reverse colors if white background
if self.window.dataset_dialog.wbackground.isChecked():
tempcolor = colors[i]
inverted = tuple([1 - _ for _ in tempcolor])
colors[i] = inverted
return colors
    def render_multi_channel(
        self,
        kwargs,
        locs=None,
        autoscale=False,
        use_cache=False,
        cache=True,
    ):
        """
        Renders and paints multichannel localizations.
        Also used when other multi-color data is used (clustered or
        picked locs, render by property)
        Parameters
        ----------
        kwargs : dict
            Contains blur method, etc. See self.get_render_kwargs
        autoscale : boolean (default=False)
            True if optimally adjust contrast
        locs : np.recarray (default=None)
            Locs to be rendered. If None, self.locs is used
        use_cache : boolean (default=False)
            True if use stored image
        cache : boolean (default=True)
            True if save image
        Returns
        -------
        np.array
            8 bit array with 4 channels (rgb and alpha)
        """
        # get localizations for rendering
        if locs is None:
            # if slicing is used, locs are indexed and changing slices deletes
            # all localizations
            if self.window.slicer_dialog.slicer_radio_button.isChecked():
                locs = copy.copy(self.locs)
            else:
                locs = self.locs
        # if slicing, show only current slice from every channel
        for i in range(len(locs)):
            if hasattr(locs[i], "z"):
                if self.window.slicer_dialog.slicer_radio_button.isChecked():
                    z_min = self.window.slicer_dialog.slicermin
                    z_max = self.window.slicer_dialog.slicermax
                    in_view = (locs[i].z > z_min) & (locs[i].z <= z_max)
                    locs[i] = locs[i][in_view]
        if use_cache:  # used saved image
            n_locs = self.n_locs
            image = self.image
        else:  # render each channel one by one
            # get image shape (to avoid rendering unchecked channels)
            (y_min, x_min), (y_max, x_max) = kwargs["viewport"]
            X, Y = (
                int(np.ceil(kwargs["oversampling"] * (x_max - x_min))),
                int(np.ceil(kwargs["oversampling"] * (y_max - y_min)))
            )
            # if single channel is rendered
            if len(self.locs) == 1:
                renderings = [render.render(_, **kwargs) for _ in locs]
            else:
                renderings = [
                    render.render(_, **kwargs)
                    if self.window.dataset_dialog.checks[i].isChecked()
                    else [0, np.zeros((Y, X))]
                    for i, _ in enumerate(locs)
                ]  # renders only channels that are checked in dataset dialog
            n_locs = sum([_[0] for _ in renderings])
            image = np.array([_[1] for _ in renderings])
        if cache:  # store image
            self.n_locs = n_locs
            self.image = image
        # adjust contrast
        image = self.scale_contrast(image, autoscale=autoscale)
        Y, X = image.shape[1:]
        # array with rgb and alpha channels
        bgra = np.zeros((Y, X, 4), dtype=np.float32)
        colors = self.read_colors(n_channels=len(locs))
        # adjust for relative intensity from Dataset Dialog
        for i in range(len(self.locs)):
            iscale = self.window.dataset_dialog.intensitysettings[i].value()
            image[i] = iscale * image[i]
        # color rgb channels and store in bgra
        # NOTE(review): the loop variable deliberately shadows the
        # "image" stack; "image" is not used afterwards
        for color, image in zip(colors, image):
            bgra[:, :, 0] += color[2] * image
            bgra[:, :, 1] += color[1] * image
            bgra[:, :, 2] += color[0] * image
        # clip pixel values at 1 before conversion to 8 bit
        bgra = np.minimum(bgra, 1)
        if self.window.dataset_dialog.wbackground.isChecked():
            bgra = -(bgra - 1)
        self._bgra = self.to_8bit(bgra)  # convert to 8 bit
        return self._bgra
    def render_single_channel(
        self, kwargs, autoscale=False, use_cache=False, cache=True,
    ):
        """
        Renders single channel localizations.
        Calls render_multi_channel in case of clustered or picked locs,
        rendering by property)
        Parameters
        ----------
        kwargs : dict
            Contains blur method, etc. See self.get_render_kwargs
        autoscale : boolean (default=False)
            True if optimally adjust contrast
        use_cache : boolean (default=False)
            True if use stored image
        cache : boolean (default=True)
            True if save image
        Returns
        -------
        np.array
            8 bit array with 4 channels (rgb and alpha)
        """
        # get np.recarray
        locs = self.locs[0]
        # if render by property
        if self.x_render_state:
            locs = self.x_locs
            return self.render_multi_channel(
                kwargs, locs=locs, autoscale=autoscale, use_cache=use_cache
            )
        # if locs have group identity (e.g. clusters), split by the
        # precomputed group color and render as multi channel
        if hasattr(locs, "group") and locs.group.size:
            locs = [locs[self.group_color == _] for _ in range(N_GROUP_COLORS)]
            return self.render_multi_channel(
                kwargs, locs=locs, autoscale=autoscale, use_cache=use_cache
            )
        # if slicing, show only the current slice
        if hasattr(locs, "z"):
            if self.window.slicer_dialog.slicer_radio_button.isChecked():
                z_min = self.window.slicer_dialog.slicermin
                z_max = self.window.slicer_dialog.slicermax
                in_view = (locs.z > z_min) & (locs.z <= z_max)
                locs = locs[in_view]
        if use_cache:  # use saved image
            n_locs = self.n_locs
            image = self.image
        else:  # render locs
            n_locs, image = render.render(locs, **kwargs, info=self.infos[0])
        if cache:  # store image
            self.n_locs = n_locs
            self.image = image
        # adjust contrast and convert to 8 bits
        image = self.scale_contrast(image, autoscale=autoscale)
        image = self.to_8bit(image)
        # paint locs using the colormap of choice (Display Settings
        # Dialog); cmap becomes a 256x4 lookup table of 8-bit values
        cmap = self.window.display_settings_dlg.colormap.currentText()
        if cmap == "Custom":
            cmap = np.uint8(
                np.round(255 * self.custom_cmap)
            )
        else:
            cmap = np.uint8(
                np.round(255 * plt.get_cmap(cmap)(np.arange(256)))
            )
        # return a 4 channel (rgb and alpha) array
        Y, X = image.shape
        self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
        self._bgra[..., 0] = cmap[:, 2][image]
        self._bgra[..., 1] = cmap[:, 1][image]
        self._bgra[..., 2] = cmap[:, 0][image]
        # invert colors if white background
        if self.window.dataset_dialog.wbackground.isChecked():
            self._bgra = -(self._bgra - 255)
        return self._bgra
    def resizeEvent(self, event):
        """ Re-renders the scene whenever the widget is resized. """
        self.update_scene()
    def save_picked_locs(self, path, channel):
        """
        Saves picked locs from a given channel to path as a .hdf5 file.
        Parameters
        ----------
        path : str
            Path for saving picked localizations
        channel : int
            Channel of locs to be saved
        """
        # extract picked localizations and stack them
        locs = self.picked_locs(channel, add_group=True)
        locs = stack_arrays(locs, asrecarray=True, usemask=False)
        # save picked locs with .yaml metadata describing the pick
        if locs is not None:
            pick_info = {
                "Generated by": "Picasso Render : Pick",
                "Pick Shape": self._pick_shape,
            }
            if self._pick_shape == "Circle":
                d = self.window.tools_settings_dialog.pick_diameter.value()
                pick_info["Pick Diameter"] = d
            elif self._pick_shape == "Rectangle":
                w = self.window.tools_settings_dialog.pick_width.value()
                pick_info["Pick Width"] = w
            io.save_locs(path, locs, self.infos[channel] + [pick_info])
def save_picked_locs_multi(self, path):
"""
Saves picked locs combined from all channels to path.
Parameters
----------
path : str
Path for saving localizations
"""
# for each channel stack locs from all picks and combine them
for channel in range(len(self.locs_paths)):
if channel == 0:
locs = self.picked_locs(channel)
locs = stack_arrays(locs, asrecarray=True, usemask=False)
else:
templocs = self.picked_locs(channel)
templocs = stack_arrays(
templocs, asrecarray=True, usemask=False
)
locs = np.append(locs, templocs)
# save
locs = locs.view(np.recarray)
if locs is not None:
d = self.window.tools_settings_dialog.pick_diameter.value()
pick_info = {
"Generated by:": "Picasso Render : Pick",
"Pick Shape:": self._pick_shape,
}
if self._pick_shape == "Circle":
d = self.window.tools_settings_dialog.pick_diameter.value()
pick_info["Pick Diameter"] = d
elif self._pick_shape == "Rectangle":
w = self.window.tools_settings_dialog.pick_width.value()
pick_info["Pick Width"] = w
io.save_locs(path, locs, self.infos[0] + [pick_info])
    def save_pick_properties(self, path, channel):
        """
        Saves picks' properties in a given channel to path.

        Properties include number of locs, mean and std of all locs
        dtypes (x, y, photons, etc) and others.

        Parameters
        ----------
        path : str
            Path for saving picks' properties
        channel : int
            Channel of locs to be saved

        Raises
        ------
        NotImplementedError
            If rectangular pick is chosen
        """
        if self._pick_shape == "Rectangle":
            raise NotImplementedError(
                "Rectangular pick not implemented yet."
            )
        picked_locs = self.picked_locs(channel)
        pick_diameter = self.window.tools_settings_dialog.pick_diameter.value()
        # linking radius for postprocess.link is capped at 1 camera pixel
        r_max = min(pick_diameter, 1)
        max_dark = self.window.info_dialog.max_dark_time.value()
        out_locs = []
        progress = lib.ProgressDialog(
            "Calculating kinetics", 0, len(picked_locs), self
        )
        progress.set_value(0)
        dark = np.empty(len(picked_locs))  # estimated mean dark time
        length = np.empty(len(picked_locs))  # estimated mean bright time
        no_locs = np.empty(len(picked_locs))  # number of locs
        for i, pick_locs in enumerate(picked_locs):
            no_locs[i] = len(pick_locs)
            if no_locs[i] > 0:
                # link localizations into events only if not linked already
                # (linked locs carry a "len" field)
                if not hasattr(pick_locs, "len"):
                    pick_locs = postprocess.link(
                        pick_locs,
                        self.infos[channel],
                        r_max=r_max,
                        max_dark_time=max_dark,
                    )
                pick_locs = postprocess.compute_dark_times(pick_locs)
                length[i] = estimate_kinetic_rate(pick_locs.len)
                dark[i] = estimate_kinetic_rate(pick_locs.dark)
                out_locs.append(pick_locs)
            progress.set_value(i + 1)
        out_locs = stack_arrays(out_locs, asrecarray=True, usemask=False)
        n_groups = len(picked_locs)
        progress = lib.StatusDialog("Calculating pick properties", self)
        # get mean and std of each dtype (x, y, photons, etc)
        pick_props = postprocess.groupprops(out_locs)
        progress.close()
        # QPAINT estimate of number of binding sites
        n_units = self.window.info_dialog.calculate_n_units(dark)
        pick_props = lib.append_to_rec(pick_props, n_units, "n_units")
        pick_props = lib.append_to_rec(pick_props, no_locs, "locs")
        pick_props = lib.append_to_rec(pick_props, length, "length_cdf")
        pick_props = lib.append_to_rec(pick_props, dark, "dark_cdf")
        influx = self.window.info_dialog.influx_rate.value()
        info = self.infos[channel] + [
            {"Generated by": "Picasso: Render", "Influx rate": influx}
        ]
        io.save_datasets(path, info, groups=pick_props)
def save_picks(self, path):
"""
Saves picked regions in .yaml format to path.
Parameters
----------
path : str
Path for saving pick regions
"""
if self._pick_shape == "Circle":
d = self.window.tools_settings_dialog.pick_diameter.value()
picks = {
"Diameter": float(d),
"Centers": [[float(_[0]), float(_[1])] for _ in self._picks],
}
elif self._pick_shape == "Rectangle":
w = self.window.tools_settings_dialog.pick_width.value()
picks = {
"Width": float(w),
"Center-Axis-Points": [
[
[float(s[0]), float(s[1])],
[float(e[0]), float(e[1])],
] for s, e in self._picks
],
}
picks["Shape"] = self._pick_shape
with open(path, "w") as f:
yaml.dump(picks, f)
def scale_contrast(self, image, autoscale=False):
"""
Scales image based on contrast values from Display Settings
Dialog.
Parameters
----------
image : np.array or list of np.arrays
Array with rendered locs (grayscale)
autoscale : boolean (default=False)
If True, finds optimal contrast
Returns
-------
image : np.array or list of np.arrays
Scaled image(s)
"""
if autoscale: # find optimum contrast
if image.ndim == 2:
max_ = image.max()
else:
max_ = min(
[
_.max()
for _ in image # single channel locs with only
if _.max() != 0 # one group have
] # N_GROUP_COLORS - 1 images of
) # only zeroes
upper = INITIAL_REL_MAXIMUM * max_
self.window.display_settings_dlg.silent_minimum_update(0)
self.window.display_settings_dlg.silent_maximum_update(upper)
upper = self.window.display_settings_dlg.maximum.value()
lower = self.window.display_settings_dlg.minimum.value()
if upper == lower:
upper = lower + 1 / (10 ** 6)
self.window.display_settings_dlg.silent_maximum_update(upper)
image = (image - lower) / (upper - lower)
image[~np.isfinite(image)] = 0
image = np.minimum(image, 1.0)
image = np.maximum(image, 0.0)
return image
    def show_legend(self):
        """Displays legend for rendering by property.

        Builds a horizontal strip of colored rectangles (one per color
        step) in a new matplotlib figure, labeled with 5 evenly spaced
        values between the min/max render values from the Display
        Settings Dialog.
        """
        parameter = self.window.display_settings_dlg.parameter.currentText()
        n_colors = self.window.display_settings_dlg.color_step.value()
        min_val = self.window.display_settings_dlg.minimum_render.value()
        max_val = self.window.display_settings_dlg.maximum_render.value()
        colors = get_colors(n_colors)
        fig1 = plt.figure(figsize=(5, 1))
        ax1 = fig1.add_subplot(111, aspect="equal")
        # tile the x range [0, 10] with one rectangle per color
        color_spacing = 10 / len(colors)
        xpos = 0
        for i in range(len(colors)):
            ax1.add_patch(
                patches.Rectangle((xpos, 0), color_spacing, 1, color=colors[i])
            )
            xpos += color_spacing
        x = np.arange(0, 11, 2.5)
        ax1.set_xlim([0, 10])
        ax1.get_yaxis().set_visible(False)
        # 5 tick labels spanning the rendered value range
        labels = np.linspace(min_val, max_val, 5)
        plt.xticks(x, labels)
        plt.title(parameter)
        fig1.show()
    def activate_render_property(self):
        """Assigns locs by color to render a chosen property.

        Splits self.locs[0] into (colors + 1) groups according to the
        chosen parameter's value, caching the result (up to 10 entries)
        so repeated activations with the same settings are instant.
        """
        self.deactivate_property_menu()  # blocks changing render parameters
        if self.window.display_settings_dlg.render_check.isChecked():
            self.x_render_state = True
            parameter = (
                self.window.display_settings_dlg.parameter.currentText()
            )  # frame or x or y, etc
            colors = self.window.display_settings_dlg.color_step.value()
            min_val = self.window.display_settings_dlg.minimum_render.value()
            max_val = self.window.display_settings_dlg.maximum_render.value()
            x_step = (max_val - min_val) / colors
            # index each loc according to its parameter's value
            self.x_color = np.floor(
                (self.locs[0][parameter] - min_val) / x_step
            )
            # values above and below will be fixed:
            self.x_color[self.x_color < 0] = 0
            self.x_color[self.x_color > colors] = colors
            x_locs = []
            # attempt using cached data; a cache entry matches only if
            # parameter, number of colors and value range all agree
            for cached_entry in self.x_render_cache:
                if cached_entry["parameter"] == parameter:
                    if cached_entry["colors"] == colors:
                        if (cached_entry["min_val"] == min_val) & (
                            cached_entry["max_val"] == max_val
                        ):
                            x_locs = cached_entry["locs"]
                            break
            # if no cached data found
            if x_locs == []:
                pb = lib.ProgressDialog(
                    "Indexing " + parameter, 0, colors, self
                )
                pb.set_value(0)
                # assign locs by color
                for i in tqdm(range(colors + 1)):
                    x_locs.append(self.locs[0][self.x_color == i])
                    pb.set_value(i + 1)
                pb.close()
                # cache
                entry = {}
                entry["parameter"] = parameter
                entry["colors"] = colors
                entry["locs"] = x_locs
                entry["min_val"] = min_val
                entry["max_val"] = max_val
                # Do not store too many datasets in cache
                if len(self.x_render_cache) < 10:
                    self.x_render_cache.append(entry)
                else:
                    self.x_render_cache.insert(0, entry)
                    del self.x_render_cache[-1]
            self.x_locs = x_locs
            self.update_scene()
            self.window.display_settings_dlg.show_legend.setEnabled(True)
        else:
            self.x_render_state = False
        self.activate_property_menu()  # allows changing render parameters
def activate_property_menu(self):
""" Allows changing render parameters. """
self.window.display_settings_dlg.minimum_render.setEnabled(True)
self.window.display_settings_dlg.maximum_render.setEnabled(True)
self.window.display_settings_dlg.color_step.setEnabled(True)
def deactivate_property_menu(self):
""" Blocks changing render parameters. """
self.window.display_settings_dlg.minimum_render.setEnabled(False)
self.window.display_settings_dlg.maximum_render.setEnabled(False)
self.window.display_settings_dlg.color_step.setEnabled(False)
    def set_property(self):
        """Activates rendering by property.

        Sets the min/max render spin boxes to the chosen parameter's
        range (signals blocked to avoid re-triggering), then toggles
        rendering off and re-activates it.
        """
        self.window.display_settings_dlg.render_check.setEnabled(False)
        parameter = self.window.display_settings_dlg.parameter.currentText()
        min_val = np.min(self.locs[0][parameter])
        max_val = np.max(self.locs[0][parameter])
        # spin box range is padded by a factor of 100
        if min_val >= 0:
            lower = 0
        else:
            lower = min_val * 100
        if max_val >= 0:
            upper = max_val * 100
        else:
            # NOTE(review): uses -min_val rather than -max_val here —
            # looks asymmetric; confirm this is intentional
            upper = -min_val * 100
        self.window.display_settings_dlg.maximum_render.blockSignals(True)
        self.window.display_settings_dlg.minimum_render.blockSignals(True)
        self.window.display_settings_dlg.maximum_render.setRange(lower, upper)
        self.window.display_settings_dlg.maximum_render.setValue(max_val)
        self.window.display_settings_dlg.minimum_render.setValue(min_val)
        self.window.display_settings_dlg.maximum_render.blockSignals(False)
        self.window.display_settings_dlg.minimum_render.blockSignals(False)
        self.activate_property_menu()
        self.window.display_settings_dlg.render_check.setEnabled(True)
        self.window.display_settings_dlg.render_check.setCheckState(False)
        self.activate_render_property()
    def set_mode(self, action):
        """
        Sets self._mode for QMouseEvents.

        Activated when Zoom, Pick or Measure is chosen from Tools menu
        in the main window.

        Parameters
        ----------
        action : QAction
            Action defined in Window.__init__: ("Zoom", "Pick" or
            "Measure")
        """
        # the action's display text doubles as the mode identifier
        self._mode = action.text()
        self.update_cursor()
    def on_pick_shape_changed(self, pick_shape_index):
        """
        If new shape is chosen, asks user to delete current picks,
        assigns attributes and updates scene.

        Parameters
        ----------
        pick_shape_index : int
            Index of current ToolsSettingsDialog.pick_shape
        """
        t_dialog = self.window.tools_settings_dialog
        current_text = (
            t_dialog.pick_shape.currentText()
        )
        # no-op if the shape did not actually change
        if current_text == self._pick_shape:
            return
        if len(self._picks):
            # changing shape discards existing picks; ask first
            qm = QtWidgets.QMessageBox()
            qm.setWindowTitle("Changing pick shape")
            ret = qm.question(
                self,
                "",
                "This action will delete any existing picks. Continue?",
                qm.Yes | qm.No,
            )
            if ret == qm.No:
                # revert the combo box to the previous shape
                shape_index = t_dialog.pick_shape.findText(
                    self._pick_shape
                )
                self.window.tools_settings_dialog.pick_shape.setCurrentIndex(
                    shape_index
                )
                return
        self._pick_shape = current_text
        self._picks = []
        self.update_cursor()
        self.update_scene(picks_only=True)
        self.update_pick_info_short()
def set_zoom(self, zoom):
"""
Zooms in/out to the given value.
Called by changing zoom in Display Settings Dialog.
Parameters
----------
zoom : float
Value of zoom to change to
"""
current_zoom = self.display_pixels_per_viewport_pixels()
self.zoom(current_zoom / zoom)
def sizeHint(self):
""" Returns recommended window size. """
return QtCore.QSize(*self._size_hint)
def to_8bit(self, image):
"""
Converts image to 8 bit ready to convert to QImage.
Parameters
----------
image : np.array
Image to be converted, with values between 0.0 and 1.0
Returns
-------
np.array
Image converted to 8 bit
"""
return np.round(255 * image).astype("uint8")
    # Arrow-key navigation: each handler pans the FOV by 80% of its size.
    # pan_relative appears to take (vertical, horizontal) fractions of the
    # viewport — confirm against its definition.
    def to_left(self):
        """ Called on pressing left arrow; moves FOV. """
        self.pan_relative(0, 0.8)
    def to_right(self):
        """ Called on pressing right arrow; moves FOV. """
        self.pan_relative(0, -0.8)
    def to_up(self):
        """ Called on pressing up arrow; moves FOV. """
        self.pan_relative(0.8, 0)
    def to_down(self):
        """ Called on pressing down arrow; moves FOV. """
        self.pan_relative(-0.8, 0)
    def show_drift(self):
        """Plots current drift.

        Asks the user for a channel, then either shows an error box (no
        drift loaded/computed yet) or opens a DriftPlotWindow with a 2D
        or 3D plot depending on whether the drift has a z component.
        """
        channel = self.get_channel("Show drift")
        if channel is not None:
            drift = self._drift[channel]
            if drift is None:
                QtWidgets.QMessageBox.information(
                    self,
                    "Driftfile error",
                    (
                        "No driftfile found."
                        " Nothing to display."
                        " Please perform drift correction first"
                        " or load a .txt drift file."
                    ),
                )
            else:
                self.plot_window = DriftPlotWindow(self)
                if hasattr(self._drift[channel], "z"):
                    self.plot_window.plot_3d(drift)
                else:
                    self.plot_window.plot_2d(drift)
                self.plot_window.show()
    def undrift(self):
        """
        Undrifts with RCC (redundant cross-correlation).

        Asks for a channel and a segmentation value, runs
        postprocess.undrift and applies the resulting drift to the
        channel's localizations.

        See Wang Y., et al. Optics Express. 2014
        """
        channel = self.get_channel("Undrift")
        if channel is not None:
            info = self.infos[channel]
            n_frames = info[0]["Frames"]
            # get segmentation (number of frames that are considered
            # in RCC at once)
            if n_frames < 1000:
                default_segmentation = int(n_frames / 4)
            else:
                default_segmentation = 1000
            segmentation, ok = QtWidgets.QInputDialog.getInt(
                self, "Undrift by RCC", "Segmentation:", default_segmentation
            )
            if ok:
                locs = self.all_locs[channel]
                info = self.infos[channel]
                n_segments = postprocess.n_segments(info, segmentation)
                seg_progress = lib.ProgressDialog(
                    "Generating segments", 0, n_segments, self
                )
                # every pair of segments is cross-correlated
                n_pairs = int(n_segments * (n_segments - 1) / 2)
                rcc_progress = lib.ProgressDialog(
                    "Correlating image pairs", 0, n_pairs, self
                )
                try:
                    start_time = time.time()
                    # find drift and apply it to locs
                    drift, _ = postprocess.undrift(
                        locs,
                        info,
                        segmentation,
                        False,
                        seg_progress.set_value,
                        rcc_progress.set_value,
                    )
                    finish_time = time.time()
                    print(
                        "RCC drift estimate running time [seconds]: ",
                        np.round(finish_time-start_time, 1)
                    )
                    # sanity check and assign attributes
                    locs = lib.ensure_sanity(locs, info)
                    self.all_locs[channel] = locs
                    self.locs[channel] = copy.copy(locs)
                    self.index_blocks[channel] = None
                    self.add_drift(channel, drift)
                    self.update_scene()
                    self.show_drift()
                except Exception as e:
                    # RCC can fail e.g. with too few locs per segment;
                    # inform the user instead of crashing
                    QtWidgets.QMessageBox.information(
                        self,
                        "RCC Error",
                        (
                            "RCC failed. \nConsider changing segmentation "
                            "and make sure there are enough locs per frame.\n"
                            "The following exception occured:\n\n {}".format(e)
                        ),
                    )
                    rcc_progress.set_value(n_pairs)
                    self.update_scene()
    @check_picks
    def undrift_from_picked(self):
        """Gets channel for undrifting from picked locs (x, y and,
        if present, z)."""
        channel = self.get_channel("Undrift from picked")
        if channel is not None:
            self._undrift_from_picked(channel)
    @check_picks
    def undrift_from_picked2d(self):
        """
        Gets channel for undrifting from picked locs in 2D (x and y
        only).

        Available when 3D data is loaded.
        """
        channel = self.get_channel("Undrift from picked")
        if channel is not None:
            self._undrift_from_picked2d(channel)
    def _undrift_from_picked_coordinate(
        self, channel, picked_locs, coordinate
    ):
        """
        Calculates drift in a given coordinate.

        Each pick's locs are centered on their mean; the per-frame
        offsets are averaged over picks, weighted by the inverse of each
        pick's mean square deviation from the first-pass mean drift.
        Frames with no localizations are filled by linear interpolation.

        Parameters
        ----------
        channel : int
            Channel where locs are being undrifted
        picked_locs : list
            List of np.recarrays with locs for each pick
        coordinate : str
            Spatial coordinate where drift is to be found

        Returns
        -------
        np.array
            Contains average drift across picks for all frames
        """
        n_picks = len(picked_locs)
        n_frames = self.infos[channel][0]["Frames"]
        # Drift per pick per frame
        drift = np.empty((n_picks, n_frames))
        drift.fill(np.nan)
        # Remove center of mass offset
        for i, locs in enumerate(picked_locs):
            coordinates = getattr(locs, coordinate)
            drift[i, locs.frame] = coordinates - np.mean(coordinates)
        # Mean drift over picks
        drift_mean = np.nanmean(drift, 0)
        # Square deviation of each pick's drift to mean drift along frames
        sd = (drift - drift_mean) ** 2
        # Mean of square deviation for each pick
        msd = np.nanmean(sd, 1)
        # New mean drift over picks
        # where each pick is weighted according to its msd
        nan_mask = np.isnan(drift)
        drift = np.ma.MaskedArray(drift, mask=nan_mask)
        drift_mean = np.ma.average(drift, axis=0, weights=1/msd)
        drift_mean = drift_mean.filled(np.nan)

        # Linear interpolation for frames without localizations
        def nan_helper(y):
            # returns (nan mask, index extractor for interpolation)
            return np.isnan(y), lambda z: z.nonzero()[0]

        nans, nonzero = nan_helper(drift_mean)
        drift_mean[nans] = np.interp(
            nonzero(nans), nonzero(~nans), drift_mean[~nans]
        )
        return drift_mean
    def _undrift_from_picked(self, channel):
        """
        Undrifts based on picked locs in a given channel.

        Drift is estimated in x and y (and z, if every pick has it) and
        subtracted from both self.all_locs and self.locs.

        Parameters
        ----------
        channel : int
            Channel to be undrifted
        """
        picked_locs = self.picked_locs(channel)
        status = lib.StatusDialog("Calculating drift...", self)
        drift_x = self._undrift_from_picked_coordinate(
            channel, picked_locs, "x"
        )  # find drift in x
        drift_y = self._undrift_from_picked_coordinate(
            channel, picked_locs, "y"
        )  # find drift in y
        # Apply drift (indexed per frame)
        self.all_locs[channel].x -= drift_x[self.all_locs[channel].frame]
        self.all_locs[channel].y -= drift_y[self.all_locs[channel].frame]
        self.locs[channel].x -= drift_x[self.locs[channel].frame]
        self.locs[channel].y -= drift_y[self.locs[channel].frame]
        # A rec array to store the applied drift
        drift = (drift_x, drift_y)
        drift = np.rec.array(drift, dtype=[("x", "f"), ("y", "f")])
        # If z coordinate exists, also apply drift there
        if all([hasattr(_, "z") for _ in picked_locs]):
            drift_z = self._undrift_from_picked_coordinate(
                channel, picked_locs, "z"
            )
            self.all_locs[channel].z -= drift_z[self.all_locs[channel].frame]
            self.locs[channel].z -= drift_z[self.locs[channel].frame]
            drift = lib.append_to_rec(drift, drift_z, "z")
        # Cleanup
        self.index_blocks[channel] = None
        self.add_drift(channel, drift)
        status.close()
        self.update_scene()
    def _undrift_from_picked2d(self, channel):
        """
        Undrifts in x and y based on picked locs in a given channel.

        Like _undrift_from_picked but deliberately skips the z
        coordinate even when it is present.

        Parameters
        ----------
        channel : int
            Channel to be undrifted
        """
        picked_locs = self.picked_locs(channel)
        status = lib.StatusDialog("Calculating drift...", self)
        drift_x = self._undrift_from_picked_coordinate(
            channel, picked_locs, "x"
        )
        drift_y = self._undrift_from_picked_coordinate(
            channel, picked_locs, "y"
        )
        # Apply drift (indexed per frame)
        self.all_locs[channel].x -= drift_x[self.all_locs[channel].frame]
        self.all_locs[channel].y -= drift_y[self.all_locs[channel].frame]
        self.locs[channel].x -= drift_x[self.locs[channel].frame]
        self.locs[channel].y -= drift_y[self.locs[channel].frame]
        # A rec array to store the applied drift
        drift = (drift_x, drift_y)
        drift = np.rec.array(drift, dtype=[("x", "f"), ("y", "f")])
        # Cleanup
        self.index_blocks[channel] = None
        self.add_drift(channel, drift)
        status.close()
        self.update_scene()
def undo_drift(self):
""" Gets channel for undoing drift. """
channel = self.get_channel("Undo drift")
if channel is not None:
self._undo_drift(channel)
    def _undo_drift(self, channel):
        """
        Deletes the latest drift in a given channel.

        Negates the last applied drift in place and applies it again,
        which cancels the previous correction; the negated drift is
        also recorded via add_drift.

        Parameters
        ----------
        channel : int
            Channel to undo drift
        """
        # flip the sign of the most recently applied drift in place
        drift = self.currentdrift[channel]
        drift.x = -drift.x
        drift.y = -drift.y
        self.all_locs[channel].x -= drift.x[self.all_locs[channel].frame]
        self.all_locs[channel].y -= drift.y[self.all_locs[channel].frame]
        self.locs[channel].x -= drift.x[self.locs[channel].frame]
        self.locs[channel].y -= drift.y[self.locs[channel].frame]
        if hasattr(drift, "z"):
            drift.z = -drift.z
            self.all_locs[channel].z -= drift.z[self.all_locs[channel].frame]
            self.locs[channel].z -= drift.z[self.locs[channel].frame]
        self.add_drift(channel, drift)
        self.update_scene()
    def add_drift(self, channel, drift):
        """
        Assigns attributes and saves .txt drift file.

        The new drift is accumulated onto any existing drift for the
        channel, and the total is written to a timestamped
        "<basename>_<timestamp>_drift.txt" next to the locs file.

        Parameters
        ----------
        channel : int
            Channel where drift is to be added
        drift : np.recarray
            Contains drift in each coordinate
        """
        timestr = time.strftime("%Y%m%d_%H%M%S")[2:]
        base, ext = os.path.splitext(self.locs_paths[channel])
        driftfile = base + "_" + timestr + "_drift.txt"
        self._driftfiles[channel] = driftfile
        if self._drift[channel] is None:
            self._drift[channel] = drift
        else:
            # accumulate onto previously stored drift
            self._drift[channel].x += drift.x
            self._drift[channel].y += drift.y
            if hasattr(drift, "z"):
                if hasattr(self._drift[channel], "z"):
                    self._drift[channel].z += drift.z
                else:
                    # stored drift had no z yet; extend it
                    self._drift[channel] = lib.append_to_rec(
                        self._drift[channel], drift.z, "z"
                    )
        self.currentdrift[channel] = copy.copy(drift)
        np.savetxt(
            driftfile,
            self._drift[channel],
            newline="\r\n",
        )
    def apply_drift(self):
        """
        Applies drift to locs from a .txt file.

        The file is expected to contain space-separated per-frame
        columns (x, y[, z]); the third column is used only when the
        channel's locs have a z coordinate.

        Assigns attributes and shifts self.locs and self.all_locs.
        """
        channel = self.get_channel("Apply drift")
        if channel is not None:
            path, exe = QtWidgets.QFileDialog.getOpenFileName(
                self, "Load drift file", filter="*.txt", directory=None
            )
            if path:
                drift = np.loadtxt(path, delimiter=' ')
                if hasattr(self.locs[channel], "z"):
                    # 3D data: use x, y and z drift columns
                    drift = (drift[:,0], drift[:,1], drift[:,2])
                    drift = np.rec.array(
                        drift,
                        dtype=[("x", "f"), ("y", "f"), ("z", "f")],
                    )
                    self.all_locs[channel].x -= drift.x[
                        self.all_locs[channel].frame
                    ]
                    self.all_locs[channel].y -= drift.y[
                        self.all_locs[channel].frame
                    ]
                    self.all_locs[channel].z -= drift.z[
                        self.all_locs[channel].frame
                    ]
                    self.locs[channel].x -= drift.x[
                        self.locs[channel].frame
                    ]
                    self.locs[channel].y -= drift.y[
                        self.locs[channel].frame
                    ]
                    self.locs[channel].z -= drift.z[
                        self.locs[channel].frame
                    ]
                else:
                    # 2D data: only x and y drift columns
                    drift = (drift[:,0], drift[:,1])
                    drift = np.rec.array(
                        drift,
                        dtype=[("x", "f"), ("y", "f")],
                    )
                    self.all_locs[channel].x -= drift.x[
                        self.all_locs[channel].frame
                    ]
                    self.all_locs[channel].y -= drift.y[
                        self.all_locs[channel].frame
                    ]
                    self.locs[channel].x -= drift.x[
                        self.locs[channel].frame
                    ]
                    self.locs[channel].y -= drift.y[
                        self.locs[channel].frame
                    ]
                self._drift[channel] = drift
                self._driftfiles[channel] = path
                self.currentdrift[channel] = copy.copy(drift)
                self.index_blocks[channel] = None
                self.update_scene()
    def unfold_groups(self):
        """
        Shifts grouped locs across x axis (2 camera pixels per group),
        duplicating any circular picks for every group.

        Useful for locs that were processed with Picasso: Average.
        Toggles: if already unfolded, refolds and clears picks instead.

        Raises
        ------
        NotImplementedError
            If more than one channel is loaded, or picks are rectangular
        """
        if len(self.all_locs) > 1:
            raise NotImplementedError(
                "Please load only one channel."
            )
        if not hasattr(self, "unfold_status"):
            self.unfold_status = "folded"
        if self.unfold_status == "folded":
            if hasattr(self.all_locs[0], "group"):
                # each group gets its own x offset of 2 pixels
                self.all_locs[0].x += self.all_locs[0].group * 2
                groups = np.unique(self.all_locs[0].group)

                if self._picks:
                    if self._pick_shape == "Rectangle":
                        raise NotImplementedError(
                            "Unfolding not implemented for rectangle picks"
                        )
                    # duplicate each pick once per additional group,
                    # shifted to the group's new x position
                    for j in range(len(self._picks)):
                        for i in range(len(groups) - 1):
                            position = self._picks[j][:]
                            positionlist = list(position)
                            positionlist[0] += (i + 1) * 2
                            position = tuple(positionlist)
                            self._picks.append(position)
                # Update width information
                self.oldwidth = self.infos[0][0]["Width"]
                minwidth = np.ceil(
                    np.mean(self.all_locs[0].x)
                    + np.max(self.all_locs[0].x)
                    - np.min(self.all_locs[0].x)
                )
                self.infos[0][0]["Width"] = int(
                    np.max([self.oldwidth, minwidth])
                )
                self.locs[0] = copy.copy(self.all_locs[0])
                self.fit_in_view()
                self.unfold_status = "unfolded"
                self.n_picks = len(self._picks)
                self.update_pick_info_short()
        else:
            self.refold_groups()
            self.clear_picks()
    def unfold_groups_square(self):
        """
        Shifts grouped locs onto a rectangular grid of chosen length.

        The user chooses the number of elements per row/column and the
        spacing; circular picks are duplicated for every group.

        Useful for locs that were processed with Picasso: Average.

        Raises
        ------
        NotImplementedError
            If more than one channel is loaded, or picks are rectangular
        """
        if len(self.all_locs) > 1:
            raise NotImplementedError(
                "Please load only one channel."
            )
        n_square, ok = QtWidgets.QInputDialog.getInt(
            self,
            "Input Dialog",
            "Set number of elements per row and column:",
            100,
        )
        spacing, ok = QtWidgets.QInputDialog.getInt(
            self,
            "Input Dialog",
            "Set distance between elements:",
            2,
        )
        if hasattr(self.all_locs[0], "group"):
            # place group g at grid cell (g mod n_square, g // n_square)
            self.all_locs[0].x += (
                np.mod(self.all_locs[0].group, n_square)
                * spacing
            )
            self.all_locs[0].y += (
                np.floor(self.all_locs[0].group / n_square)
                * spacing
            )

            # center the grid at the origin, then shift it into
            # non-negative coordinates
            # NOTE(review): mean_x is taken from self.locs while the y
            # centering uses self.all_locs — confirm this asymmetry
            mean_x = np.mean(self.locs[0].x)
            mean_y = np.mean(self.locs[0].y)

            self.all_locs[0].x -= mean_x
            self.all_locs[0].y -= np.mean(self.all_locs[0].y)

            offset_x = np.absolute(np.min(self.all_locs[0].x))
            offset_y = np.absolute(np.min(self.all_locs[0].y))

            self.all_locs[0].x += offset_x
            self.all_locs[0].y += offset_y

            if self._picks:
                if self._pick_shape == "Rectangle":
                    raise NotImplementedError(
                        "Not implemented for rectangle picks"
                    )
                # Also unfold picks
                groups = np.unique(self.all_locs[0].group)

                shift_x = (
                    np.mod(groups, n_square)
                    * spacing
                    - mean_x
                    + offset_x
                )
                shift_y = (
                    np.floor(groups / n_square)
                    * spacing
                    - mean_y
                    + offset_y
                )

                for j in range(len(self._picks)):
                    for k in range(len(groups)):
                        x_pick, y_pick = self._picks[j]
                        self._picks.append(
                            (x_pick + shift_x[k], y_pick + shift_y[k])
                        )

                self.n_picks = len(self._picks)
                self.update_pick_info_short()

        # Update width information
        self.infos[0][0]["Height"] = int(np.ceil(np.max(self.all_locs[0].y)))
        self.infos[0][0]["Width"] = int(np.ceil(np.max(self.all_locs[0].x)))
        self.locs[0] = copy.copy(self.all_locs[0])
        self.fit_in_view()
def refold_groups(self):
""" Refolds grouped locs across x axis. """
if len(self.all_locs) > 1:
raise NotImplementedError(
"Please load only one channel."
)
if hasattr(self.all_locs[0], "group"):
self.all_locs[0].x -= self.all_locs[0].group * 2
self.locs[0] = copy.copy(self.all_locs[0])
self.fit_in_view()
self.infos[0][0]["Width"] = self.oldwidth
self.unfold_status == "folded"
    def update_cursor(self):
        """Changes cursor according to self._mode.

        In Pick mode with circular picks, the cursor is a white (or
        black, on white background) circle whose on-screen size matches
        the pick diameter; otherwise the default cursor is used.
        """
        if self._mode == "Zoom" or self._mode == "Measure":
            self.unsetCursor()  # normal cursor
        elif self._mode == "Pick":
            if self._pick_shape == "Circle":  # circle
                diameter = (
                    self.window.tools_settings_dialog.pick_diameter.value()
                )
                # convert pick diameter (camera pixels) to screen pixels
                diameter = self.width() * diameter / self.viewport_width()
                # remote desktop crashes sometimes for high diameter
                if diameter < 100:
                    pixmap_size = ceil(diameter)
                    pixmap = QtGui.QPixmap(pixmap_size, pixmap_size)
                    pixmap.fill(QtCore.Qt.transparent)
                    painter = QtGui.QPainter(pixmap)
                    painter.setPen(QtGui.QColor("white"))
                    if self.window.dataset_dialog.wbackground.isChecked():
                        painter.setPen(QtGui.QColor("black"))
                    offset = (pixmap_size - diameter) / 2
                    painter.drawEllipse(offset, offset, diameter, diameter)
                    painter.end()
                    cursor = QtGui.QCursor(pixmap)
                    self.setCursor(cursor)
                else:
                    self.unsetCursor()
            elif self._pick_shape == "Rectangle":
                self.unsetCursor()
    def update_pick_info_long(self):
        """Called when evaluating picks statistics in Info Dialog.

        For each pick computes the number of locs, the in-plane (and,
        for 3D data, axial) rmsd about the center of mass, and linked
        kinetics (mean bright/dark times); empty picks are removed.
        Results are written into the Info Dialog widgets and plotted.
        """
        if len(self._picks) == 0:
            warning = "No picks found. Please pick first."
            QtWidgets.QMessageBox.information(self, "Warning", warning)
            return
        if self._pick_shape == "Rectangle":
            warning = "Not supported for rectangular picks."
            QtWidgets.QMessageBox.information(self, "Warning", warning)
            return
        channel = self.get_channel("Calculate pick info")
        if channel is not None:
            d = self.window.tools_settings_dialog.pick_diameter.value()
            t = self.window.info_dialog.max_dark_time.value()
            # linking radius capped at 1 camera pixel
            r_max = min(d, 1)
            info = self.infos[channel]
            picked_locs = self.picked_locs(channel)
            n_picks = len(picked_locs)
            N = np.empty(n_picks)  # number of locs per pick
            rmsd = np.empty(n_picks)  # rmsd in each pick
            length = np.empty(n_picks)  # estimated mean bright time
            dark = np.empty(n_picks)  # estimated mean dark time
            has_z = hasattr(picked_locs[0], "z")
            if has_z:
                rmsd_z = np.empty(n_picks)
            new_locs = []  # linked locs in each pick
            progress = lib.ProgressDialog(
                "Calculating pick statistics", 0, len(picked_locs), self
            )
            progress.set_value(0)
            for i, locs in enumerate(picked_locs):
                if len(locs) > 0:
                    N[i] = len(locs)
                    # rmsd about the pick's center of mass
                    com_x = np.mean(locs.x)
                    com_y = np.mean(locs.y)
                    rmsd[i] = np.sqrt(
                        np.mean((locs.x - com_x) ** 2 + (locs.y - com_y) ** 2)
                    )
                    if has_z:
                        rmsd_z[i] = np.sqrt(
                            np.mean((locs.z - np.mean(locs.z)) ** 2)
                        )
                    # link localizations into events if not already linked
                    if not hasattr(locs, "len"):
                        locs = postprocess.link(
                            locs, info, r_max=r_max, max_dark_time=t
                        )
                    locs = postprocess.compute_dark_times(locs)
                    length[i] = estimate_kinetic_rate(locs.len)
                    dark[i] = estimate_kinetic_rate(locs.dark)
                    new_locs.append(locs)
                else:
                    # drop picks that contain no localizations
                    self.remove_picks(self._picks[i])
                progress.set_value(i + 1)
            # update labels in info dialog
            self.window.info_dialog.n_localizations_mean.setText(
                "{:.2f}".format(np.nanmean(N))
            )  # mean number of locs per pick
            self.window.info_dialog.n_localizations_std.setText(
                "{:.2f}".format(np.nanstd(N))
            )  # std number of locs per pick
            self.window.info_dialog.rmsd_mean.setText(
                "{:.2}".format(np.nanmean(rmsd))
            )  # mean rmsd per pick
            self.window.info_dialog.rmsd_std.setText(
                "{:.2}".format(np.nanstd(rmsd))
            )  # std rmsd per pick
            if has_z:
                self.window.info_dialog.rmsd_z_mean.setText(
                    "{:.2f}".format(np.nanmean(rmsd_z))
                )  # mean rmsd in z per pick
                self.window.info_dialog.rmsd_z_std.setText(
                    "{:.2f}".format(np.nanstd(rmsd_z))
                )  # std rmsd in z per pick
            # pool all picks' locs to fit cumulative exponentials
            pooled_locs = stack_arrays(
                new_locs, usemask=False, asrecarray=True
            )
            fit_result_len = fit_cum_exp(pooled_locs.len)
            fit_result_dark = fit_cum_exp(pooled_locs.dark)
            self.window.info_dialog.length_mean.setText(
                "{:.2f}".format(np.nanmean(length))
            )  # mean bright time
            self.window.info_dialog.length_std.setText(
                "{:.2f}".format(np.nanstd(length))
            )  # std bright time
            self.window.info_dialog.dark_mean.setText(
                "{:.2f}".format(np.nanmean(dark))
            )  # mean dark time
            self.window.info_dialog.dark_std.setText(
                "{:.2f}".format(np.nanstd(dark))
            )  # std dark time
            self.window.info_dialog.pick_info = {
                "pooled dark": estimate_kinetic_rate(pooled_locs.dark),
                "length": length,
                "dark": dark,
            }
            self.window.info_dialog.update_n_units()
            self.window.info_dialog.pick_hist_window.plot(
                pooled_locs, fit_result_len, fit_result_dark
            )
def update_pick_info_short(self):
""" Updates number of picks in Info Dialog. """
self.window.info_dialog.n_picks.setText(str(len(self._picks)))
    def update_scene(
        self,
        viewport=None,
        autoscale=False,
        use_cache=False,
        picks_only=False,
    ):
        """
        Updates the view of rendered locs as well as cursor.

        Parameters
        ----------
        viewport : tuple (default=None)
            Viewport to be rendered. If None self.viewport is taken
        autoscale : boolean (default=False)
            True if optimally adjust contrast
        use_cache : boolean (default=False)
            True if use stored image
        picks_only : boolean (default=False)
            Forwarded to draw_scene; presumably redraws picks without
            re-rendering locs — confirm against draw_scene
        """
        # Clear slicer cache
        self.window.slicer_dialog.slicer_cache = {}
        n_channels = len(self.locs)
        if n_channels:
            viewport = viewport or self.viewport
            self.draw_scene(
                viewport,
                autoscale=autoscale,
                use_cache=use_cache,
                picks_only=picks_only,
            )
        self.update_cursor()
    def update_scene_slicer(
        self,
        viewport=None,
        autoscale=False,
        use_cache=False,
        picks_only=False,
    ):
        """
        Updates the view of rendered locs when they are sliced.

        Parameters
        ----------
        viewport : tuple (default=None)
            Viewport to be rendered. If None self.viewport is taken
        autoscale : boolean (default=False)
            True if optimally adjust contrast
        use_cache : boolean (default=False)
            True if use stored image
        picks_only : boolean (default=False)
            Forwarded to draw_scene_slicer; presumably redraws picks
            without re-rendering locs — confirm against draw_scene_slicer
        """
        n_channels = len(self.locs)
        if n_channels:
            viewport = viewport or self.viewport
            self.draw_scene_slicer(
                viewport,
                autoscale=autoscale,
                use_cache=use_cache,
                picks_only=picks_only,
            )
        self.update_cursor()
def viewport_center(self, viewport=None):
"""
Finds viewport's center (pixels).
Parameters
----------
viewport: tuple (default=None)
Viewport to be evaluated. If None self.viewport is taken
Returns
tuple
Contains x and y coordinates of viewport's center (pixels)
"""
if viewport is None:
viewport = self.viewport
return (
((viewport[1][0] + viewport[0][0]) / 2),
((viewport[1][1] + viewport[0][1]) / 2),
)
def viewport_height(self, viewport=None):
"""
Finds viewport's height.
Parameters
----------
viewport: tuple (default=None)
Viewport to be evaluated. If None self.viewport is taken
Returns
float
Viewport's height (pixels)
"""
if viewport is None:
viewport = self.viewport
return viewport[1][0] - viewport[0][0]
def viewport_size(self, viewport=None):
"""
Finds viewport's height and width.
Parameters
----------
viewport: tuple (default=None)
Viewport to be evaluated. If None self.viewport is taken
Returns
tuple
Viewport's height and width (pixels)
"""
if viewport is None:
viewport = self.viewport
return self.viewport_height(viewport), self.viewport_width(viewport)
def viewport_width(self, viewport=None):
"""
Finds viewport's width.
Parameters
----------
viewport: tuple (default=None)
Viewport to be evaluated. If None self.viewport is taken
Returns
float
Viewport's width (pixels)
"""
if viewport is None:
viewport = self.viewport
return viewport[1][1] - viewport[0][1]
def relative_position(self, viewport_center, cursor_position):
"""
Finds the position of the cursor relative to the viewport's
center.
Parameters
----------
viewport_center : tuple
Specifies the position of viewport's center
cursor_position : tuple
Specifies the position of the cursor
Returns
-------
tuple
Current cursor's position with respect to viewport's
center
"""
rel_pos_x = (
(cursor_position[0] - viewport_center[1])
/ self.viewport_width()
)
rel_pos_y = (
(cursor_position[1] - viewport_center[0])
/ self.viewport_height()
)
return rel_pos_x, rel_pos_y
def zoom(self, factor, cursor_position=None):
"""
Changes zoom relatively to factor.
If zooms via wheelEvent, zooming is centered around cursor's
position.
Parameters
----------
factor : float
Relative zoom magnitude
cursor_position : tuple (default=None)
Cursor's position on the screen. If None, zooming is
centered around viewport's center
"""
viewport_height, viewport_width = self.viewport_size()
new_viewport_height = viewport_height * factor
new_viewport_width = viewport_width * factor
if cursor_position is not None: # wheelEvent
old_viewport_center = self.viewport_center()
rel_pos_x, rel_pos_y = self.relative_position(
old_viewport_center, cursor_position
) #this stays constant before and after zooming
new_viewport_center_x = (
cursor_position[0] - rel_pos_x * new_viewport_width
)
new_viewport_center_y = (
cursor_position[1] - rel_pos_y * new_viewport_height
)
else:
new_viewport_center_y, new_viewport_center_x = (
self.viewport_center()
)
new_viewport = [
(
new_viewport_center_y - new_viewport_height/2,
new_viewport_center_x - new_viewport_width/2,
),
(
new_viewport_center_y + new_viewport_height/2,
new_viewport_center_x + new_viewport_width/2,
),
]
self.update_scene(new_viewport)
    def zoom_in(self):
        """ Zooms in by the constant factor ZOOM (viewport shrinks). """
        self.zoom(1 / ZOOM)
    def zoom_out(self):
        """ Zooms out by the constant factor ZOOM (viewport grows). """
        self.zoom(ZOOM)
def wheelEvent(self, QWheelEvent):
"""
Defines what happens when mouse wheel is used.
Press Ctrl/Command to zoom in/out.
"""
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
direction = QWheelEvent.angleDelta().y()
position = self.map_to_movie(QWheelEvent.pos())
if direction > 0:
self.zoom(1 / ZOOM, cursor_position=position)
else:
self.zoom(ZOOM, cursor_position=position)
class Window(QtWidgets.QMainWindow):
"""
Main Picasso: Render window class.
...
Attributes
----------
actions_3d : list
specifies actions that are displayed for 3D data only
dataset_dialog : DatasetDialog
instance of the dialog for multichannel display
dialogs : list
        Contains all dialogs that are closed when resetting Render
display_settings_dlg : DisplaySettingsDialog
instance of the dialog for display settings
info_dialog : InfoDialog
instance of the dialog storing information about data and picks
fast_render_dialog: FastRenderDialog
instance of the dialog for sampling a fraction of locs to speed
up rendering
mask_settings_dialog : MaskSettingsDialog
        instance of the dialog for masking image
menu_bar : QMenuBar
menu bar with menus: File, View, Tools, Postprocess
menus : list
contains View, Tools and Postprocess menus
plugins : list
contains plugins loaded from picasso/gui/plugins
slicer_dialog : SlicerDialog
instance of the dialog for slicing 3D data in z axis
tools_settings_dialog : ToolsSettingsDialog
instance of the dialog for customising picks
view : View
instance of the class for displaying rendered localizations
window_rot : RotationWindow
instance of the class for displaying 3D data with rotation
x_spiral : np.array
x coordinates before the last spiral action in ApplyDialog
y_spiral : np.array
y coordinates before the last spiral action in ApplyDialog
Methods
-------
closeEvent(event)
Changes user settings and closes all dialogs
export_complete()
Exports the whole field of view as .png or .tif
export_current()
Exports current view as .png or .tif
export_multi()
Asks the user to choose a type of export
export_fov_ims()
Exports current FOV to .ims
export_ts()
Exports locs as .csv for ThunderSTORM
export_txt()
Exports locs as .txt for ImageJ
export_txt_imaris()
Exports locs as .txt for IMARIS
export_txt_nis()
Exports locs as .txt for NIS
export_xyz_chimera()
Exports locs as .xyz for CHIMERA
export_3d_visp()
Exports locs as .3d for ViSP
initUI(plugins_loaded)
Initializes the main window
load_picks()
Loads picks from a .yaml file
load_user_settings()
Loads colormap and current directory
open_apply_dialog()
Loads expression and applies it to locs
open_file_dialog()
        Opens localizations .hdf5 file(s)
open_rotated_locs()
Opens rotated localizations .hdf5 file(s)
remove_group()
Displayed locs will have no group information
resizeEvent(event)
Updates window size when resizing
save_locs()
Saves localizations in a given channel (or all channels)
save_picked_locs()
Saves picked localizations in a given channel (or all channels)
save_picks()
Saves picks as .yaml
save_pick_properties()
Saves pick properties in a given channel
subtract_picks()
Subtracts picks from a .yaml file
remove_locs()
Resets Window
rot_win()
Opens/updates RotationWindow
update_info()
Updates Window's size and median loc prec in InfoDialog
"""
    def __init__(self, plugins_loaded=False):
        # plugins_loaded: True when the plugins were already loaded in a
        # previous initialization (e.g. after "Remove all localizations"),
        # so initUI must re-add them to the menu bar itself.
        super().__init__()
        self.initUI(plugins_loaded)
    def initUI(self, plugins_loaded):
        """
        Initializes the main window: the central View widget, all
        dialogs, and the File/View/Tools/Postprocess menu bar.

        Parameters
        ----------
        plugins_loaded : boolean
            If True, plugins have been loaded before and are re-added
            to the menu bar here.
        """
        # general
        self.setWindowTitle("Picasso: Render")
        this_directory = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(this_directory, "icons", "render.ico")
        icon = QtGui.QIcon(icon_path)
        self.icon = icon
        self.setWindowIcon(icon)
        self.view = View(self)  # displays rendered locs
        self.view.setMinimumSize(1, 1)
        self.setCentralWidget(self.view)
        # set up dialogs
        self.display_settings_dlg = DisplaySettingsDialog(self)
        self.tools_settings_dialog = ToolsSettingsDialog(self)
        # keep the View's pick shape in sync with the tools dialog
        self.view._pick_shape = (
            self.tools_settings_dialog.pick_shape.currentText()
        )
        self.tools_settings_dialog.pick_shape.currentIndexChanged.connect(
            self.view.on_pick_shape_changed
        )
        self.mask_settings_dialog = MaskSettingsDialog(self)
        self.slicer_dialog = SlicerDialog(self)
        self.info_dialog = InfoDialog(self)
        self.dataset_dialog = DatasetDialog(self)
        self.fast_render_dialog = FastRenderDialog(self)
        self.window_rot = RotationWindow(self)
        self.test_clusterer_dialog = TestClustererDialog(self)
        # all dialogs that are closed together when Render is reset
        self.dialogs = [
            self.display_settings_dlg,
            self.dataset_dialog,
            self.info_dialog,
            self.mask_settings_dialog,
            self.tools_settings_dialog,
            self.slicer_dialog,
            self.window_rot,
            self.fast_render_dialog,
            self.test_clusterer_dialog,
        ]
        # menu bar
        self.menu_bar = self.menuBar()
        # menu bar - File
        file_menu = self.menu_bar.addMenu("File")
        open_action = file_menu.addAction("Open")
        open_action.setShortcut(QtGui.QKeySequence.Open)
        open_action.triggered.connect(self.open_file_dialog)
        open_rot_action = file_menu.addAction("Open rotated localizations")
        open_rot_action.setShortcut("Ctrl+Shift+O")
        open_rot_action.triggered.connect(self.open_rotated_locs)
        save_action = file_menu.addAction("Save localizations")
        save_action.setShortcut("Ctrl+S")
        save_action.triggered.connect(self.save_locs)
        save_picked_action = file_menu.addAction("Save picked localizations")
        save_picked_action.setShortcut("Ctrl+Shift+S")
        save_picked_action.triggered.connect(self.save_picked_locs)
        save_pick_properties_action = file_menu.addAction(
            "Save pick properties"
        )
        save_pick_properties_action.triggered.connect(
            self.save_pick_properties
        )
        save_picks_action = file_menu.addAction("Save pick regions")
        save_picks_action.triggered.connect(self.save_picks)
        load_picks_action = file_menu.addAction("Load pick regions")
        load_picks_action.triggered.connect(self.load_picks)
        file_menu.addSeparator()
        export_current_action = file_menu.addAction("Export current view")
        export_current_action.setShortcut("Ctrl+E")
        export_current_action.triggered.connect(self.export_current)
        export_complete_action = file_menu.addAction("Export complete image")
        export_complete_action.setShortcut("Ctrl+Shift+E")
        export_complete_action.triggered.connect(self.export_complete)
        file_menu.addSeparator()
        export_multi_action = file_menu.addAction("Export localizations")
        export_multi_action.triggered.connect(self.export_multi)
        # Imaris export is only offered when the IMS writer is available
        if IMSWRITER:
            export_ims_action = file_menu.addAction("Export ROI for Imaris")
            export_ims_action.triggered.connect(self.export_fov_ims)
        file_menu.addSeparator()
        delete_action = file_menu.addAction("Remove all localizations")
        delete_action.triggered.connect(self.remove_locs)
        # menu bar - View
        view_menu = self.menu_bar.addMenu("View")
        display_settings_action = view_menu.addAction("Display settings")
        display_settings_action.setShortcut("Ctrl+D")
        display_settings_action.triggered.connect(
            self.display_settings_dlg.show
        )
        view_menu.addAction(display_settings_action)
        dataset_action = view_menu.addAction("Files")
        dataset_action.setShortcut("Ctrl+F")
        dataset_action.triggered.connect(self.dataset_dialog.show)
        view_menu.addSeparator()
        # arrow keys and WASD both pan the view
        to_left_action = view_menu.addAction("Left")
        to_left_action.setShortcuts(["Left", "A"])
        to_left_action.triggered.connect(self.view.to_left)
        to_right_action = view_menu.addAction("Right")
        to_right_action.setShortcuts(["Right", "D"])
        to_right_action.triggered.connect(self.view.to_right)
        to_up_action = view_menu.addAction("Up")
        to_up_action.setShortcuts(["Up", "W"])
        to_up_action.triggered.connect(self.view.to_up)
        to_down_action = view_menu.addAction("Down")
        to_down_action.setShortcuts(["Down", "S"])
        to_down_action.triggered.connect(self.view.to_down)
        view_menu.addSeparator()
        zoom_in_action = view_menu.addAction("Zoom in")
        zoom_in_action.setShortcuts(["Ctrl++", "Ctrl+="])
        zoom_in_action.triggered.connect(self.view.zoom_in)
        view_menu.addAction(zoom_in_action)
        zoom_out_action = view_menu.addAction("Zoom out")
        zoom_out_action.setShortcut("Ctrl+-")
        zoom_out_action.triggered.connect(self.view.zoom_out)
        view_menu.addAction(zoom_out_action)
        fit_in_view_action = view_menu.addAction("Fit image to window")
        fit_in_view_action.setShortcut("Ctrl+W")
        fit_in_view_action.triggered.connect(self.view.fit_in_view)
        view_menu.addAction(fit_in_view_action)
        view_menu.addSeparator()
        info_action = view_menu.addAction("Show info")
        info_action.setShortcut("Ctrl+I")
        info_action.triggered.connect(self.info_dialog.show)
        view_menu.addAction(info_action)
        slicer_action = view_menu.addAction("Slice")
        slicer_action.triggered.connect(self.slicer_dialog.initialize)
        rot_win_action = view_menu.addAction("Update rotation window")
        rot_win_action.setShortcut("Ctrl+Shift+R")
        rot_win_action.triggered.connect(self.rot_win)
        # menu bar - Tools
        tools_menu = self.menu_bar.addMenu("Tools")
        # mutually exclusive mouse modes: Zoom / Pick / Measure
        tools_actiongroup = QtWidgets.QActionGroup(self.menu_bar)
        zoom_tool_action = tools_actiongroup.addAction(
            QtWidgets.QAction("Zoom", tools_menu, checkable=True)
        )
        zoom_tool_action.setShortcut("Ctrl+Z")
        tools_menu.addAction(zoom_tool_action)
        zoom_tool_action.setChecked(True)
        pick_tool_action = tools_actiongroup.addAction(
            QtWidgets.QAction("Pick", tools_menu, checkable=True)
        )
        pick_tool_action.setShortcut("Ctrl+P")
        tools_menu.addAction(pick_tool_action)
        measure_tool_action = tools_actiongroup.addAction(
            QtWidgets.QAction("Measure", tools_menu, checkable=True)
        )
        measure_tool_action.setShortcut("Ctrl+M")
        tools_menu.addAction(measure_tool_action)
        tools_actiongroup.triggered.connect(self.view.set_mode)
        tools_menu.addSeparator()
        tools_settings_action = tools_menu.addAction("Tools settings")
        tools_settings_action.setShortcut("Ctrl+T")
        tools_settings_action.triggered.connect(
            self.tools_settings_dialog.show
        )
        pick_similar_action = tools_menu.addAction("Pick similar")
        pick_similar_action.setShortcut("Ctrl+Shift+P")
        pick_similar_action.triggered.connect(self.view.pick_similar)
        clear_picks_action = tools_menu.addAction("Clear picks")
        clear_picks_action.triggered.connect(self.view.clear_picks)
        clear_picks_action.setShortcut("Ctrl+C")
        remove_locs_picks_action = tools_menu.addAction(
            "Remove localizations in picks"
        )
        remove_locs_picks_action.triggered.connect(
            self.view.remove_picked_locs
        )
        move_to_pick_action = tools_menu.addAction("Move to pick")
        move_to_pick_action.triggered.connect(self.view.move_to_pick)
        tools_menu.addSeparator()
        show_trace_action = tools_menu.addAction("Show trace")
        show_trace_action.setShortcut("Ctrl+R")
        show_trace_action.triggered.connect(self.view.show_trace)
        tools_menu.addSeparator()
        select_traces_action = tools_menu.addAction("Select picks (trace)")
        select_traces_action.triggered.connect(self.view.select_traces)
        plotpick_action = tools_menu.addAction("Select picks (XY scatter)")
        plotpick_action.triggered.connect(self.view.show_pick)
        plotpick3d_action = tools_menu.addAction("Select picks (XYZ scatter)")
        plotpick3d_action.triggered.connect(self.view.show_pick_3d)
        plotpick3d_iso_action = tools_menu.addAction(
            "Select picks (XYZ scatter, 4 panels)"
        )
        plotpick3d_iso_action.triggered.connect(self.view.show_pick_3d_iso)
        filter_picks_action = tools_menu.addAction("Filter picks by locs")
        filter_picks_action.triggered.connect(self.view.filter_picks)
        pickadd_action = tools_menu.addAction("Subtract pick regions")
        pickadd_action.triggered.connect(self.subtract_picks)
        tools_menu.addSeparator()
        cluster_action = tools_menu.addAction("Cluster in pick (k-means)")
        cluster_action.triggered.connect(self.view.analyze_cluster)
        tools_menu.addSeparator()
        mask_action = tools_menu.addAction("Mask image")
        mask_action.triggered.connect(self.mask_settings_dialog.init_dialog)
        tools_menu.addSeparator()
        fast_render_action = tools_menu.addAction("Fast rendering")
        fast_render_action.triggered.connect(self.fast_render_dialog.show)
        # menu bar - Postprocess
        postprocess_menu = self.menu_bar.addMenu("Postprocess")
        undrift_action = postprocess_menu.addAction("Undrift by RCC")
        undrift_action.setShortcut("Ctrl+U")
        undrift_action.triggered.connect(self.view.undrift)
        undrift_from_picked_action = postprocess_menu.addAction(
            "Undrift from picked"
        )
        undrift_from_picked_action.setShortcut("Ctrl+Shift+U")
        undrift_from_picked_action.triggered.connect(
            self.view.undrift_from_picked
        )
        undrift_from_picked2d_action = postprocess_menu.addAction(
            "Undrift from picked (2D)"
        )
        undrift_from_picked2d_action.triggered.connect(
            self.view.undrift_from_picked2d
        )
        drift_action = postprocess_menu.addAction("Undo drift")
        drift_action.triggered.connect(self.view.undo_drift)
        drift_action = postprocess_menu.addAction("Show drift")
        drift_action.triggered.connect(self.view.show_drift)
        apply_drift_action = postprocess_menu.addAction(
            "Apply drift from an external file"
        )
        apply_drift_action.triggered.connect(self.view.apply_drift)
        postprocess_menu.addSeparator()
        group_action = postprocess_menu.addAction("Remove group info")
        group_action.triggered.connect(self.remove_group)
        unfold_action = postprocess_menu.addAction("Unfold / Refold groups")
        unfold_action.triggered.connect(self.view.unfold_groups)
        unfold_action_square = postprocess_menu.addAction(
            "Unfold groups (square)"
        )
        unfold_action_square.triggered.connect(self.view.unfold_groups_square)
        postprocess_menu.addSeparator()
        link_action = postprocess_menu.addAction("Link localizations")
        link_action.triggered.connect(self.view.link)
        align_action = postprocess_menu.addAction(
            "Align channels (RCC or from picked)"
        )
        align_action.triggered.connect(self.view.align)
        combine_action = postprocess_menu.addAction("Combine locs in picks")
        combine_action.triggered.connect(self.view.combine)
        postprocess_menu.addSeparator()
        apply_action = postprocess_menu.addAction(
            "Apply expression to localizations"
        )
        apply_action.setShortcut("Ctrl+A")
        apply_action.triggered.connect(self.open_apply_dialog)
        postprocess_menu.addSeparator()
        clustering_menu = postprocess_menu.addMenu("Clustering")
        dbscan_action = clustering_menu.addAction("DBSCAN")
        dbscan_action.triggered.connect(self.view.dbscan)
        hdbscan_action = clustering_menu.addAction("HDBSCAN")
        hdbscan_action.triggered.connect(self.view.hdbscan)
        clusterer_action = clustering_menu.addAction("SMLM clusterer")
        clusterer_action.triggered.connect(self.view.smlm_clusterer)
        test_cluster_action = clustering_menu.addAction("Test clusterer")
        test_cluster_action.triggered.connect(
            self.test_clusterer_dialog.show
        )
        postprocess_menu.addSeparator()
        nn_action = postprocess_menu.addAction("Nearest Neighbor Analysis")
        nn_action.triggered.connect(self.view.nearest_neighbor)
        self.load_user_settings()
        # Define 3D entries
        self.actions_3d = [
            plotpick3d_action,
            plotpick3d_iso_action,
            slicer_action,
            undrift_from_picked2d_action,
            rot_win_action
        ]
        # set them invisible; if 3D is loaded later, they can be used
        for action in self.actions_3d:
            action.setVisible(False)
        # De-select all menus until file is loaded
        self.menus = [file_menu, view_menu, tools_menu, postprocess_menu]
        for menu in self.menus[1:]:
            menu.setDisabled(True)
        # add plugins; if it's the first initialization
        # (plugins_loaded=False), they are not added because they're
        # loaded in __main__. Otherwise, (remove all locs) plugins
        # need to be added to the menu bar.
        if plugins_loaded:
            try:
                for plugin in self.plugins:
                    plugin.execute()
            # NOTE(review): bare except silently swallows all plugin
            # errors (including KeyboardInterrupt); presumably a
            # deliberate best-effort, but `except Exception` would be
            # safer - confirm before changing.
            except:
                pass
def closeEvent(self, event):
"""
Changes user settings and closes all dialogs.
Parameters
----------
event : QCloseEvent
"""
settings = io.load_user_settings()
settings["Render"][
"Colormap"
] = self.display_settings_dlg.colormap.currentText()
if self.view.locs_paths != []:
settings["Render"]["PWD"] = os.path.dirname(
self.view.locs_paths[0]
)
io.save_user_settings(settings)
QtWidgets.qApp.closeAllWindows()
def export_current(self):
""" Exports current view as .png or .tif. """
try:
base, ext = os.path.splitext(self.view.locs_paths[0])
except AttributeError:
return
out_path = base + ".png"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Save image", out_path, filter="*.png;;*.tif"
)
if path:
self.view.qimage.save(path)
self.view.setMinimumSize(1, 1)
def export_complete(self):
""" Exports the whole field of view as .png or .tif. """
try:
base, ext = os.path.splitext(self.view.locs_paths[0])
except AttributeError:
return
out_path = base + ".png"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Save image", out_path, filter="*.png;;*.tif"
)
if path:
movie_height, movie_width = self.view.movie_size()
viewport = [(0, 0), (movie_height, movie_width)]
qimage = self.view.render_scene(cache=False, viewport=viewport)
qimage.save(path)
def export_txt(self):
"""
Exports locs as .txt for ImageJ.
Saves frames, x and y.
"""
channel = self.view.get_channel(
"Save localizations as txt (frames,x,y)"
)
if channel is not None:
base, ext = os.path.splitext(self.view.locs_paths[channel])
out_path = base + ".frc.txt"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save localizations as txt (frames,x,y)",
out_path,
filter="*.frc.txt",
)
if path:
locs = self.view.all_locs[channel]
loctxt = locs[["frame", "x", "y"]].copy()
np.savetxt(
path,
loctxt,
fmt=["%.1i", "%.5f", "%.5f"],
newline="\r\n",
delimiter=" ",
)
def export_txt_nis(self):
""" Exports locs as .txt for NIS. """
channel = self.view.get_channel(
(
"Save localizations as txt for NIS "
"(x,y,z,channel,width,bg,length,area,frame)"
)
)
pixelsize = self.display_settings_dlg.pixelsize.value()
z_header = b"X\tY\tZ\tChannel\tWidth\tBG\tLength\tArea\tFrame\r\n"
header = b"X\tY\tChannel\tWidth\tBG\tLength\tArea\tFrame\r\n"
if channel is not None:
base, ext = os.path.splitext(self.view.locs_paths[channel])
out_path = base + ".nis.txt"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
(
"Save localizations as txt for NIS "
"(x,y,z,channel,width,bg,length,area,frame)"
),
out_path,
filter="*.nis.txt",
)
if path:
locs = self.view.all_locs[channel]
if hasattr(locs, "z"):
loctxt = locs[
["x", "y", "z", "sx", "bg", "photons", "frame"]
].copy()
loctxt = [
(
row[0] * pixelsize,
row[1] * pixelsize,
row[2] * pixelsize,
1,
row[3] * pixelsize,
row[4],
1,
row[5],
row[6] + 1,
)
for row in loctxt
]
with open(path, "wb") as f:
f.write(z_header)
np.savetxt(
f,
loctxt,
fmt=[
"%.2f",
"%.2f",
"%.2f",
"%.i",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.i",
],
newline="\r\n",
delimiter="\t",
)
print("File saved to {}".format(path))
else:
loctxt = locs[
["x", "y", "sx", "bg", "photons", "frame"]
].copy()
loctxt = [
(
row[0] * pixelsize,
row[1] * pixelsize,
1,
row[2] * pixelsize,
row[3],
1,
row[4],
row[5] + 1,
)
for row in loctxt
]
with open(path, "wb") as f:
f.write(header)
np.savetxt(
f,
loctxt,
fmt=[
"%.2f",
"%.2f",
"%.i",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.i",
],
newline="\r\n",
delimiter="\t",
)
print("File saved to {}".format(path))
    def export_xyz_chimera(self):
        """
        Exports locs as .xyz for Chimera. Contains only x, y, z
        (converted from camera pixels to nm).
        Shows a warning if no z coordinate is found.
        """
        channel = self.view.get_channel(
            "Save localizations as xyz for chimera (molecule,x,y,z)"
        )
        pixelsize = self.display_settings_dlg.pixelsize.value()
        if channel is not None:
            base, ext = os.path.splitext(self.view.locs_paths[channel])
            out_path = base + ".chi.xyz"
            path, ext = QtWidgets.QFileDialog.getSaveFileName(
                self,
                "Save localizations as xyz for chimera (molecule,x,y,z)",
                out_path,
            )
            if path:
                locs = self.view.all_locs[channel]
                if hasattr(locs, "z"):
                    loctxt = locs[["x", "y", "z"]].copy()
                    # leading 1 is a constant molecule id;
                    # coordinates are converted from pixels to nm
                    loctxt = [
                        (
                            1,
                            row[0] * pixelsize,
                            row[1] * pixelsize,
                            row[2] * pixelsize,
                        )
                        for row in loctxt
                    ]
                    with open(path, "wb") as f:
                        f.write(b"Molecule export\r\n")
                        np.savetxt(
                            f,
                            loctxt,
                            fmt=["%i", "%.5f", "%.5f", "%.5f"],
                            newline="\r\n",
                            delimiter="\t",
                        )
                    print("File saved to {}".format(path))
                else:
                    QtWidgets.QMessageBox.information(
                        self, "Dataset error", "Data has no z. Export skipped."
                    )
def export_3d_visp(self):
"""
Exports locs as .3d for ViSP.
Shows a warning if no z coordinate found.
"""
channel = self.view.get_channel(
"Save localizations as xyz for chimera (molecule,x,y,z)"
)
pixelsize = self.display_settings_dlg.pixelsize.value()
if channel is not None:
base, ext = os.path.splitext(self.view.locs_paths[channel])
out_path = base + ".visp.3d"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self,
"Save localizations as xyz for chimera (molecule,x,y,z)",
out_path,
)
if path:
locs = self.view.all_locs[channel]
if hasattr(locs, "z"):
locs = locs[["x", "y", "z", "photons", "frame"]].copy()
locs.x *= pixelsize
locs.y *= pixelsize
locs.z *= pixelsize
with open(path, "wb") as f:
np.savetxt(
f,
locs,
fmt=["%.1f", "%.1f", "%.1f", "%.1f", "%d"],
newline="\r\n",
)
print("Saving complete.")
else:
QtWidgets.QMessageBox.information(
self, "Dataset error", "Data has no z. Export skipped."
)
def export_multi(self):
""" Asks the user to choose a type of export. """
items = [
".txt for FRC (ImageJ)",
".txt for NIS",
".xyz for Chimera",
".3d for ViSP",
".csv for ThunderSTORM",
]
item, ok = QtWidgets.QInputDialog.getItem(
self, "Select Export", "Formats", items, 0, False
)
if ok and item:
if item == ".txt for FRC (ImageJ)":
self.export_txt()
elif item == ".txt for NIS":
self.export_txt_nis()
elif item == ".xyz for Chimera":
self.export_xyz_chimera()
elif item == ".3d for ViSP":
self.export_3d_visp()
elif item == ".csv for ThunderSTORM":
self.export_ts()
else:
print("This should never happen")
def export_ts(self):
""" Exports locs as .csv for ThunderSTORM. """
channel = self.view.get_channel(
"Save localizations as csv for ThunderSTORM"
)
pixelsize = self.display_settings_dlg.pixelsize.value()
if channel is not None:
base, ext = os.path.splitext(self.view.locs_paths[channel])
out_path = base + ".csv"
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Save csv to", out_path, filter="*.csv"
)
if path:
stddummy = 0
locs = self.view.all_locs[channel]
if hasattr(locs, "len"): # Linked locs -> add detections
if hasattr(locs, "z"):
loctxt = locs[
[
"frame",
"x",
"y",
"sx",
"sy",
"photons",
"bg",
"lpx",
"lpy",
"z",
"len",
]
].copy()
loctxt = [
(
index,
row[0],
row[1] * pixelsize,
row[2] * pixelsize,
row[9] * pixelsize,
row[3] * pixelsize,
row[4] * pixelsize,
row[5],
row[6],
stddummy,
(row[7] + row[8]) / 2 * pixelsize,
row[10],
)
for index, row in enumerate(loctxt)
]
header = ""
for element in [
"id",
"frame",
"x [nm]",
"y [nm]",
"z [nm]",
"sigma1 [nm]",
"sigma2 [nm]",
"intensity [photon]",
"offset [photon]",
"bkgstd [photon]",
"uncertainty_xy [nm]",
"detections",
]:
header += '"' + element + '",'
header = header[:-1] + "\r\n"
with open(path, "wb") as f:
f.write(str.encode(header))
np.savetxt(
f,
loctxt,
fmt=[
"%.i",
"%.i",
"%.2f",
"%.2f",
"%.2f",
"%.2f",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.2f",
"%.i",
],
newline="\r\n",
delimiter=",",
)
print("File saved to {}".format(path))
else:
loctxt = locs[
[
"frame",
"x",
"y",
"sx",
"sy",
"photons",
"bg",
"lpx",
"lpy",
"len",
]
].copy()
loctxt = [
(
index,
row[0],
row[1] * pixelsize,
row[2] * pixelsize,
(row[3] + row[4]) / 2 * pixelsize,
row[5],
row[6],
stddummy,
(row[7] + row[8]) / 2 * pixelsize,
row[9],
)
for index, row in enumerate(loctxt)
]
header = ""
for element in [
"id",
"frame",
"x [nm]",
"y [nm]",
"sigma [nm]",
"intensity [photon]",
"offset [photon]",
"bkgstd [photon]",
"uncertainty_xy [nm]",
"detections",
]:
header += '"' + element + '",'
header = header[:-1] + "\r\n"
with open(path, "wb") as f:
f.write(str.encode(header))
np.savetxt(
f,
loctxt,
fmt=[
"%.i",
"%.i",
"%.2f",
"%.2f",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.2f",
"%.i",
],
newline="\r\n",
delimiter=",",
)
print("File saved to {}".format(path))
else:
if hasattr(locs, "z"):
loctxt = locs[
[
"frame",
"x",
"y",
"sx",
"sy",
"photons",
"bg",
"lpx",
"lpy",
"z",
]
].copy()
loctxt = [
(
index,
row[0],
row[1] * pixelsize,
row[2] * pixelsize,
row[9] * pixelsize,
row[3] * pixelsize,
row[4] * pixelsize,
row[5],
row[6],
stddummy,
(row[7] + row[8]) / 2 * pixelsize,
)
for index, row in enumerate(loctxt)
]
header = ""
for element in [
"id",
"frame",
"x [nm]",
"y [nm]",
"z [nm]",
"sigma1 [nm]",
"sigma2 [nm]",
"intensity [photon]",
"offset [photon]",
"bkgstd [photon]",
"uncertainty_xy [nm]",
]:
header += '"' + element + '",'
header = header[:-1] + "\r\n"
with open(path, "wb") as f:
f.write(str.encode(header))
np.savetxt(
f,
loctxt,
fmt=[
"%.i",
"%.i",
"%.2f",
"%.2f",
"%.2f",
"%.2f",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.2f",
],
newline="\r\n",
delimiter=",",
)
print("File saved to {}".format(path))
else:
loctxt = locs[
[
"frame",
"x",
"y",
"sx",
"sy",
"photons",
"bg",
"lpx",
"lpy",
]
].copy()
loctxt = [
(
index,
row[0],
row[1] * pixelsize,
row[2] * pixelsize,
(row[3] + row[4]) / 2 * pixelsize,
row[5],
row[6],
stddummy,
(row[7] + row[8]) / 2 * pixelsize,
)
for index, row in enumerate(loctxt)
]
header = ""
for element in [
"id",
"frame",
"x [nm]",
"y [nm]",
"sigma [nm]",
"intensity [photon]",
"offset [photon]",
"bkgstd [photon]",
"uncertainty_xy [nm]",
]:
header += '"' + element + '",'
header = header[:-1] + "\r\n"
with open(path, "wb") as f:
f.write(str.encode(header))
np.savetxt(
f,
loctxt,
fmt=[
"%.i",
"%.i",
"%.2f",
"%.2f",
"%.2f",
"%.i",
"%.i",
"%.i",
"%.2f",
],
newline="\r\n",
delimiter=",",
)
print("File saved to {}".format(path))
    def export_fov_ims(self):
        """
        Exports the current FOV (all checked channels) to Imaris .ims.

        Each checked channel's in-view localizations are also saved as
        a per-channel .hdf5 next to the chosen .ims path.
        """
        base, ext = os.path.splitext(self.view.locs_paths[0])
        out_path = base + ".ims"
        path, ext = QtWidgets.QFileDialog.getSaveFileName(
            self, "Export FOV as ims", out_path, filter="*.ims"
        )
        channel_base, ext_ = os.path.splitext(path)
        # NOTE(review): if the user cancelled, path == "" and
        # isfile("") is False, so this is safe - but checking
        # `if path` first would be clearer.
        if os.path.isfile(path):
            os.remove(path)
        if path:
            status = lib.StatusDialog("Exporting ROIs..", self)
            n_channels = len(self.view.locs_paths)
            viewport = self.view.viewport
            # oversampling in display pixels per camera pixel
            oversampling = (
                self.display_settings_dlg.pixelsize.value()
                / self.display_settings_dlg.disp_px_size.value()
            )
            maximum = self.display_settings_dlg.maximum.value()
            pixelsize = self.display_settings_dlg.pixelsize.value()
            # default IMS metadata fields, only used if the file info
            # does not already define them
            ims_fields = {
                'ExtMin0':0,
                'ExtMin1':0,
                'ExtMin2':-0.5,
                'ExtMax2':0.5,
            }
            for k,v in ims_fields.items():
                try:
                    # if the key already exists in the file info, mark
                    # the default as None so it is not added below
                    # (EAFP; `val` itself is intentionally unused)
                    val = self.view.infos[0][0][k]
                    ims_fields[k] = None
                except KeyError:
                    pass
            (y_min, x_min), (y_max, x_max) = viewport
            z_mins = []
            z_maxs = []
            to_render = []
            has_z = True
            for channel in range(n_channels):
                if self.dataset_dialog.checks[channel].isChecked():
                    locs = self.view.locs[channel]
                    # restrict to localizations inside the viewport
                    in_view = (
                        (locs.x > x_min)
                        & (locs.x <= x_max)
                        & (locs.y > y_min)
                        & (locs.y <= y_max)
                    )
                    add_dict = {}
                    add_dict["Generated by"] = "Picasso Render (IMS Export)"
                    for k,v in ims_fields.items():
                        if v is not None:
                            add_dict[k] = v
                    # NOTE(review): `info` leaks out of this loop and is
                    # passed to numpy_to_imaris below, i.e. the metadata
                    # of the LAST checked channel is used - confirm this
                    # is intended.
                    info = self.view.infos[channel] + [add_dict]
                    io.save_locs(
                        f"{channel_base}_ch_{channel}.hdf5",
                        locs[in_view],
                        info,
                    )
                    if hasattr(locs, "z"):
                        z_min = locs.z[in_view].min()
                        z_max = locs.z[in_view].max()
                        z_mins.append(z_min)
                        z_maxs.append(z_max)
                    else:
                        has_z = False
                    to_render.append(channel)
            # mixing 2D and 3D channels is not supported
            if not has_z:
                if len(z_mins) > 0:
                    raise NotImplementedError(
                        "Can't export mixed files with and without z."
                    )
            if has_z:
                z_min = min(z_mins)
                z_max = max(z_maxs)
            else:
                z_min, z_max = 0, 0
            all_img = []
            for idx, channel in enumerate(to_render):
                locs = self.view.locs[channel]
                if has_z:
                    n, image = render.render_hist3d(
                        locs,
                        oversampling,
                        y_min, x_min, y_max, x_max, z_min, z_max,
                        pixelsize,
                    )
                else:
                    n, image = render.render_hist(
                        locs,
                        oversampling,
                        y_min, x_min, y_max, x_max,
                    )
                # scale to the display maximum and convert to 16-bit
                image = image / maximum * 65535
                data = image.astype('uint16')
                data = np.rot90(np.fliplr(data))
                all_img.append(data)
            s_image = np.stack(all_img, axis=-1).T.copy()
            colors = self.view.read_colors()
            colors_ims = [PW.Color(*list(colors[_]), 1) for _ in to_render]
            numpy_to_imaris(
                s_image,
                path,
                colors_ims,
                oversampling,
                viewport,
                info,
                z_min,
                z_max,
                pixelsize,
            )
            status.close()
def load_picks(self):
""" Loads picks from a .yaml file. """
path, ext = QtWidgets.QFileDialog.getOpenFileName(
self, "Load pick regions", filter="*.yaml"
)
if path:
self.view.load_picks(path)
def subtract_picks(self):
"""
Subtracts picks from a .yaml file.
See View.subtract_picks.
"""
if self.view._picks:
path, ext = QtWidgets.QFileDialog.getOpenFileName(
self, "Load pick regions", filter="*.yaml"
)
if path:
self.view.subtract_picks(path)
else:
warning = "No picks found. Please pick first."
QtWidgets.QMessageBox.information(self, "Warning", warning)
def load_user_settings(self):
"""
Loads colormap and current directory (ones used last time).
"""
settings = io.load_user_settings()
colormap = settings["Render"]["Colormap"]
if len(colormap) == 0:
colormap = "magma"
for index in range(self.display_settings_dlg.colormap.count()):
if self.display_settings_dlg.colormap.itemText(index) == colormap:
self.display_settings_dlg.colormap.setCurrentIndex(index)
break
pwd = []
try:
pwd = settings["Render"]["PWD"]
except Exception as e:
print(e)
pass
if len(pwd) == 0:
pwd = []
self.pwd = pwd
def open_apply_dialog(self):
""" Loads expression and applies it to locs. """
cmd, channel, ok = ApplyDialog.getCmd(self)
if ok:
input = cmd.split()
if input[0] == "flip" and len(input) == 3:
# Distinguish flipping in xy and z
if "z" in input:
print("xyz")
var_1 = input[1]
var_2 = input[2]
if var_1 == "z":
var_2 = "z"
var_1 = input[2]
pixelsize = self.display_settings_dlg.pixelsize.value()
templocs = self.view.locs[channel][var_1].copy()
movie_height, movie_width = self.view.movie_size()
if var_1 == "x":
dist = movie_width
else:
dist = movie_height
self.view.locs[channel][var_1] = (
self.view.locs[channel][var_2] / pixelsize + dist / 2
) # exchange w. info
self.view.all_locs[channel][var_1] = (
self.view.all_locs[channel[var_2]]
/ pixelsize
+ dist / 2
)
self.view.locs[channel][var_2] = templocs * pixelsize
self.view.all_locs[channel][var_2] = templocs * pixelsize
else:
var_1 = input[1]
var_2 = input[2]
templocs = self.view.locs[channel][var_1].copy()
self.view.locs[channel][var_1] = self.view.locs[channel][
var_2
]
self.view.all_locs[channel][var_1] = self.view.all_locs[
channel
][var_2]
self.view.locs[channel][var_2] = templocs
self.view.all_locs[channel][var_2] = templocs
elif input[0] == "spiral" and len(input) == 3:
# spiral uses radius and turns
radius = float(input[1])
turns = int(input[2])
maxframe = self.view.infos[channel][0]["Frames"]
self.x_spiral = self.view.locs[channel]["x"].copy()
self.y_spiral = self.view.locs[channel]["y"].copy()
scale_time = maxframe / (turns * 2 * np.pi)
scale_x = turns * 2 * np.pi
x = self.view.locs[channel]["frame"] / scale_time
self.view.locs[channel]["x"] = (
x * np.cos(x)
) / scale_x * radius + self.view.locs[channel]["x"]
self.view.all_locs[channel]["x"] = (
x * np.cos(x)
) / scale_x * radius + self.view.all_locs[channel]["x"]
self.view.locs[channel]["y"] = (
x * np.sin(x)
) / scale_x * radius + self.view.locs[channel]["y"]
self.view.all_locs[channel]["y"] = (
x * np.sin(x)
) / scale_x * radius + self.view.all_locs[channel]["y"]
elif input[0] == "uspiral":
try:
self.view.locs[channel]["x"] = self.x_spiral
self.view.all_locs[channel]["x"] = self.x_spiral
self.view.locs[channel]["y"] = self.y_spiral
self.view.all_locs[channel]["y"] = self.y_spiral
self.display_settings_dlg.render_check.setChecked(False)
except:
QtWidgets.QMessageBox.information(
self,
"Uspiral error",
"Localizations have not been spiraled yet."
)
else:
vars = self.view.locs[channel].dtype.names
exec(cmd, {k: self.view.locs[channel][k] for k in vars})
exec(cmd, {k: self.view.all_locs[channel][k] for k in vars})
lib.ensure_sanity(
self.view.locs[channel], self.view.infos[channel]
)
lib.ensure_sanity(
self.view.all_locs[channel], self.view.infos[channel]
)
self.view.index_blocks[channel] = None
self.view.update_scene()
def open_file_dialog(self):
    """ Opens localizations .hdf5 file(s). """
    # Reuse the last visited location (self.pwd) when one is known.
    dialog_kwargs = {"filter": "*.hdf5"}
    if self.pwd != []:
        dialog_kwargs["directory"] = self.pwd
    paths, ext = QtWidgets.QFileDialog.getOpenFileNames(
        self, "Add localizations", **dialog_kwargs
    )
    if paths:
        # Remember the first selection so the next dialog opens nearby.
        self.pwd = paths[0]
        self.view.add_multiple(paths)
def open_rotated_locs(self):
    """
    Opens rotated localizations .hdf5 file(s).

    In addition to normal file opening, it also requires to load
    info about the pick and rotation.
    """
    # self.remove_locs()
    # Reuse the last visited directory (self.pwd) when one is known.
    if self.pwd == []:
        path, ext = QtWidgets.QFileDialog.getOpenFileNames(
            self, "Add localizations", filter="*.hdf5"
        )
    else:
        path, ext = QtWidgets.QFileDialog.getOpenFileNames(
            self, "Add localizations", directory=self.pwd, filter="*.hdf5"
        )
    if path:
        self.pwd = path[0]
        self.view.add_multiple(path)
        # Rotated files store the pick region, pick shape/size and the
        # rotation angles in their last metadata block; restore them so
        # the rotation window reopens in the saved state.
        if "Pick" in self.view.infos[0][-1]:
            self.view._picks = []
            self.view._picks.append(self.view.infos[0][-1]["Pick"])
            self.view._pick_shape = self.view.infos[0][-1]["Pick shape"]
            if self.view._pick_shape == "Circle":
                self.tools_settings_dialog.pick_diameter.setValue(
                    self.view.infos[0][-1]["Pick size"]
                )
            else:
                self.tools_settings_dialog.pick_width.setValue(
                    self.view.infos[0][-1]["Pick size"]
                )
            # assumes "angx"/"angy"/"angz" always accompany "Pick" in
            # rotated files -- TODO confirm against the writer side
            self.window_rot.view_rot.angx = self.view.infos[0][-1]["angx"]
            self.window_rot.view_rot.angy = self.view.infos[0][-1]["angy"]
            self.window_rot.view_rot.angz = self.view.infos[0][-1]["angz"]
            self.rot_win()
def resizeEvent(self, event):
    """ Refreshes the displayed size information after a window resize. """
    # Qt calls this hook automatically; we only need to refresh the
    # info dialog, the event itself requires no further handling.
    self.update_info()
def remove_group(self):
    """ Displayed locs will have no group information. """
    channel = self.view.get_channel("Remove group")
    if channel is None:
        return
    # Strip the "group" field from both the displayed and the full
    # localization records of the chosen channel.
    for store in (self.view.locs, self.view.all_locs):
        store[channel] = lib.remove_from_rec(store[channel], "group")
    self.view.update_scene()
def save_pick_properties(self):
    """
    Saves pick properties in a given channel (or all channels).
    """
    channel = self.view.get_channel_all_seq("Save localizations")
    if channel is None:
        return
    n_channels = len(self.view.locs_paths)
    if channel == n_channels:
        # "all channels" sentinel: ask for one suffix and write every
        # channel next to its source file
        print("Save all at once.")
        suffix, ok = QtWidgets.QInputDialog.getText(
            self,
            "Input Dialog",
            "Enter suffix",
            QtWidgets.QLineEdit.Normal,
            "_pickprops",
        )
        if ok:
            for channel in tqdm(range(n_channels)):
                base, _ = os.path.splitext(self.view.locs_paths[channel])
                out_path = base + suffix + ".hdf5"
                self.view.save_pick_properties(out_path, channel)
    else:
        # single channel: suggest a default name, let the user adjust it
        base, _ = os.path.splitext(self.view.locs_paths[channel])
        suggested = base + "_pickprops.hdf5"
        path, _ = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save pick properties", suggested, filter="*.hdf5"
        )
        if path:
            self.view.save_pick_properties(path, channel)
def save_locs(self):
    """
    Saves localizations in a given channel (or all channels).

    ``save_channel`` returns ``None`` on cancel, ``n_channels`` for
    "save each channel" and ``n_channels + 1`` for "combine channels".
    """
    channel = self.view.save_channel("Save localizations")
    if channel is not None:
        # BUG FIX: the sentinel checks below used `is` on computed ints,
        # which only holds for CPython's cached small integers; compare
        # by value instead (matches save_pick_properties).
        # combine all channels
        if channel == (len(self.view.locs_paths) + 1):
            base, ext = os.path.splitext(self.view.locs_paths[0])
            out_path = base + "_multi.hdf5"
            path, ext = QtWidgets.QFileDialog.getSaveFileName(
                self,
                "Save picked localizations",
                out_path,
                filter="*.hdf5",
            )
            if path:
                # combine locs from all channels
                all_locs = stack_arrays(
                    self.view.all_locs,
                    asrecarray=True,
                    usemask=False,
                    autoconvert=True,
                )
                all_locs.sort(kind="mergesort", order="frame")
                info = self.view.infos[0] + [
                    {
                        "Generated by": "Picasso Render Combine",
                        "Paths to combined files": self.view.locs_paths,
                    }
                ]
                io.save_locs(path, all_locs, info)
        # save all channels one by one
        elif channel == (len(self.view.locs_paths)):
            suffix, ok = QtWidgets.QInputDialog.getText(
                self,
                "Input Dialog",
                "Enter suffix",
                QtWidgets.QLineEdit.Normal,
                "_arender",
            )
            if ok:
                for channel in range(len(self.view.locs_paths)):
                    base, ext = os.path.splitext(
                        self.view.locs_paths[channel]
                    )
                    out_path = base + suffix + ".hdf5"
                    info = self.view.infos[channel] + [
                        {
                            "Generated by": "Picasso Render",
                            "Last driftfile": self.view._driftfiles[
                                channel
                            ],
                        }
                    ]
                    io.save_locs(
                        out_path, self.view.all_locs[channel], info
                    )
        # save one channel only
        else:
            base, ext = os.path.splitext(self.view.locs_paths[channel])
            out_path = base + "_render.hdf5"
            path, ext = QtWidgets.QFileDialog.getSaveFileName(
                self, "Save localizations", out_path, filter="*.hdf5"
            )
            if path:
                info = self.view.infos[channel] + [
                    {
                        "Generated by": "Picasso Render",
                        "Last driftfile": self.view._driftfiles[channel],
                    }
                ]
                io.save_locs(path, self.view.all_locs[channel], info)
def save_picked_locs(self):
    """
    Saves picked localizations in a given channel (or all channels).

    ``save_channel`` returns ``None`` on cancel, ``n_channels`` for
    "save each channel" and ``n_channels + 1`` for "combine channels".
    """
    channel = self.view.save_channel("Save picked localizations")
    if channel is not None:
        # BUG FIX: the sentinel checks below used `is` on computed ints,
        # which only holds for CPython's cached small integers; compare
        # by value instead (matches save_pick_properties).
        # combine channels to one .hdf5
        if channel == (len(self.view.locs_paths) + 1):
            base, ext = os.path.splitext(self.view.locs_paths[0])
            out_path = base + "_picked_multi.hdf5"
            path, ext = QtWidgets.QFileDialog.getSaveFileName(
                self,
                "Save picked localizations",
                out_path,
                filter="*.hdf5",
            )
            if path:
                self.view.save_picked_locs_multi(path)
        # save channels one by one
        elif channel == (len(self.view.locs_paths)):
            suffix, ok = QtWidgets.QInputDialog.getText(
                self,
                "Input Dialog",
                "Enter suffix",
                QtWidgets.QLineEdit.Normal,
                "_apicked",
            )
            if ok:
                for channel in range(len(self.view.locs_paths)):
                    base, ext = os.path.splitext(
                        self.view.locs_paths[channel]
                    )
                    out_path = base + suffix + ".hdf5"
                    self.view.save_picked_locs(out_path, channel)
        # save one channel only
        else:
            base, ext = os.path.splitext(self.view.locs_paths[channel])
            out_path = base + "_picked.hdf5"
            path, ext = QtWidgets.QFileDialog.getSaveFileName(
                self,
                "Save picked localizations",
                out_path,
                filter="*.hdf5",
            )
            if path:
                self.view.save_picked_locs(path, channel)
def save_picks(self):
    """ Saves picks as .yaml. """
    # Suggest a name derived from the first channel's file.
    base = os.path.splitext(self.view.locs_paths[0])[0]
    suggested = base + "_picks.yaml"
    path, _ = QtWidgets.QFileDialog.getSaveFileName(
        self, "Save pick regions", suggested, filter="*.yaml"
    )
    if path:
        self.view.save_picks(path)
def remove_locs(self):
    """ Resets Window. """
    for dlg in self.dialogs:
        dlg.close()
    # Clear the menu bar first; re-running initUI would otherwise
    # duplicate every menu entry.
    self.menu_bar.clear()
    self.initUI(plugins_loaded=True)
def rot_win(self):
    """ Opens/updates RotationWindow. """
    # Exactly one pick is required to define the rotation region.
    n_picks = len(self.view._picks)
    if n_picks == 0:
        raise ValueError("Pick a region to rotate.")
    if n_picks > 1:
        raise ValueError("Pick only one region.")
    self.window_rot.view_rot.load_locs(update_window=True)
    self.window_rot.show()
    self.window_rot.view_rot.update_scene(autoscale=True)
def update_info(self):
    """
    Updates Window's size and median loc prec in InfoDialog.
    """
    self.info_dialog.width_label.setText(
        "{} pixel".format((self.view.width()))
    )
    self.info_dialog.height_label.setText(
        "{} pixel".format((self.view.height()))
    )
    self.info_dialog.locs_label.setText("{:,}".format(self.view.n_locs))
    # The viewport / precision fields only exist once data is loaded;
    # each group is guarded by its own try/except so that one missing
    # attribute does not suppress updates of the other groups.
    try:
        self.info_dialog.xy_label.setText(
            "{:.2f} / {:.2f} ".format(
                self.view.viewport[0][1], self.view.viewport[0][0]
            )
        )
        self.info_dialog.wh_label.setText(
            "{:.2f} / {:.2f} pixel".format(
                self.view.viewport_width(), self.view.viewport_height()
            )
        )
    except AttributeError:
        pass
    try:
        self.info_dialog.change_fov.x_box.setValue(
            self.view.viewport[0][1]
        )
        self.info_dialog.change_fov.y_box.setValue(
            self.view.viewport[0][0]
        )
        self.info_dialog.change_fov.w_box.setValue(
            self.view.viewport_width()
        )
        self.info_dialog.change_fov.h_box.setValue(
            self.view.viewport_height()
        )
    except AttributeError:
        pass
    try:
        # Convert median localization precision from camera pixels to nm.
        self.info_dialog.fit_precision.setText(
            "{:.3} nm".format(
                self.view.median_lp
                * self.display_settings_dlg.pixelsize.value()
            )
        )
    except AttributeError:
        pass
def main():
    """Application entry point: build the Qt app, load plugins and run."""
    app = QtWidgets.QApplication(sys.argv)
    window = Window()
    window.plugins = []
    # load plugins from picasso/gui/plugins
    from . import plugins

    def iter_namespace(pkg):
        # Yield all modules found directly inside the plugins package.
        return pkgutil.iter_modules(pkg.__path__, pkg.__name__ + ".")

    # Renamed from `plugins` to avoid shadowing the package imported above.
    plugin_modules = [
        importlib.import_module(name)
        for finder, name, ispkg
        in iter_namespace(plugins)
    ]
    for module in plugin_modules:
        p = module.Plugin(window)
        if p.name == "render":
            p.execute()
            window.plugins.append(p)
    window.show()

    def excepthook(type, value, tback):
        lib.cancel_dialogs()
        QtCore.QCoreApplication.instance().processEvents()
        message = "".join(traceback.format_exception(type, value, tback))
        # BUG FIX: QMessageBox.critical() is a static convenience that
        # shows the dialog modally and returns the clicked button (an
        # int) -- calling .exec_() on that return value raised
        # AttributeError inside the excepthook itself.
        QtWidgets.QMessageBox.critical(
            window, "An error occured", message
        )
        sys.__excepthook__(type, value, tback)

    sys.excepthook = excepthook

    sys.exit(app.exec_())
if __name__ == "__main__":
    main()
"""
Molecular chemical entity representation and routines to interface with cheminformatics toolkits
.. todo::
* Our main philosophy here is to keep the object contents of topology objects easily serializable/deserializable
* Have ``Molecule`` raise an exception if loading/creating molecules with unspecified stereochemistry?
* Create ``FrozenMolecule`` to represent immutable molecule
* Make ``Atom`` and ``Bond`` an inner class of Molecule?
* Add ``Molecule.from_smarts()`` or ``.from_tagged_smiles()`` to allow a tagged SMARTS string
(where tags are zero-indexed atom indices) to be used to create a molecule with the given atom numbering.
* How can we make the ``Molecule`` API more useful to codes like perses that modify molecules on the fly?
* Use `attrs <http://www.attrs.org/>`_ for convenient class initialization?
* JSON/BSON representations of objects?
* Generalize Molecule infrastructure to provide "plug-in" support for cheminformatics toolkits
* Do we need a way to write a bunch of molecules to a file, or serialize a set of molecules to a file?
We currently don't have a way to do that through the ``Molecule`` API, even though there is a way to
read multiple molecules via ``Molecules.from_file()``.
* Should we allow the removal of atoms too?
* Should invalidation of cached properties be handled via something like a tracked list?
* Refactor toolkit encapsulation to generalize and provide only a few major toolkit methods and toolkit objects
that can be queried for features
* Speed up overall import time by putting non-global imports only where they are needed
"""
import json
import operator
import pathlib
import warnings
from collections import OrderedDict, UserDict
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
Generator,
List,
Optional,
Set,
TextIO,
Tuple,
Union,
)
import networkx as nx
import numpy as np
from openff.units import unit
from openff.units.elements import MASSES, SYMBOLS
from openff.utilities.exceptions import MissingOptionalDependencyError
from packaging import version
from openff.toolkit.utils.exceptions import (
HierarchySchemeNotFoundException,
HierarchySchemeWithIteratorNameAlreadyRegisteredException,
IncompatibleUnitError,
InvalidAtomMetadataError,
InvalidBondOrderError,
InvalidConformerError,
MultipleMoleculesInPDBError,
SmilesParsingError,
UnsupportedFileTypeError,
)
from openff.toolkit.utils.serialization import Serializable
from openff.toolkit.utils.toolkits import (
DEFAULT_AROMATICITY_MODEL,
GLOBAL_TOOLKIT_REGISTRY,
InvalidToolkitRegistryError,
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
ToolkitRegistry,
ToolkitWrapper,
UndefinedStereochemistryError,
)
from openff.toolkit.utils.utils import get_data_file_path, requires_package
if TYPE_CHECKING:
from openff.units.unit import Quantity
from openff.toolkit.topology._mm_molecule import _SimpleAtom, _SimpleMolecule
# TODO: Can we have the `ALLOWED_*_MODELS` list automatically appear in the docstrings below?
# TODO: Should `ALLOWED_*_MODELS` be objects instead of strings?
# TODO: Should these be imported from `openff.toolkit.cheminformatics.aromaticity_models` and `.bondorder_models`?
# TODO: Allow all OpenEye aromaticity models to be used with OpenEye names?
# Only support OEAroModel_MDL in RDKit version?
def _molecule_deprecation(old_method, new_method):
    """Warn that ``old_method`` is deprecated in favor of ``new_method``."""
    message = (
        f"Molecule.{old_method} is deprecated. "
        f"Use Molecule.{new_method} instead."
    )
    warnings.warn(message, MoleculeDeprecationWarning)
class MoleculeDeprecationWarning(UserWarning):
    """Warning emitted when deprecated portions of the Molecule API are used."""
class Particle(Serializable):
    """
    Base class for all particles in a molecule.

    A particle object could be an ``Atom`` or similar.

    .. warning :: This API is experimental and subject to change.
    """

    @property
    def molecule(self):
        r"""
        The ``Molecule`` this particle is part of.

        .. todo::

            * Should we have a single unique ``Molecule`` for each molecule
              type in the system, or if we have multiple copies of the same
              molecule, should we have multiple ``Molecule``\ s?
        """
        return self._molecule

    @molecule.setter
    def molecule(self, molecule):
        """
        Set the particle's molecule pointer. Note that this will only work if the particle currently
        doesn't have a molecule
        """
        err = f"{type(self).__name__} already has an associated molecule"
        # Re-parenting is not supported: the owning molecule may only be
        # assigned once, while the particle is still unowned.
        assert self._molecule is None, err
        self._molecule = molecule

    @property
    def molecule_particle_index(self):
        """
        Returns the index of this particle in its molecule
        """
        # Linear scan of the parent molecule's particle list; requires the
        # particle to already be attached to a molecule.
        return self._molecule.particles.index(self)

    @property
    def name(self):
        """
        The name of the particle
        """
        return self._name

    def to_dict(self):
        """Convert to dictionary representation."""
        # Implement abstract method Serializable.to_dict()
        raise NotImplementedError()  # TODO

    @classmethod
    def from_dict(cls, d):
        """Static constructor from dictionary representation."""
        # Implement abstract method Serializable.from_dict()
        raise NotImplementedError()  # TODO
class AtomMetadataDict(UserDict):
    """A dict for atom metadata that only accepts string keys and
    string or integer values.

    All writes go through ``__setitem__`` (including ``update`` and the
    constructor, via ``UserDict``), so validation cannot be bypassed.

    Raises
    ------
    InvalidAtomMetadataError
        If a key is not a str, or a value is not a str or int.
    """

    def __init__(self, *args, **kwargs):
        self.data = {}
        # Route initial contents through update() -> __setitem__ so they
        # are validated like any later assignment.
        self.update(dict(*args, **kwargs))

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            # BUG FIX: the message previously lacked its closing paren.
            raise InvalidAtomMetadataError(
                f"Attempted to set atom metadata with a non-string key. (key: {key})"
            )
        if not isinstance(value, (str, int)):
            raise InvalidAtomMetadataError(
                f"Attempted to set atom metadata with a non-string or integer "
                f"value. (value: {value})"
            )
        super().__setitem__(key, value)
class Atom(Particle):
    """
    A chemical atom.

    .. todo::

       * Should ``Atom`` objects be immutable or mutable?
       * Do we want to support the addition of arbitrary additional properties,
         such as floating point quantities (e.g. ``charge``), integral
         quantities (such as ``id`` or ``serial`` index in a PDB file),
         or string labels (such as Lennard-Jones types)?

    .. todo :: Allow atoms to have associated properties.

    .. warning :: This API is experimental and subject to change.
    """

    def __init__(
        self,
        atomic_number,
        formal_charge,
        is_aromatic,
        name=None,
        molecule=None,
        stereochemistry=None,
        metadata=None,
    ):
        """
        Create an immutable Atom object.

        Object is serializable and immutable.

        Parameters
        ----------
        atomic_number : int
            Atomic number of the atom
        formal_charge : int or openff.units.unit.Quantity-wrapped int with dimension "charge"
            Formal charge of the atom
        is_aromatic : bool
            If True, atom is aromatic; if False, not aromatic
        stereochemistry : str, optional, default=None
            Either 'R' or 'S' for specified stereochemistry, or None for ambiguous stereochemistry
        name : str, optional, default=None
            An optional name to be associated with the atom
        metadata : dict[str: (int, str)], default=None
            An optional dictionary where keys are strings and values are strings or ints. This is intended
            to record atom-level information used to inform hierarchy definition and iteration, such as
            grouping atom by residue and chain.

        Examples
        --------
        Create a non-aromatic carbon atom

        >>> atom = Atom(6, 0, False)

        Create a chiral carbon atom

        >>> atom = Atom(6, 0, False, stereochemistry='R', name='CT')
        """
        self._atomic_number = atomic_number
        # Use the setter here, since it will handle either ints or Quantities.
        # Reject explicitly dimensionless quantities up front.
        if hasattr(formal_charge, "units"):
            # Faster check than ` == unit.dimensionless`
            if str(formal_charge.units) == "":
                raise Exception(
                    "Formal charge must have units of charge, not be dimensionless."
                )
        self.formal_charge = formal_charge
        self._is_aromatic = is_aromatic
        self._stereochemistry = stereochemistry
        if name is None:
            name = ""
        self._name = name
        self._molecule = molecule
        # Position within the parent molecule is implicit in the parent
        # Molecule's ordering of atoms, so no index is stored here.
        self._bonds = list()
        if metadata is None:
            self._metadata = AtomMetadataDict()
        else:
            self._metadata = AtomMetadataDict(metadata)

    # TODO: We can probably avoid an explicit call and determine this dynamically
    #   from self._molecule (maybe caching the result) to get rid of some bookkeeping.
    # TODO: Should stereochemistry be reset/cleared/recomputed upon addition of a bond?
    def add_bond(self, bond):
        """Adds a bond that this atom is involved in

        .. todo :: Is this how we want to keep records?

        Parameters
        ----------
        bond: an openff.toolkit.topology.molecule.Bond
            A bond involving this atom
        """
        self._bonds.append(bond)

    def to_dict(self):
        """Return a dict representation of the atom."""
        atom_dict = OrderedDict()
        atom_dict["atomic_number"] = self._atomic_number
        atom_dict["formal_charge"] = self._formal_charge.m_as(unit.elementary_charge)
        atom_dict["is_aromatic"] = self._is_aromatic
        atom_dict["stereochemistry"] = self._stereochemistry
        atom_dict["name"] = self._name
        atom_dict["metadata"] = dict(self._metadata)
        # The atom's position is implicit in the order atoms are saved.
        return atom_dict

    @classmethod
    def from_dict(cls, atom_dict):
        """Create an Atom from a dict representation."""
        return cls(**atom_dict)

    @property
    def metadata(self):
        """
        The atom's metadata dictionary
        """
        return self._metadata

    @property
    def formal_charge(self):
        """
        The atom's formal charge
        """
        return self._formal_charge

    @formal_charge.setter
    def formal_charge(self, other):
        """
        Set the atom's formal charge. Accepts either ints or unit-wrapped ints with units of charge.
        """
        if isinstance(other, int):
            self._formal_charge = unit.Quantity(other, unit.elementary_charge)
        elif isinstance(other, unit.Quantity):
            # Identity check first: cheaper than the compatible-units
            # lookup and covers the common case.
            # BUG FIX: the identity branch previously assigned through the
            # property (`self.formal_charge = other`), re-entering this
            # setter and recursing without bound.
            if (
                other.units is unit.elementary_charge
                or other.units in unit.elementary_charge.compatible_units()
            ):
                self._formal_charge = other
            else:
                raise IncompatibleUnitError(
                    f"Cannot set formal charge with a quantity with units {other.units}"
                )
        elif hasattr(other, "unit"):
            # Duck-typed detection of an OpenMM Quantity; import lazily so
            # openmm remains an optional dependency.
            from openmm import unit as openmm_unit

            if not isinstance(other, openmm_unit.Quantity):
                # BUG FIX: this message was missing its f-prefix, so the
                # placeholder text was emitted literally.
                raise IncompatibleUnitError(
                    "Unsupported type passed to formal_charge setter. "
                    f"Found object of type {type(other)}."
                )

            from openff.units.openmm import from_openmm

            converted = from_openmm(other)
            if converted.units in unit.elementary_charge.compatible_units():
                self._formal_charge = converted
            else:
                raise IncompatibleUnitError(
                    f"Cannot set formal charge with a quantity with units {converted.units}"
                )
        else:
            raise ValueError(
                f"Cannot set formal charge from an object of type {type(other)}"
            )

    @property
    def partial_charge(self):
        """
        The partial charge of the atom, if any.

        Returns
        -------
        unit-wrapped float with dimension of atomic charge, or None if no charge has been specified
        """
        if self._molecule._partial_charges is None:
            return None
        else:
            index = self.molecule_atom_index
            return self._molecule._partial_charges[index]

    @property
    def is_aromatic(self):
        """
        The atom's is_aromatic flag
        """
        return self._is_aromatic

    @property
    def stereochemistry(self):
        """
        The atom's stereochemistry (if defined, otherwise None)
        """
        return self._stereochemistry

    @stereochemistry.setter
    def stereochemistry(self, value):
        """Set the atoms stereochemistry

        Parameters
        ----------
        value : str
            The stereochemistry around this atom, allowed values are "CW", "CCW", or None,
        """
        # NOTE(review): values are documented as "CW"/"CCW"/None but are
        # deliberately not validated here.
        self._stereochemistry = value

    @property
    def atomic_number(self) -> int:
        """
        The integer atomic number of the atom.
        """
        return self._atomic_number

    @property
    def symbol(self) -> str:
        """
        Return the symbol implied by the atomic number of this atom
        """
        return SYMBOLS[self.atomic_number]

    @property
    def mass(self) -> "Quantity":
        """
        The standard atomic weight (abundance-weighted isotopic mass) of the atomic site.

        The mass is reported in units of Dalton.
        """
        # Assumed elsewhere in the codebase to be in units of Dalton, which
        # is what MASSES reports as of openff-units v0.1.5.
        return MASSES[self.atomic_number]

    @property
    def name(self):
        """
        The name of this atom, if any
        """
        return self._name

    @name.setter
    def name(self, other):
        """
        Set this atom's name.

        Parameters
        ----------
        other : string
            The new name for this atom
        """
        if not isinstance(other, str):
            raise Exception(
                f"In setting atom name. Expected str, received {other} (type {type(other)})."
            )
        self._name = other

    @property
    def bonds(self):
        """
        The list of ``Bond`` objects this atom is involved in.
        """
        return self._bonds

    @property
    def bonded_atoms(self):
        """
        The list of ``Atom`` objects this atom is involved in bonds with
        """
        for bond in self._bonds:
            for atom in bond.atoms:
                if atom is not self:
                    yield atom

    def is_bonded_to(self, atom2):
        """
        Determine whether this atom is bound to another atom

        Parameters
        ----------
        atom2: openff.toolkit.topology.molecule.Atom
            a different atom in the same molecule

        Returns
        -------
        bool
            Whether this atom is bound to atom2
        """
        # TODO: Sanity check (check for same molecule?)
        assert self != atom2
        for bond in self._bonds:
            for bonded_atom in bond.atoms:
                if atom2 == bonded_atom:
                    return True
        return False

    def is_in_ring(self, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY) -> bool:
        """
        Return whether or not this atom is in a ring(s) (of any size)

        This Atom is expected to be attached to a molecule (`Atom.molecule`).

        Parameters
        ----------
        toolkit_registry: openff.toolkit.utils.toolkits.ToolkitRegistry, default=GLOBAL_TOOLKIT_REGISTRY
            :class:`ToolkitRegistry` to use to enumerate the tautomers.
        """
        return toolkit_registry.call("atom_is_in_ring", self)

    @property
    def molecule_atom_index(self):
        """
        The index of this Atom within the the list of atoms in the parent ``Molecule``.
        """
        if self._molecule is None:
            raise ValueError("This Atom does not belong to a Molecule object")
        # Cache the index in the instance dict; valid because Molecules
        # treat atom ordering as immutable once built.
        if "_molecule_atom_index" in self.__dict__:
            return self._molecule_atom_index
        self._molecule_atom_index = self._molecule.atoms.index(self)
        return self._molecule_atom_index

    def __repr__(self):
        # TODO: Also include which molecule this atom belongs to?
        return f"Atom(name={self._name}, atomic number={self._atomic_number})"

    def __str__(self):
        # TODO: Also include which molecule this atom belongs to?
        return "<Atom name='{}' atomic number='{}'>".format(
            self._name, self._atomic_number
        )
# =============================================================================================
# Bond Stereochemistry
# =============================================================================================
# class BondStereochemistry(Serializable):
# """
# Bond stereochemistry representation
# """
# def __init__(self, stereo_type, neighbor1, neighbor2):
# """
#
# Parameters
# ----------
# stereo_type
# neighbor1
# neighbor2
# """
# assert isinstance(neighbor1, Atom)
# assert isinstance(neighbor2, Atom)
# # Use stereo_type @setter to check stereo type is a permitted value
# self.stereo_type = stereo_type
# self._neighbor1 = neighbor1
# self._neighbor2 = neighbor2
# def to_dict(self):
# bs_dict = OrderedDict()
# bs_dict['stereo_type'] = self._stereo_type
# bs_dict['neighbor1_index'] = self._neighbor1.molecule_atom_index
# bs_dict['neighbor2_index'] = self._neighbor2.molecule_atom_index
# return bs_dict
# classmethod
# def from_dict(cls, molecule, bs_dict):
# neighbor1 = molecule.atoms[bs_dict['neighbor1_index']]
# neighbor2 = molecule.atoms[bs_dict['neighbor2_index']]
# return cls.__init__(bs_dict['stereo_type'], neighbor1, neighbor2)
# @property
# def stereo_type(self):
# return self._stereo_type
# @stereo_type.setter
# def stereo_type(self, value):
# assert (value == 'CIS') or (value == 'TRANS') or (value is None)
# self._stereo_type = value
# @property
# def neighbor1(self):
# return self._neighbor1
# @property
# def neighbor2(self):
# return self._neighbor2
# @property
# def neighbors(self):
# return (self._neighbor1, self._neighbor2)
class Bond(Serializable):
    """
    Chemical bond representation.

    .. warning :: This API is experimental and subject to change.

    .. todo :: Allow bonds to have associated properties.

    Attributes
    ----------
    atom1, atom2 : openff.toolkit.topology.Atom
        Atoms involved in the bond
    bond_order : int
        The (integer) bond order of this bond.
    is_aromatic : bool
        Whether or not this bond is aromatic.
    fractional_bond_order : float, optional
        The fractional bond order, or partial bond order of this bond.
    stereochemstry : str, optional, default=None
        A string representing this stereochemistry of this bond.
    """

    def __init__(
        self,
        atom1,
        atom2,
        bond_order,
        is_aromatic,
        fractional_bond_order=None,
        stereochemistry=None,
    ):
        """
        Create a new chemical bond.

        Both atoms must already belong to the same (Frozen)Molecule; the
        bond's parent molecule is inferred from them and the bond
        registers itself with both atoms.
        """
        assert type(atom1) == Atom
        assert type(atom2) == Atom
        assert atom1.molecule is atom2.molecule
        assert isinstance(atom1.molecule, FrozenMolecule)
        self._molecule = atom1.molecule

        self._atom1 = atom1
        self._atom2 = atom2

        atom1.add_bond(self)
        atom2.add_bond(self)
        # TODO: Check bondtype and fractional_bond_order are valid?
        # TODO: Dative bonds
        self._fractional_bond_order = fractional_bond_order
        self._bond_order = bond_order
        self._is_aromatic = is_aromatic
        self._stereochemistry = stereochemistry

    def to_dict(self):
        """
        Return a dict representation of the bond.
        """
        bond_dict = OrderedDict()
        bond_dict["atom1"] = self.atom1.molecule_atom_index
        bond_dict["atom2"] = self.atom2.molecule_atom_index
        bond_dict["bond_order"] = self._bond_order
        bond_dict["is_aromatic"] = self._is_aromatic
        bond_dict["stereochemistry"] = self._stereochemistry
        bond_dict["fractional_bond_order"] = self._fractional_bond_order
        return bond_dict

    @classmethod
    def from_dict(cls, molecule, d):
        """Create a Bond from a dict representation.

        Parameters
        ----------
        molecule : the parent molecule, used to resolve atom indices.
        d : dict as produced by ``Bond.to_dict``.
        """
        # Work on a copy so the caller's dict is not mutated.
        d = dict(d)
        # Resolve serialized atom indices back to Atom objects. The parent
        # molecule is inferred from the atoms in __init__, so it is not
        # passed as a constructor argument.
        d["atom1"] = molecule.atoms[d["atom1"]]
        d["atom2"] = molecule.atoms[d["atom2"]]
        # BUG FIX: `cls(*d)` unpacked the dict's *keys* as positional
        # arguments (and a spurious "molecule" key was injected); unpack
        # the values as keyword arguments instead.
        return cls(**d)

    @property
    def atom1(self):
        return self._atom1

    @property
    def atom2(self):
        return self._atom2

    @property
    def atom1_index(self):
        return self.molecule.atoms.index(self._atom1)

    @property
    def atom2_index(self):
        return self.molecule.atoms.index(self._atom2)

    @property
    def atoms(self):
        return (self._atom1, self._atom2)

    @property
    def bond_order(self):
        return self._bond_order

    @bond_order.setter
    def bond_order(self, value):
        if isinstance(value, int):
            self._bond_order = value
        else:
            raise InvalidBondOrderError(
                "Only integer bond orders may be passed to `Bond.bond_order` setter. "
                "For aromatic bonds, instead kekulize the input structure and use "
                "the resulting integer bond orders. If performing partial bond "
                "order-based parameter interpolation, consider using "
                "`Bond.fractional_bond_order`."
            )

    @property
    def fractional_bond_order(self):
        return self._fractional_bond_order

    @fractional_bond_order.setter
    def fractional_bond_order(self, value):
        self._fractional_bond_order = value

    @property
    def stereochemistry(self):
        return self._stereochemistry

    @property
    def is_aromatic(self):
        return self._is_aromatic

    @property
    def molecule(self):
        return self._molecule

    @molecule.setter
    def molecule(self, value):
        """
        Sets the Bond's parent molecule. Can not be changed after assignment
        """
        assert self._molecule is None
        self._molecule = value

    @property
    def molecule_bond_index(self):
        """
        The index of this Bond within the the list of bonds in ``Molecules``.
        """
        if self._molecule is None:
            # BUG FIX: the message previously said "Atom".
            raise ValueError("This Bond does not belong to a Molecule object")
        return self._molecule.bonds.index(self)

    def is_in_ring(self, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY) -> bool:
        """
        Return whether or not this bond is in a ring(s) (of any size)

        This Bond is expected to be attached to a molecule (`Bond.molecule`).

        Note: Bonds containing atoms that are only in separate rings, i.e. the central bond in a biphenyl,
        are not considered to be bonded by this criteria.

        Parameters
        ----------
        toolkit_registry: openff.toolkit.utils.toolkits.ToolkitRegistry, default=GLOBAL_TOOLKIT_REGISTRY
            :class:`ToolkitRegistry` to use to enumerate the tautomers.

        Returns
        -------
        is_in_ring: bool
            Whether or not this bond is in a ring.
        """
        return toolkit_registry.call("bond_is_in_ring", self)

    def __repr__(self):
        return f"Bond(atom1 index={self.atom1_index}, atom2 index={self.atom2_index})"

    def __str__(self):
        return (
            f"<Bond atom1 index='{self.atom1_index}', atom2 index='{self.atom2_index}'>"
        )
# TODO: How do we automatically trigger invalidation of cached properties if an ``Atom`` or ``Bond`` is modified,
# rather than added/deleted via the API? The simplest resolution is simply to make them immutable.
class FrozenMolecule(Serializable):
"""
Immutable chemical representation of a molecule, such as a small molecule or biopolymer.
.. todo :: What other API calls would be useful for supporting biopolymers
as small molecules? Perhaps iterating over chains and residues?
Examples
--------
Create a molecule from a sdf file
>>> from openff.toolkit.utils import get_data_file_path
>>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
>>> molecule = FrozenMolecule.from_file(sdf_filepath)
Convert to OpenEye OEMol object
>>> oemol = molecule.to_openeye()
Create a molecule from an OpenEye molecule
>>> molecule = FrozenMolecule.from_openeye(oemol)
Convert to RDKit Mol object
>>> rdmol = molecule.to_rdkit()
Create a molecule from an RDKit molecule
>>> molecule = FrozenMolecule.from_rdkit(rdmol)
Create a molecule from IUPAC name (requires the OpenEye toolkit)
>>> molecule = FrozenMolecule.from_iupac('imatinib')
Create a molecule from SMILES
>>> molecule = FrozenMolecule.from_smiles('Cc1ccccc1')
.. warning :: This API is experimental and subject to change.
"""
def __init__(
self,
other=None,
file_format=None,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
allow_undefined_stereo=False,
):
r"""
Create a new FrozenMolecule object
.. todo ::
* If a filename or file-like object is specified but the file
contains more than one molecule, what is the proper behavior?
Read just the first molecule, or raise an exception if more
than one molecule is found?
* Should we also support SMILES strings or IUPAC names for
``other``\ ?
Parameters
----------
other : optional, default=None
If specified, attempt to construct a copy of the molecule from
the specified object. This can be any one of the following:
* a :class:`Molecule` object
* a file that can be used to construct a :class:`Molecule` object
* an ``openeye.oechem.OEMol``
* an ``rdkit.Chem.rdchem.Mol``
* a serialized :class:`Molecule` object
file_format : str, optional, default=None
If providing a file-like object, you must specify the format
of the data. If providing a file, the file format will attempt
to be guessed from the suffix.
toolkit_registry : a :class:`ToolkitRegistry` or
:class:`ToolkitWrapper` object, optional,
default=GLOBAL_TOOLKIT_REGISTRY :class:`ToolkitRegistry`
or :class:`ToolkitWrapper` to use for I/O operations
allow_undefined_stereo : bool, default=False
If loaded from a file and ``False``, raises an exception if
undefined stereochemistry is detected during the molecule's
construction.
Examples
--------
Create an empty molecule:
>>> empty_molecule = FrozenMolecule()
Create a molecule from a file that can be used to construct a molecule,
using either a filename or file-like object:
>>> from openff.toolkit.utils import get_data_file_path
>>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
>>> molecule = FrozenMolecule(sdf_filepath)
>>> molecule = FrozenMolecule(open(sdf_filepath, 'r'), file_format='sdf')
>>> import gzip
>>> mol2_gz_filepath = get_data_file_path('molecules/toluene.mol2.gz')
>>> molecule = FrozenMolecule(gzip.GzipFile(mol2_gz_filepath, 'r'), file_format='mol2')
Create a molecule from another molecule:
>>> molecule_copy = FrozenMolecule(molecule)
Convert to OpenEye OEMol object
>>> oemol = molecule.to_openeye()
Create a molecule from an OpenEye molecule:
>>> molecule = FrozenMolecule(oemol)
Convert to RDKit Mol object
>>> rdmol = molecule.to_rdkit()
Create a molecule from an RDKit molecule:
>>> molecule = FrozenMolecule(rdmol)
Convert the molecule into a dictionary and back again:
>>> serialized_molecule = molecule.to_dict()
>>> molecule_copy = FrozenMolecule(serialized_molecule)
"""
self._cached_smiles = None
# Figure out if toolkit_registry is a whole registry, or just a single wrapper
if isinstance(toolkit_registry, ToolkitRegistry):
pass
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
toolkit_registry = ToolkitRegistry(toolkit_precedence=[])
toolkit_registry.add_toolkit(toolkit)
else:
raise InvalidToolkitRegistryError(
"'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
)
if other is None:
self._initialize()
else:
loaded = False
# Start a list of the ValueErrors the following logic encounters, so we can print it out
# if there turned out to be no way to load this input
value_errors = list()
if isinstance(other, FrozenMolecule) and not loaded:
self._copy_initializer(other)
loaded = True
if isinstance(other, Molecule) and not loaded:
# TODO: This will need to be updated once FrozenMolecules and Molecules are significantly different
self._copy_initializer(other)
loaded = True
if isinstance(other, dict) and not loaded:
self._initialize_from_dict(other)
loaded = True
# Check through the toolkit registry to find a compatible wrapper for loading
if not loaded:
try:
# Each ToolkitWrapper may provide a from_object method, which turns some particular type(s)
# of object into OFFMols. For example, RDKitToolkitWrapper's from_object method will
# return an OFFMol if provided with an RDMol, or raise a ValueError if it is provided
# an OEMol (or anything else). This makes the assumption that any non-ValueError errors raised
# by the toolkit _really are_ bad and should be raised immediately, which may be a bad assumption.
result = toolkit_registry.call(
"from_object",
other,
allow_undefined_stereo=allow_undefined_stereo,
raise_exception_types=[UndefinedStereochemistryError],
_cls=self.__class__,
)
# NotImplementedError should never be raised... Only from_file and from_file_obj are provided
# in the base ToolkitWrapper class and require overwriting, so from_object should be excluded
# except NotImplementedError as e:
# raise e
# The toolkit registry will aggregate all errors except UndefinedStereochemistryErrors into a single
# ValueError, which we should catch and and store that here.
except ValueError as e:
value_errors.append(e)
else:
self._copy_initializer(result)
loaded = True
# TODO: Make this compatible with file-like objects (I couldn't figure out how to make an oemolistream
# from a fileIO object)
if isinstance(other, str) or hasattr(other, "read") and not loaded:
try:
mol = Molecule.from_file(
other,
file_format=file_format,
toolkit_registry=toolkit_registry,
allow_undefined_stereo=allow_undefined_stereo,
) # returns a list only if multiple molecules are found
if type(mol) == list:
raise ValueError(
"Specified file or file-like object must contain exactly one molecule"
)
except ValueError as e:
value_errors.append(e)
else:
self._copy_initializer(mol)
loaded = True
# If none of the above methods worked, raise a ValueError summarizing the
# errors from the different loading attempts
if not loaded:
msg = (
f"Cannot construct openff.toolkit.topology.Molecule from {other}\n"
)
for value_error in value_errors:
msg += str(value_error)
raise ValueError(msg)
    @property
    def has_unique_atom_names(self) -> bool:
        """``True`` if the molecule has unique atom names, ``False`` otherwise."""
        # Delegates to the module-level helper so the check is defined once.
        return _has_unique_atom_names(self)
    def generate_unique_atom_names(self):
        """
        Generate unique atom names from the element symbol and count.

        Names are generated from the elemental symbol and the number of times
        that element is found in the molecule. The character 'x' is appended to
        these generated names to reduce the odds that they clash with an atom
        name or type imported from another source. For example, generated atom
        names might begin 'C1x', 'H1x', 'O1x', 'C2x', etc.
        """
        # Delegates to the module-level helper; its return value (if any) is
        # passed straight through.
        return _generate_unique_atom_names(self)
def _validate(self):
"""
Validate the molecule, ensuring it has unique atom names
"""
if not self.has_unique_atom_names:
self.generate_unique_atom_names()
def strip_atom_stereochemistry(
self, smarts, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY
):
"""Delete stereochemistry information for certain atoms, if it is present.
This method can be used to "normalize" molecules imported from different cheminformatics
toolkits, which differ in which atom centers are considered stereogenic.
Parameters
----------
smarts: str or ChemicalEnvironment
Tagged SMARTS with a single atom with index 1. Any matches for this atom will have any assigned
stereocheistry information removed.
toolkit_registry : a :class:`ToolkitRegistry` or :class:`ToolkitWrapper` object, optional,
default=GLOBAL_TOOLKIT_REGISTRY
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for I/O operations
"""
from openff.toolkit.typing.chemistry.environment import AtomChemicalEnvironment
chem_env = AtomChemicalEnvironment(smarts)
matches = self.chemical_environment_matches(
chem_env, toolkit_registry=toolkit_registry
)
for match in set(matches):
atom_idx = match[0]
self.atoms[atom_idx].stereochemistry = None
####################################################################################################
# Safe serialization
####################################################################################################
    def to_dict(self):
        """
        Return a dictionary representation of the molecule.

        .. todo ::

           * Document the representation standard.
           * How do we do version control with this standard?

        Returns
        -------
        molecule_dict : OrderedDict
            A dictionary representation of the molecule. Atoms and bonds are
            lists of their own dict representations; conformers and partial
            charges are serialized numpy arrays accompanied by ``*_unit``
            entries naming their units.
        """
        from openff.toolkit.utils.utils import serialize_numpy

        molecule_dict = OrderedDict()
        molecule_dict["name"] = self._name
        # From Jeff: If we go the properties-as-dict route, then _properties should, at
        # the top level, be a dict. Should we go through recursively and ensure all values are dicts too?
        molecule_dict["atoms"] = [atom.to_dict() for atom in self._atoms]
        molecule_dict["bonds"] = [bond.to_dict() for bond in self._bonds]
        # TODO: Charges
        # TODO: Properties
        # From Jeff: We could have the onerous requirement that all "properties" have to_dict() functions.
        # Or we could restrict properties to simple stuff (ints, strings, floats, and the like)
        # Or pickle anything unusual
        # Or not allow user-defined properties at all (just use our internal _cached_properties)
        # molecule_dict['properties'] = dict([(key, value._to_dict()) for key.value in self._properties])
        # TODO: Assuming "simple stuff" properties right now, figure out a better standard
        # deepcopy so later mutation of this molecule cannot alter the returned dict
        molecule_dict["properties"] = deepcopy(self._properties)
        if hasattr(self, "_cached_properties"):
            molecule_dict["cached_properties"] = deepcopy(self._cached_properties)
        # TODO: Conformers
        if self._conformers is None:
            molecule_dict["conformers"] = None
        else:
            molecule_dict["conformers"] = []
            molecule_dict[
                "conformers_unit"
            ] = "angstrom"  # Have this defined as a class variable?
            for conf in self._conformers:
                # Strip units before serializing; the unit name is recorded
                # once in "conformers_unit" above.
                conf_unitless = conf.m_as(unit.angstrom)
                conf_serialized, conf_shape = serialize_numpy((conf_unitless))
                molecule_dict["conformers"].append(conf_serialized)
        if self._partial_charges is None:
            molecule_dict["partial_charges"] = None
            molecule_dict["partial_charges_unit"] = None
        else:
            charges_unitless = self._partial_charges.m_as(unit.elementary_charge)
            charges_serialized, charges_shape = serialize_numpy(charges_unitless)
            molecule_dict["partial_charges"] = charges_serialized
            molecule_dict["partial_charges_unit"] = "elementary_charge"
        molecule_dict["hierarchy_schemes"] = dict()
        for iter_name, hier_scheme in self._hierarchy_schemes.items():
            molecule_dict["hierarchy_schemes"][iter_name] = hier_scheme.to_dict()
        return molecule_dict
    def __hash__(self):
        """
        Returns a hash of this molecule. Used when checking molecule uniqueness in Topology creation.

        Returns
        -------
        int
            Hash of the molecule's canonical isomeric SMILES string.
            (The previous docstring said "string", but ``__hash__`` must
            return an int, and ``hash()`` does.)
        """
        return hash(self.to_smiles())
# @cached_property
def ordered_connection_table_hash(self):
"""Compute an ordered hash of the atoms and bonds in the molecule"""
if self._ordered_connection_table_hash is not None:
return self._ordered_connection_table_hash
id = ""
for atom in self.atoms:
id += f"{atom.symbol}_{atom.formal_charge}_{atom.stereochemistry}__"
for bond in self.bonds:
id += f"{bond.bond_order}_{bond.stereochemistry}_{bond.atom1_index}_{bond.atom2_index}__"
# return hash(id)
self._ordered_connection_table_hash = hash(id)
return self._ordered_connection_table_hash
@classmethod
def from_dict(cls, molecule_dict):
"""
Create a new Molecule from a dictionary representation
Parameters
----------
molecule_dict : OrderedDict
A dictionary representation of the molecule.
Returns
-------
molecule : Molecule
A Molecule created from the dictionary representation
"""
# This implementation is a compromise to let this remain as a classmethod
mol = cls()
mol._initialize_from_dict(molecule_dict)
return mol
    def _initialize_from_dict(self, molecule_dict):
        """
        Initialize the molecule from a dictionary representation.

        Overwrites any existing state on this molecule.

        Parameters
        ----------
        molecule_dict : OrderedDict
            A dictionary representation of the molecule, as produced by
            :meth:`to_dict`.
        """
        # TODO: Provide useful exception messages if there are any failures
        from openff.toolkit.utils.utils import deserialize_numpy

        # Reset to an empty state so nothing from a previous molecule leaks in.
        self._initialize()
        self.name = molecule_dict["name"]
        for atom_dict in molecule_dict["atoms"]:
            self._add_atom(**atom_dict)
        for bond_dict in molecule_dict["bonds"]:
            # Coerce endpoint indices to int in case they were serialized as
            # another numeric type (e.g. numpy integers).
            bond_dict["atom1"] = int(bond_dict["atom1"])
            bond_dict["atom2"] = int(bond_dict["atom2"])
            self._add_bond(**bond_dict)
        if molecule_dict["partial_charges"] is None:
            self._partial_charges = None
        else:
            charges_shape = (self.n_atoms,)
            partial_charges_unitless = deserialize_numpy(
                molecule_dict["partial_charges"], charges_shape
            )
            # Look the serialized unit name up on the unit registry.
            pc_unit = getattr(unit, molecule_dict["partial_charges_unit"])
            partial_charges = unit.Quantity(partial_charges_unitless, pc_unit)
            self._partial_charges = partial_charges
        if molecule_dict["conformers"] is None:
            self._conformers = None
        else:
            self._conformers = list()
            for ser_conf in molecule_dict["conformers"]:
                # TODO: Update to use string_to_quantity
                conformers_shape = (self.n_atoms, 3)
                conformer_unitless = deserialize_numpy(ser_conf, conformers_shape)
                c_unit = getattr(unit, molecule_dict["conformers_unit"])
                conformer = unit.Quantity(conformer_unitless, c_unit)
                self._conformers.append(conformer)
        self._properties = deepcopy(molecule_dict["properties"])
        for iter_name, hierarchy_scheme_dict in molecule_dict[
            "hierarchy_schemes"
        ].items():
            # It's important that we do NOT call `add_hierarchy_scheme` here, since we
            # need to deserialize these HierarchyElements exactly as they were serialized,
            # even if that conflicts with the current values in atom metadata.
            new_hier_scheme = HierarchyScheme(
                self,
                tuple(hierarchy_scheme_dict["uniqueness_criteria"]),
                iter_name,
            )
            self._hierarchy_schemes[iter_name] = new_hier_scheme
            for element_dict in hierarchy_scheme_dict["hierarchy_elements"]:
                new_hier_scheme.add_hierarchy_element(
                    tuple(element_dict["identifier"]), element_dict["atom_indices"]
                )
def __repr__(self):
"""Return a summary of this molecule; SMILES if valid, Hill formula if not."""
description = f"Molecule with name '{self.name}'"
try:
smiles = self.to_smiles()
except Exception:
hill = self.to_hill_formula()
return description + f" with bad SMILES and Hill formula '{hill}'"
return description + f" and SMILES '{smiles}'"
def _initialize(self):
"""
Clear the contents of the current molecule.
"""
self._name = ""
self._atoms = list()
self._bonds = list() # List of bonds between Atom objects
self._properties = {} # Attached properties to be preserved
# self._cached_properties = None # Cached properties (such as partial charges) can be recomputed as needed
self._partial_charges = None
self._conformers = None # Optional conformers
self._hill_formula = None # Cached Hill formula
self._hierarchy_schemes = dict()
self._invalidate_cached_properties()
def _copy_initializer(self, other):
"""
Copy contents of the specified molecule
.. todo :: Should this be a ``@staticmethod`` where we have an explicit copy constructor?
Parameters
----------
other : optional
Overwrite the state of this FrozenMolecule with the specified FrozenMolecule object.
A deep copy is made.
"""
# assert isinstance(other, type(self)), "can only copy instances of {}".format(type(self))
# Run a deepcopy here so that items that were _always_ dict (like other.properties) will
# not have any references to the old molecule
other_dict = deepcopy(other.to_dict())
self._initialize_from_dict(other_dict)
def __eq__(self, other):
"""
Test two molecules for equality to see if they are the chemical species, but do not check other
annotated properties.
.. note ::
Note that this method simply tests whether two molecules are identical chemical species using equivalence of
their canonical isomeric SMILES. No effort is made to ensure that the atoms are in the same order or that
any annotated properties are preserved.
"""
# updated to use the new isomorphic checking method, with full matching
# TODO the doc string did not match the previous function what matching should this method do?
return Molecule.are_isomorphic(self, other, return_atom_map=False)[0]
def __deepcopy__(self, memo):
cls = self.__class__
return cls(self.to_dict())
def add_default_hierarchy_schemes(self, overwrite_existing=True):
"""
Adds ``chain`` and ``residue`` hierarchy schemes.
The Open Force Field Toolkit has no native understanding of hierarchical
atom organisation schemes common to other biomolecular software, such as
"residues" or "chains" (see :ref:`userguide_hierarchy`). Hierarchy
schemes allow iteration over groups of atoms according to their
metadata. For more information, see
:class:`~openff.toolkit.topology.molecule.HierarchyScheme`.
If a ``Molecule`` with the default hierarchy schemes
changes, :meth:`Molecule.update_hierarchy_schemes()` must be called before
the residues or chains are iterated over again or else the iteration may
be incorrect.
Parameters
----------
overwrite_existing : bool, default=True
Whether to overwrite existing instances of the `residue` and `chain`
hierarchy schemes. If this is ``False`` and either of the hierarchy
schemes are already defined on this molecule, an exception will be
raised.
Raises
------
HierarchySchemeWithIteratorNameAlreadyRegisteredException
When ``overwrite_existing=False`` and either the ``chains`` or
``residues`` hierarchy scheme is already configured.
See also
--------
HierarchyScheme, Molecule.add_hierarchy_scheme,
Molecule.update_hierarchy_schemes, Molecule.perceive_residues,
"""
self._add_chain_hierarchy_scheme(overwrite_existing=overwrite_existing)
self._add_residue_hierarchy_scheme(overwrite_existing=overwrite_existing)
def _add_chain_hierarchy_scheme(self, overwrite_existing=True):
"""Add ``chain`` hierarchy scheme."""
if overwrite_existing:
if "chains" in self._hierarchy_schemes.keys():
self.delete_hierarchy_scheme("chains")
self.add_hierarchy_scheme(("chain_id",), "chains")
def _add_residue_hierarchy_scheme(self, overwrite_existing=True):
"""Add ``residue`` hierarchy scheme."""
if overwrite_existing:
if "residues" in self._hierarchy_schemes.keys():
self.delete_hierarchy_scheme("residues")
self.add_hierarchy_scheme(
("chain_id", "residue_number", "insertion_code", "residue_name"), "residues"
)
def add_hierarchy_scheme(
self,
uniqueness_criteria,
iterator_name,
):
"""
Use the molecule's metadata to facilitate iteration over its atoms.
This method will add an attribute with the name given by the
``iterator_name`` argument that provides an iterator over groups of
atoms. Atoms are grouped by the values in their ``atom.metadata``
dictionary; any atoms with the same values for the keys given in the
``uniqueness_criteria`` argument will be in the same group. These groups
have the type :class:`~openff.toolkit.topology.molecule.HierarchyElement`.
Hierarchy schemes are not updated dynamically; if a ``Molecule`` with
hierarchy schemes changes, :meth:`Molecule.update_hierarchy_schemes()` must
be called before the scheme is iterated over again or else the grouping
may be incorrect.
Hierarchy schemes allow iteration over groups of atoms according to
their metadata. For more information, see
:class:`~openff.toolkit.topology.molecule.HierarchyScheme`.
Parameters
----------
uniqueness_criteria : tuple of str
The names of ``Atom`` metadata entries that define this scheme. An
atom belongs to a ``HierarchyElement`` only if its metadata has the
same values for these criteria as the other atoms in the
``HierarchyElement``.
iterator_name : str
Name of the iterator that will be exposed to access the hierarchy
elements generated by this scheme.
Returns
-------
new_hier_scheme : openff.toolkit.topology.HierarchyScheme
The newly created HierarchyScheme
See also
--------
Molecule.add_default_hierarchy_schemes, Molecule.hierarchy_schemes,
Molecule.delete_hierarchy_scheme, Molecule.update_hierarchy_schemes,
HierarchyScheme,
"""
if iterator_name in self._hierarchy_schemes:
msg = (
f'Can not add iterator with name "{iterator_name}" to this topology, as iterator '
f"name is already used by {self._hierarchy_schemes[iterator_name]}"
)
raise HierarchySchemeWithIteratorNameAlreadyRegisteredException(msg)
new_hier_scheme = HierarchyScheme(
self,
uniqueness_criteria,
iterator_name,
)
self._hierarchy_schemes[iterator_name] = new_hier_scheme
self.update_hierarchy_schemes([iterator_name])
return new_hier_scheme
    @property
    def hierarchy_schemes(self) -> Dict[str, "HierarchyScheme"]:
        """
        The hierarchy schemes available on the molecule.

        Hierarchy schemes allow iteration over groups of atoms according to
        their metadata. For more information, see
        :class:`~openff.toolkit.topology.molecule.HierarchyScheme`.

        Returns
        -------
        Dict[str, HierarchyScheme]
            Mapping from iterator name to the HierarchyScheme registered under
            that name. Note this is the live internal dict, not a copy.

        See also
        --------
        Molecule.add_hierarchy_scheme, Molecule.delete_hierarchy_scheme,
        Molecule.update_hierarchy_schemes, Topology.hierarchy_iterator,
        HierarchyScheme
        """
        return self._hierarchy_schemes
def delete_hierarchy_scheme(self, iter_name):
"""
Remove an existing ``HierarchyScheme`` specified by its iterator name.
Hierarchy schemes allow iteration over groups of atoms according to
their metadata. For more information, see
:class:`~openff.toolkit.topology.molecule.HierarchyScheme`.
Parameters
----------
iter_name : str
See also
--------
Molecule.add_hierarchy_scheme, Molecule.update_hierarchy_schemes,
Molecule.hierarchy_schemes, HierarchyScheme
"""
if iter_name not in self._hierarchy_schemes:
raise HierarchySchemeNotFoundException(
f'Can not delete HierarchyScheme with name "{iter_name}" '
f"because no HierarchyScheme with that iterator name exists"
)
self._hierarchy_schemes.pop(iter_name)
def update_hierarchy_schemes(self, iter_names=None):
"""
Infer a hierarchy from atom metadata according to the existing hierarchy
schemes.
Hierarchy schemes allow iteration over groups of atoms according to
their metadata. For more information, see
:class:`~openff.toolkit.topology.molecule.HierarchyScheme`.
Parameters
----------
iter_names : Iterable of str, Optional
Only perceive hierarchy for HierarchySchemes that expose these
iterator names. If not provided, all known hierarchies will be
perceived, overwriting previous results if applicable.
See also
--------
Molecule.add_hierarchy_scheme, Molecule.delete_hierarchy_schemes,
Molecule.hierarchy_schemes, HierarchyScheme
"""
if iter_names is None:
iter_names = self._hierarchy_schemes.keys()
for iter_name in iter_names:
hierarchy_scheme = self._hierarchy_schemes[iter_name]
hierarchy_scheme.perceive_hierarchy()
    def __getattr__(self, name: str):
        """If a requested attribute is not found, check the hierarchy schemes.

        Python calls this only after normal attribute lookup fails; it exposes
        each registered hierarchy scheme's elements under its iterator name
        (e.g. ``mol.residues``).
        """
        try:
            # Read __dict__ directly (not self._hierarchy_schemes) so a missing
            # "_hierarchy_schemes" entry cannot re-enter __getattr__ and
            # recurse. Either a missing "_hierarchy_schemes" key or an unknown
            # iterator name surfaces as KeyError here.
            return self.__dict__["_hierarchy_schemes"][name].hierarchy_elements
        except KeyError:
            # Re-raise as AttributeError to honor the contract expected by
            # getattr()/hasattr() callers.
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute {name!r}"
            )
def __dir__(self):
"""Add the hierarchy scheme iterator names to dir"""
return list(self._hierarchy_schemes.keys()) + list(super().__dir__())
def to_smiles(
self,
isomeric=True,
explicit_hydrogens=True,
mapped=False,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
"""
Return a canonical isomeric SMILES representation of the current molecule.
A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the
properties dictionary.
.. note :: RDKit and OpenEye versions will not necessarily return the same representation.
Parameters
----------
isomeric: bool optional, default= True
return an isomeric smiles
explicit_hydrogens: bool optional, default=True
return a smiles string containing all hydrogens explicitly
mapped: bool optional, default=False
return a explicit hydrogen mapped smiles, the atoms to be mapped can be controlled by supplying an
atom map into the properties dictionary. If no mapping is passed all atoms will be mapped in order, else
an atom map dictionary from the current atom index to the map id should be supplied with no duplicates.
The map ids (values) should start from 0 or 1.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES conversion
Returns
-------
smiles : str
Canonical isomeric explicit-hydrogen SMILES
Examples
--------
>>> from openff.toolkit.utils import get_data_file_path
>>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
>>> molecule = Molecule(sdf_filepath)
>>> smiles = molecule.to_smiles()
"""
# Initialize cached_smiles dict for this molecule if none exists
if self._cached_smiles is None:
self._cached_smiles = {}
# Figure out which toolkit should be used to create the SMILES
if isinstance(toolkit_registry, ToolkitRegistry):
to_smiles_method = toolkit_registry.resolve("to_smiles")
elif isinstance(toolkit_registry, ToolkitWrapper):
to_smiles_method = toolkit_registry.to_smiles
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to to_smiles. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
# Get a string representation of the function containing the toolkit name so we can check
# if a SMILES was already cached for this molecule. This will return, for example
# "RDKitToolkitWrapper.to_smiles"
smiles_hash = (
to_smiles_method.__qualname__
+ str(isomeric)
+ str(explicit_hydrogens)
+ str(mapped)
)
smiles_hash += str(self._properties.get("atom_map", None))
# Check to see if a SMILES for this molecule was already cached using this method
if smiles_hash in self._cached_smiles:
return self._cached_smiles[smiles_hash]
else:
smiles = to_smiles_method(self, isomeric, explicit_hydrogens, mapped)
self._cached_smiles[smiles_hash] = smiles
return smiles
@classmethod
def from_inchi(
cls,
inchi,
allow_undefined_stereo=False,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
"""
Construct a Molecule from a InChI representation
Parameters
----------
inchi : str
The InChI representation of the molecule.
allow_undefined_stereo : bool, default=False
Whether to accept InChI with undefined stereochemistry. If False,
an exception will be raised if a InChI with undefined stereochemistry
is passed into this function.
toolkit_registry : openff.toolkit.utils.toolkits.ToolRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for InChI-to-molecule conversion
Returns
-------
molecule : openff.toolkit.topology.Molecule
Examples
--------
Make cis-1,2-Dichloroethene:
>>> molecule = Molecule.from_inchi('InChI=1S/C2H2Cl2/c3-1-2-4/h1-2H/b2-1-')
"""
if isinstance(toolkit_registry, ToolkitRegistry):
molecule = toolkit_registry.call(
"from_inchi",
inchi,
_cls=cls,
allow_undefined_stereo=allow_undefined_stereo,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
molecule = toolkit.from_inchi(
inchi, _cls=cls, allow_undefined_stereo=allow_undefined_stereo
)
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to from_inchi. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
return molecule
def to_inchi(self, fixed_hydrogens=False, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY):
"""
Create an InChI string for the molecule using the requested toolkit backend.
InChI is a standardised representation that does not capture tautomers unless specified using the fixed
hydrogen layer.
For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/
Parameters
----------
fixed_hydrogens: bool, default=False
If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard
specific InChI string of the molecule.
toolkit_registry : openff.toolkit.utils.toolkits.ToolRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for molecule-to-InChI conversion
Returns
--------
inchi: str
The InChI string of the molecule.
Raises
-------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
"""
if isinstance(toolkit_registry, ToolkitRegistry):
inchi = toolkit_registry.call(
"to_inchi", self, fixed_hydrogens=fixed_hydrogens
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
inchi = toolkit.to_inchi(self, fixed_hydrogens=fixed_hydrogens)
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to to_inchi. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
return inchi
def to_inchikey(
self, fixed_hydrogens=False, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY
):
"""
Create an InChIKey for the molecule using the requested toolkit backend.
InChIKey is a standardised representation that does not capture tautomers unless specified
using the fixed hydrogen layer.
For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/
Parameters
----------
fixed_hydrogens: bool, default=False
If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific
InChI string of the molecule.
toolkit_registry : openff.toolkit.utils.toolkits.ToolRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for molecule-to-InChIKey conversion
Returns
--------
inchi_key: str
The InChIKey representation of the molecule.
Raises
-------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
"""
if isinstance(toolkit_registry, ToolkitRegistry):
inchi_key = toolkit_registry.call(
"to_inchikey", self, fixed_hydrogens=fixed_hydrogens
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
inchi_key = toolkit.to_inchikey(self, fixed_hydrogens=fixed_hydrogens)
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to to_inchikey. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
return inchi_key
@classmethod
def from_smiles(
cls,
smiles,
hydrogens_are_explicit=False,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
allow_undefined_stereo=False,
):
"""
Construct a Molecule from a SMILES representation
Parameters
----------
smiles : str
The SMILES representation of the molecule.
hydrogens_are_explicit : bool, default = False
If False, the cheminformatics toolkit will perform hydrogen addition
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES-to-molecule conversion
allow_undefined_stereo : bool, default=False
Whether to accept SMILES with undefined stereochemistry. If False,
an exception will be raised if a SMILES with undefined stereochemistry
is passed into this function.
Returns
-------
molecule : openff.toolkit.topology.Molecule
Examples
--------
>>> molecule = Molecule.from_smiles('Cc1ccccc1')
"""
if isinstance(toolkit_registry, ToolkitRegistry):
molecule = toolkit_registry.call(
"from_smiles",
smiles,
hydrogens_are_explicit=hydrogens_are_explicit,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
molecule = toolkit.from_smiles(
smiles,
hydrogens_are_explicit=hydrogens_are_explicit,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
)
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to from_smiles. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
return molecule
def _is_exactly_the_same_as(self, other):
for atom1, atom2 in zip(self.atoms, other.atoms):
if (
(atom1.atomic_number != atom2.atomic_number)
or (atom1.formal_charge != atom2.formal_charge)
or (atom1.is_aromatic != atom2.is_aromatic)
or (atom1.stereochemistry != atom2.stereochemistry)
):
return False
for bond1, bond2 in zip(self.bonds, other.bonds):
if (
(bond1.atom1_index != bond2.atom1_index)
or (bond1.atom2_index != bond2.atom2_index)
or (bond1.is_aromatic != bond2.is_aromatic)
or (bond1.stereochemistry != bond2.stereochemistry)
):
return False
return True
@staticmethod
def are_isomorphic(
mol1,
mol2,
return_atom_map=False,
aromatic_matching=True,
formal_charge_matching=True,
bond_order_matching=True,
atom_stereochemistry_matching=True,
bond_stereochemistry_matching=True,
strip_pyrimidal_n_atom_stereo=True,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
"""
Determine if ``mol1`` is isomorphic to ``mol2``.
``are_isomorphic()`` compares two molecule's graph representations and
the chosen node/edge attributes. Connections and atomic numbers are
always checked.
If nx.Graphs() are given they must at least have ``atomic_number``
attributes on nodes. Other attributes that ``are_isomorphic()`` can
optionally check...
- ... in nodes are:
- ``is_aromatic``
- ``formal_charge``
- ``stereochemistry``
- ... in edges are:
- ``is_aromatic``
- ``bond_order``
- ``stereochemistry``
By default, all attributes are checked, but stereochemistry around
pyrimidal nitrogen is ignored.
.. warning :: This API is experimental and subject to change.
Parameters
----------
mol1 : an openff.toolkit.topology.molecule.FrozenMolecule or nx.Graph()
The first molecule to test for isomorphism.
mol2 : an openff.toolkit.topology.molecule.FrozenMolecule or nx.Graph()
The second molecule to test for isomorphism.
return_atom_map: bool, default=False, optional
Return a ``dict`` containing the atomic mapping instead of a
``bool``.
aromatic_matching: bool, default=True, optional
If ``False``, aromaticity of graph nodes and edges are ignored for
the purpose of determining isomorphism.
formal_charge_matching: bool, default=True, optional
If ``False``, formal charges of graph nodes are ignored for
the purpose of determining isomorphism.
bond_order_matching: bool, default=True, optional
If ``False``, bond orders of graph edges are ignored for
the purpose of determining isomorphism.
atom_stereochemistry_matching : bool, default=True, optional
If ``False``, atoms' stereochemistry is ignored for the
purpose of determining isomorphism.
bond_stereochemistry_matching : bool, default=True, optional
If ``False``, bonds' stereochemistry is ignored for the
purpose of determining isomorphism.
strip_pyrimidal_n_atom_stereo: bool, default=True, optional
If ``True``, any stereochemistry defined around pyrimidal
nitrogen stereocenters will be disregarded in the isomorphism
check.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for
removing stereochemistry from pyrimidal nitrogens.
Returns
-------
molecules_are_isomorphic : bool
atom_map : default=None, Optional,
[Dict[int,int]] ordered by mol1 indexing {mol1_index: mol2_index}
If molecules are not isomorphic given input arguments, will return None instead of dict.
"""
def _object_to_n_atoms(obj):
import networkx as nx
if isinstance(obj, FrozenMolecule):
return obj.n_atoms
elif isinstance(obj, nx.Graph):
return obj.number_of_nodes()
else:
raise TypeError(
"are_isomorphic accepts a NetworkX Graph or OpenFF "
+ f"(Frozen)Molecule, not {type(obj)}"
)
# Quick number of atoms check. Important for large molecules
if _object_to_n_atoms(mol1) != _object_to_n_atoms(mol2):
return False, None
# If the number of atoms match, check the Hill formula
if Molecule._object_to_hill_formula(mol1) != Molecule._object_to_hill_formula(
mol2
):
return False, None
# Do a quick check to see whether the inputs are totally identical (including being in the same atom order)
if isinstance(mol1, FrozenMolecule) and isinstance(mol2, FrozenMolecule):
if mol1._is_exactly_the_same_as(mol2):
return True, {i: i for i in range(mol1.n_atoms)}
# Build the user defined matching functions
def node_match_func(x, y):
# always match by atleast atomic number
is_equal = x["atomic_number"] == y["atomic_number"]
if aromatic_matching:
is_equal &= x["is_aromatic"] == y["is_aromatic"]
if formal_charge_matching:
is_equal &= x["formal_charge"] == y["formal_charge"]
if atom_stereochemistry_matching:
is_equal &= x["stereochemistry"] == y["stereochemistry"]
return is_equal
# check if we want to do any bond matching if not the function is None
if aromatic_matching or bond_order_matching or bond_stereochemistry_matching:
def edge_match_func(x, y):
# We don't need to check the exact bond order (which is 1 or 2)
# if the bond is aromatic. This way we avoid missing a match only
# if the alternate bond orders 1 and 2 are assigned differently.
if aromatic_matching and bond_order_matching:
is_equal = (x["is_aromatic"] == y["is_aromatic"]) or (
x["bond_order"] == y["bond_order"]
)
elif aromatic_matching:
is_equal = x["is_aromatic"] == y["is_aromatic"]
elif bond_order_matching:
is_equal = x["bond_order"] == y["bond_order"]
else:
is_equal = None
if bond_stereochemistry_matching:
if is_equal is None:
is_equal = x["stereochemistry"] == y["stereochemistry"]
else:
is_equal &= x["stereochemistry"] == y["stereochemistry"]
return is_equal
else:
edge_match_func = None
# Here we should work out what data type we have, also deal with lists?
def to_networkx(data):
"""For the given data type, return the networkx graph"""
import networkx as nx
if strip_pyrimidal_n_atom_stereo:
SMARTS = "[N+0X3:1](-[*])(-[*])(-[*])"
if isinstance(data, FrozenMolecule):
# Molecule class instance
if strip_pyrimidal_n_atom_stereo:
# Make a copy of the molecule so we don't modify the original
data = deepcopy(data)
data.strip_atom_stereochemistry(
SMARTS, toolkit_registry=toolkit_registry
)
return data.to_networkx()
elif isinstance(data, nx.Graph):
return data
else:
raise NotImplementedError(
f"The input type {type(data)} is not supported,"
f"please supply an openff.toolkit.topology.molecule.Molecule "
f"or networkx.Graph representation of the molecule."
)
mol1_netx = to_networkx(mol1)
mol2_netx = to_networkx(mol2)
from networkx.algorithms.isomorphism import GraphMatcher # type: ignore
GM = GraphMatcher(
mol1_netx, mol2_netx, node_match=node_match_func, edge_match=edge_match_func
)
isomorphic = GM.is_isomorphic()
if isomorphic and return_atom_map:
topology_atom_map = GM.mapping
# reorder the mapping by keys
sorted_mapping = {}
for key in sorted(topology_atom_map.keys()):
sorted_mapping[key] = topology_atom_map[key]
return isomorphic, sorted_mapping
else:
return isomorphic, None
def is_isomorphic_with(self, other, **kwargs):
"""
Check if the molecule is isomorphic with the other molecule which can be an openff.toolkit.topology.Molecule
or nx.Graph(). Full matching is done using the options described bellow.
.. warning :: This API is experimental and subject to change.
Parameters
----------
other: openff.toolkit.topology.Molecule or nx.Graph()
aromatic_matching: bool, default=True, optional
compare the aromatic attributes of bonds and atoms.
formal_charge_matching: bool, default=True, optional
compare the formal charges attributes of the atoms.
bond_order_matching: bool, deafult=True, optional
compare the bond order on attributes of the bonds.
atom_stereochemistry_matching : bool, default=True, optional
If ``False``, atoms' stereochemistry is ignored for the
purpose of determining equality.
bond_stereochemistry_matching : bool, default=True, optional
If ``False``, bonds' stereochemistry is ignored for the
purpose of determining equality.
strip_pyrimidal_n_atom_stereo: bool, default=True, optional
If ``True``, any stereochemistry defined around pyrimidal
nitrogen stereocenters will be disregarded in the isomorphism
check.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for
removing stereochemistry from pyrimidal nitrogens.
Returns
-------
isomorphic : bool
"""
return Molecule.are_isomorphic(
self,
other,
return_atom_map=False,
aromatic_matching=kwargs.get("aromatic_matching", True),
formal_charge_matching=kwargs.get("formal_charge_matching", True),
bond_order_matching=kwargs.get("bond_order_matching", True),
atom_stereochemistry_matching=kwargs.get(
"atom_stereochemistry_matching", True
),
bond_stereochemistry_matching=kwargs.get(
"bond_stereochemistry_matching", True
),
strip_pyrimidal_n_atom_stereo=kwargs.get(
"strip_pyrimidal_n_atom_stereo", True
),
toolkit_registry=kwargs.get("toolkit_registry", GLOBAL_TOOLKIT_REGISTRY),
)[0]
def generate_conformers(
self,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
n_conformers=10,
rms_cutoff=None,
clear_existing=True,
make_carboxylic_acids_cis=True,
):
"""
Generate conformers for this molecule using an underlying toolkit.
If ``n_conformers=0``, no toolkit wrapper will be called. If ``n_conformers=0``
and ``clear_existing=True``, ``molecule.conformers`` will be set to ``None``.
Parameters
----------
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES-to-molecule conversion
n_conformers : int, default=1
The maximum number of conformers to produce
rms_cutoff : openmm.unit.Quantity-wrapped float, in units of distance, optional, default=None
The minimum RMS value at which two conformers are considered redundant and one is deleted. Precise
implementation of this cutoff may be toolkit-dependent. If ``None``, the cutoff is set to be the
default value for each ``ToolkitWrapper`` (generally 1 Angstrom).
clear_existing : bool, default=True
Whether to overwrite existing conformers for the molecule
make_carboxylic_acids_cis: bool, default=True
Guarantee all conformers have exclusively cis carboxylic acid groups (COOH)
by rotating the proton in any trans carboxylic acids 180 degrees around the
C-O bond. Works around a bug in conformer generation by the OpenEye toolkit
where trans COOH is much more common than it should be.
Examples
--------
>>> molecule = Molecule.from_smiles('CCCCCC')
>>> molecule.generate_conformers()
Raises
------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
"""
# If no conformers are requested, do not call to a ToolkitWrapper at all
if n_conformers == 0:
if clear_existing:
self._conformers = None
return
if isinstance(toolkit_registry, ToolkitRegistry):
return toolkit_registry.call(
"generate_conformers",
self,
n_conformers=n_conformers,
rms_cutoff=rms_cutoff,
clear_existing=clear_existing,
raise_exception_types=[],
make_carboxylic_acids_cis=make_carboxylic_acids_cis,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
return toolkit.generate_conformers(
self,
n_conformers=n_conformers,
rms_cutoff=rms_cutoff,
clear_existing=clear_existing,
make_carboxylic_acids_cis=make_carboxylic_acids_cis,
)
else:
raise InvalidToolkitRegistryError(
"Invalid toolkit_registry passed to generate_conformers. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
    def _make_carboxylic_acids_cis(self, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY):
        """
        Rotate dihedral angle of any conformers with trans COOH groups so they are cis

        Carboxylic acid groups almost always exist in nature in the cis conformation,
        with the hydrogen atom in between the two oxygen atoms::

            O----H
               /
              /
             /
            --C
               \\
                \\
                 O

        However, the OpenEye toolkit frequently produces carboxylic acid geometries
        in the unrealistic trans conformation::

            H----O
                /
               /
              /
            --C
               \\
                \\
                 O

        This method converts all conformers in the molecule with the trans conformation
        into the corresponding cis conformer by rotating the OH bond around the CO bond
        by 180 degrees. Carboxylic acids that are already cis are unchanged. Carboxylic
        acid groups are considered cis if their O-C-O-H dihedral angle is acute.

        Parameters
        ----------
        toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
            or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
            :class:`ToolkitRegistry` or :class:`ToolkitWrapper` used for the
            SMARTS search that locates the COOH groups.
        """
        # Return early if there are no conformers
        if not self._conformers:
            return
        # Convert all conformers into one big array of shape
        # (n_conformers, n_atoms, 3), unit-stripped to angstroms for the
        # numpy math below.
        conformers = np.asarray([q.m_as(unit.angstrom) for q in self._conformers])
        # Scan the molecule for carboxylic acids. Each match is a 4-tuple of
        # atom indices in SMARTS map-number order: (C, O(H), H, O=) is tagged
        # (:2, :3, :4, :1), i.e. position 0 is the carbonyl O, 1 the C,
        # 2 the hydroxyl O, 3 the H.
        cooh_indices = self.chemical_environment_matches(
            "[C:2]([O:3][H:4])=[O:1]", toolkit_registry=toolkit_registry
        )
        n_conformers, n_cooh_groups = len(conformers), len(cooh_indices)
        # Exit early if there are no carboxylic acids
        if not n_cooh_groups:
            return
        # Pull out the coordinates of all carboxylic acid groups into cooh_xyz
        cooh_xyz = conformers[:, cooh_indices, :]
        assert cooh_xyz.shape == (n_conformers, n_cooh_groups, 4, 3)

        def dot(a, b):
            """Compute dot product along last axis of arrays"""
            # Trailing np.newaxis keeps the result broadcastable against (..., 3).
            return np.sum(a * b, axis=-1)[..., np.newaxis]

        def norm(a):
            """Compute norm along last axis of array"""
            return np.linalg.norm(a, axis=-1)[..., np.newaxis]

        def dihedral(a):
            """Compute dihedrals of array with shape (..., 4, 3)"""
            # Praxeolitic formula
            # 1 sqrt, 1 cross product
            # from https://stackoverflow.com/q/20305272
            p0 = a[..., 0, :]
            p1 = a[..., 1, :]
            p2 = a[..., 2, :]
            p3 = a[..., 3, :]
            b0 = -1.0 * (p1 - p0)
            b1 = p2 - p1
            b2 = p3 - p2
            # normalize b1 so that it does not influence magnitude of vector
            # rejections that come next
            b1 /= norm(b1)
            # vector rejections
            # v = projection of b0 onto plane perpendicular to b1
            #   = b0 minus component that aligns with b1
            # w = projection of b2 onto plane perpendicular to b1
            #   = b2 minus component that aligns with b1
            v = b0 - dot(b0, b1) * b1
            w = b2 - dot(b2, b1) * b1
            # angle between v and w in a plane is the torsion angle
            # v and w may not be normalized but that's fine since tan is y/x
            x = dot(v, w)
            y = dot(np.cross(b1, v), w)
            return np.arctan2(y, x)

        dihedrals = dihedral(cooh_xyz)
        assert dihedrals.shape == (n_conformers, n_cooh_groups, 1)
        # Add a trailing axis so the masks below broadcast over (4 atoms, 3 coords).
        dihedrals.shape = (n_conformers, n_cooh_groups, 1, 1)
        # Get indices of trans COOH groups: cis means an acute O-C-O-H
        # dihedral (within +/- pi/2); trans is the complement.
        trans_indices = np.logical_not(
            np.logical_and((-np.pi / 2) < dihedrals, dihedrals < (np.pi / 2))
        )
        # Expand array so it can be used to index cooh_xyz
        trans_indices = np.repeat(trans_indices, repeats=4, axis=2)
        trans_indices = np.repeat(trans_indices, repeats=3, axis=3)
        # Get indices of individual atoms in trans COOH groups (except terminal O).
        # Each mask keeps exactly one of the four match positions.
        trans_indices_h = trans_indices.copy()
        trans_indices_h[:, :, (0, 1, 2), :] = False
        trans_indices_c = trans_indices.copy()
        trans_indices_c[:, :, (0, 2, 3), :] = False
        trans_indices_o = trans_indices.copy()
        trans_indices_o[:, :, (0, 1, 3), :] = False
        # Rotate OH around CO bond
        # We want to rotate H 180 degrees around the CO bond (b1)
        c = cooh_xyz[trans_indices_c].reshape(-1, 3)
        o = cooh_xyz[trans_indices_o].reshape(-1, 3)
        h = cooh_xyz[trans_indices_h].reshape(-1, 3)
        # Axis is defined as the line from the origin along a unit vector, so
        # move C to the origin and normalize
        point = h - c
        axis = o - c
        axis /= norm(axis)
        # Do the rotation
        # https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
        # For a 180 degree rotation the general formula reduces to
        # 2*axis*(axis . point) - point, written here as the axis-parallel
        # component minus the perpendicular component (triple cross product).
        rotated = axis * (dot(axis, point)) - np.cross(np.cross(axis, point), axis)
        # Move rotated point back to original coordinates
        rotated = rotated + c
        # Update the coordinates (boolean-mask assignment needs a flat array)
        cooh_xyz[trans_indices_h] = rotated.reshape((-1))
        # Update conformers with rotated coordinates
        conformers[:, cooh_indices, :] = cooh_xyz
        # Return conformers to original type
        self._conformers = [unit.Quantity(conf, unit.angstrom) for conf in conformers]
def apply_elf_conformer_selection(
self,
percentage: float = 2.0,
limit: int = 10,
toolkit_registry: Optional[
Union[ToolkitRegistry, ToolkitWrapper]
] = GLOBAL_TOOLKIT_REGISTRY,
**kwargs,
):
"""Select a set of diverse conformers from the molecule's conformers with ELF.
Applies the `Electrostatically Least-interacting Functional groups method
<https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_
to select a set of diverse conformers which have minimal
electrostatically strongly interacting functional groups from the
molecule's conformers.
Parameters
----------
toolkit_registry
The underlying toolkit to use to select the ELF conformers.
percentage
The percentage of conformers with the lowest electrostatic
interaction energies to greedily select from.
limit
The maximum number of conformers to select.
Notes
-----
* The input molecule should have a large set of conformers already
generated to select the ELF conformers from.
* The selected conformers will be retained in the `conformers` list
while unselected conformers will be discarded.
See Also
--------
openff.toolkit.utils.toolkits.OpenEyeToolkitWrapper.apply_elf_conformer_selection
openff.toolkit.utils.toolkits.RDKitToolkitWrapper.apply_elf_conformer_selection
"""
if isinstance(toolkit_registry, ToolkitRegistry):
toolkit_registry.call(
"apply_elf_conformer_selection",
molecule=self,
percentage=percentage,
limit=limit,
**kwargs,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
toolkit.apply_elf_conformer_selection( # type: ignore[attr-defined]
molecule=self, percentage=percentage, limit=limit, **kwargs
)
else:
raise InvalidToolkitRegistryError(
f"Invalid toolkit_registry passed to apply_elf_conformer_selection."
f"Expected ToolkitRegistry or ToolkitWrapper. Got "
f"{type(toolkit_registry)}"
)
def compute_partial_charges_am1bcc(
self,
use_conformers=None,
strict_n_conformers=False,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
"""
.. deprecated:: 0.11.0
This method was deprecated in v0.11.0 and will soon be removed.
Use :py:meth:`assign_partial_charges(partial_charge_method='am1bcc')
<Molecule.assign_partial_charges>` instead.
Calculate partial atomic charges for this molecule using AM1-BCC run by an underlying toolkit
and assign them to this molecule's ``partial_charges`` attribute.
Parameters
----------
strict_n_conformers : bool, default=False
Whether to raise an exception if an invalid number of conformers is provided for the given charge method.
If this is False and an invalid number of conformers is found, a warning will be raised.
use_conformers : iterable of openmm.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3)
and dimension of distance. Optional, default=None Coordinates to use for partial charge calculation.
If None, an appropriate number of conformers for the given charge method will be generated.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for the calculation
Examples
--------
>>> molecule = Molecule.from_smiles('CCCCCC')
>>> molecule.generate_conformers()
>>> molecule.compute_partial_charges_am1bcc()
Raises
------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
"""
# TODO: Remove in version 0.12.0
warnings.warn(
"compute_partial_charges_am1bcc is deprecated and will be removed in version 0.12.0. "
"Use assign_partial_charges(partial_charge_method='am1bcc') instead.",
UserWarning,
)
self.assign_partial_charges(
partial_charge_method="am1bcc",
use_conformers=use_conformers,
strict_n_conformers=strict_n_conformers,
toolkit_registry=toolkit_registry,
)
def assign_partial_charges(
self,
partial_charge_method: str,
strict_n_conformers=False,
use_conformers=None,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
normalize_partial_charges=True,
):
"""
Calculate partial atomic charges and store them in the molecule.
``assign_partial_charges`` computes charges using the specified toolkit
and assigns the new values to the ``partial_charges`` attribute.
Supported charge methods vary from toolkit to toolkit, but some
supported methods are:
- ``"am1bcc"``
- ``"am1bccelf10"`` (requires OpenEye Toolkits)
- ``"am1-mulliken"``
- ``"mmff94"``
- ``"gasteiger"``
For more supported charge methods and details, see the corresponding
methods in each toolkit wrapper:
- :meth:`OpenEyeToolkitWrapper.assign_partial_charges \
<openff.toolkit.utils.toolkits.OpenEyeToolkitWrapper.assign_partial_charges>`
- :meth:`RDKitToolkitWrapper.assign_partial_charges \
<openff.toolkit.utils.toolkits.RDKitToolkitWrapper.assign_partial_charges>`
- :meth:`AmberToolsToolkitWrapper.assign_partial_charges \
<openff.toolkit.utils.toolkits.AmberToolsToolkitWrapper.assign_partial_charges>`
- :meth:`BuiltInToolkitWrapper.assign_partial_charges \
<openff.toolkit.utils.toolkits.BuiltInToolkitWrapper.assign_partial_charges>`
Parameters
----------
partial_charge_method : string
The partial charge calculation method to use for partial charge
calculation.
strict_n_conformers : bool, default=False
Whether to raise an exception if an invalid number of conformers is
provided for the given charge method. If this is False and an
invalid number of conformers is found, a warning will be raised.
use_conformers : iterable of openmm.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and
dimension of distance. Optional, default=None
Coordinates to use for partial charge calculation. If None, an
appropriate number of conformers will be generated.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
openff.toolkit.utils.toolkits.ToolkitWrapper,
optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for the
calculation.
normalize_partial_charges : bool, default=True
Whether to offset partial charges so that they sum to the total
formal charge of the molecule. This is used to prevent accumulation
of rounding errors when the partial charge assignment method returns
values at limited precision.
Examples
--------
>>> molecule = Molecule.from_smiles('CCCCCC')
>>> molecule.assign_partial_charges('am1-mulliken')
Raises
------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
See Also
--------
openff.toolkit.utils.toolkits.OpenEyeToolkitWrapper.assign_partial_charges
openff.toolkit.utils.toolkits.RDKitToolkitWrapper.assign_partial_charges
openff.toolkit.utils.toolkits.AmberToolsToolkitWrapper.assign_partial_charges
openff.toolkit.utils.toolkits.BuiltInToolkitWrapper.assign_partial_charges
"""
# Raise a warning when users try to apply these charge methods to "large" molecules
WARN_LARGE_MOLECULES: Set[str] = {
"am1bcc",
"am1bccelf10",
"am1-mulliken",
"am1bccnosymspt",
"am1elf10",
}
if partial_charge_method in WARN_LARGE_MOLECULES:
if self.n_atoms > 150:
warnings.warn(
f"Warning! Partial charge method '{partial_charge_method}' is not designed "
"for use on large (i.e. > 150 atoms) molecules and may crash or take hours to "
f"run on this molecule (found {self.n_atoms} atoms). For more, see "
"https://docs.openforcefield.org/projects/toolkit/en/stable/faq.html"
"#parameterizing-my-system-which-contains-a-large-molecule-is-taking-forever-whats-wrong",
)
if isinstance(toolkit_registry, ToolkitRegistry):
# We may need to try several toolkitwrappers to find one
# that supports the desired partial charge method, so we
# tell the ToolkitRegistry to continue trying ToolkitWrappers
# if one raises an error (raise_exception_types=[])
toolkit_registry.call(
"assign_partial_charges",
molecule=self,
partial_charge_method=partial_charge_method,
use_conformers=use_conformers,
strict_n_conformers=strict_n_conformers,
normalize_partial_charges=normalize_partial_charges,
raise_exception_types=[],
_cls=self.__class__,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit_wrapper: ToolkitWrapper = toolkit_registry
toolkit_wrapper.assign_partial_charges( # type: ignore[attr-defined]
self,
partial_charge_method=partial_charge_method,
use_conformers=use_conformers,
strict_n_conformers=strict_n_conformers,
normalize_partial_charges=normalize_partial_charges,
_cls=self.__class__,
)
else:
raise InvalidToolkitRegistryError(
f"Invalid toolkit_registry passed to assign_partial_charges."
f"Expected ToolkitRegistry or ToolkitWrapper. Got {type(toolkit_registry)}"
)
def _normalize_partial_charges(self):
"""
Add offsets to each partial charge to ensure that they sum to the formal charge of the molecule,
to the limit of a python float's precision. Modifies the partial charges in-place.
"""
expected_charge = self.total_charge
current_charge = 0.0 * unit.elementary_charge
for pc in self.partial_charges:
current_charge += pc
charge_offset = (expected_charge - current_charge) / self.n_atoms
self.partial_charges += charge_offset
def assign_fractional_bond_orders(
self,
bond_order_model=None,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
use_conformers=None,
):
"""
Update and store list of bond orders this molecule.
Bond orders are stored on each bond, in the
``bond.fractional_bond_order`` attribute.
.. warning :: This API is experimental and subject to change.
Parameters
----------
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES-to-molecule conversion
bond_order_model : string, optional. Default=None
The bond order model to use for fractional bond order calculation. If ``None``, ``"am1-wiberg"`` is used.
use_conformers : iterable of openmm.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance,
optional, default=None
The conformers to use for fractional bond order calculation. If ``None``, an appropriate number
of conformers will be generated by an available ``ToolkitWrapper``.
Examples
--------
>>> from openff.toolkit import Molecule
>>> molecule = Molecule.from_smiles('CCCCCC')
>>> molecule.assign_fractional_bond_orders()
Raises
------
InvalidToolkitRegistryError
If an invalid object is passed as the toolkit_registry parameter
"""
if isinstance(toolkit_registry, ToolkitRegistry):
return toolkit_registry.call(
"assign_fractional_bond_orders",
self,
bond_order_model=bond_order_model,
use_conformers=use_conformers,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
return toolkit.assign_fractional_bond_orders(
self, bond_order_model=bond_order_model, use_conformers=use_conformers
)
else:
raise InvalidToolkitRegistryError(
f"Invalid toolkit_registry passed to assign_fractional_bond_orders. "
f"Expected ToolkitRegistry or ToolkitWrapper. Got {type(toolkit_registry)}."
)
def _invalidate_cached_properties(self):
"""
Indicate that the chemical entity has been altered.
"""
# if hasattr(self, '_cached_properties'):
# delattr(self, '_cached_properties')
self._conformers = None
self._partial_charges = None
self._propers = None
self._impropers = None
self._hill_formula = None
self._cached_smiles = None
# TODO: Clear fractional bond orders
self._ordered_connection_table_hash = None
for atom in self.atoms:
if "_molecule_atom_index" in atom.__dict__:
del atom.__dict__["_molecule_atom_index"]
def to_networkx(self):
"""Generate a NetworkX undirected graph from the molecule.
Nodes are Atoms labeled with atom indices and atomic elements (via the ``element`` node atrribute).
Edges denote chemical bonds between Atoms.
.. todo ::
* Do we need a ``from_networkx()`` method? If so, what would the Graph be required to provide?
* Should edges be labeled with discrete bond types in some aromaticity model?
* Should edges be labeled with fractional bond order if a method is specified?
* Should we add other per-atom and per-bond properties (e.g. partial charges) if present?
* Can this encode bond/atom chirality?
Returns
-------
graph : networkx.Graph
The resulting graph, with nodes (atoms) labeled with atom indices, elements, stereochemistry and
aromaticity flags and bonds with two atom indices, bond order, stereochemistry, and aromaticity flags
Examples
--------
Retrieve the bond graph for imatinib (OpenEye toolkit required)
>>> molecule = Molecule.from_iupac('imatinib')
>>> nxgraph = molecule.to_networkx()
"""
import networkx as nx
G = nx.Graph()
for atom in self.atoms:
G.add_node(
atom.molecule_atom_index,
atomic_number=atom.atomic_number,
is_aromatic=atom.is_aromatic,
stereochemistry=atom.stereochemistry,
formal_charge=atom.formal_charge,
)
# G.add_node(atom.molecule_atom_index, attr_dict={'atomic_number': atom.atomic_number})
for bond in self.bonds:
G.add_edge(
bond.atom1_index,
bond.atom2_index,
bond_order=bond.bond_order,
is_aromatic=bond.is_aromatic,
stereochemistry=bond.stereochemistry,
)
# G.add_edge(bond.atom1_index, bond.atom2_index, attr_dict={'order':bond.bond_order})
return G
def find_rotatable_bonds(
self, ignore_functional_groups=None, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY
):
"""
Find all bonds classed as rotatable ignoring any matched to the ``ignore_functional_groups`` list.
Parameters
----------
ignore_functional_groups: optional, List[str], default=None,
A list of bond SMARTS patterns to be ignored when finding rotatable bonds.
toolkit_registry: openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapperl, optional, default=None
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMARTS matching
Returns
-------
bonds: List[openff.toolkit.topology.molecule.Bond]
The list of openff.toolkit.topology.molecule.Bond instances which are rotatable.
"""
# general rotatable bond smarts taken from RDKit
# https://github.com/rdkit/rdkit/blob/1bf6ef3d65f5c7b06b56862b3fb9116a3839b229/rdkit/Chem/Lipinski.py#L47%3E
rotatable_bond_smarts = "[!$(*#*)&!D1:1]-&!@[!$(*#*)&!D1:2]"
# get all of the general matches
general_matches = self.chemical_environment_matches(
query=rotatable_bond_smarts, toolkit_registry=toolkit_registry
)
# this will give all forwards and backwards matches, so condense them down with this function
def condense_matches(matches):
condensed_matches = set()
for m in matches:
condensed_matches.add(tuple(sorted(m)))
return condensed_matches
general_bonds = condense_matches(general_matches)
# now refine the list using the ignore groups
if ignore_functional_groups is not None:
matches_to_ignore = set()
# make ignore_functional_groups an iterable object
if isinstance(ignore_functional_groups, str):
ignore_functional_groups = [ignore_functional_groups]
else:
try:
iter(ignore_functional_groups)
except TypeError:
ignore_functional_groups = [ignore_functional_groups]
# find the functional groups to remove
for functional_group in ignore_functional_groups:
# note I run the searches through this function so they have to be SMIRKS?
ignore_matches = self.chemical_environment_matches(
query=functional_group, toolkit_registry=toolkit_registry
)
ignore_matches = condense_matches(ignore_matches)
# add the new matches to the matches to ignore
matches_to_ignore.update(ignore_matches)
# now remove all the matches
for match in matches_to_ignore:
try:
general_bonds.remove(match)
# if the key is not in the list, the ignore pattern was not valid
except KeyError:
continue
# gather a list of bond instances to return
rotatable_bonds = [self.get_bond_between(*bond) for bond in general_bonds]
return rotatable_bonds
def _add_atom(
self,
atomic_number,
formal_charge,
is_aromatic,
stereochemistry=None,
name=None,
metadata=None,
invalidate_cache: bool = True,
):
"""
Add an atom
Parameters
----------
atomic_number : int
Atomic number of the atom
formal_charge : int
Formal charge of the atom
is_aromatic : bool
If True, atom is aromatic; if False, not aromatic
stereochemistry : str, optional, default=None
Either 'R' or 'S' for specified stereochemistry, or None if stereochemistry is irrelevant
name : str, optional, default=None
An optional name for the atom
metadata : dict[str: (int, str)], default=None
An optional dictionary where keys are strings and values are strings or ints. This is intended
to record atom-level information used to inform hierarchy definition and iteration, such as
grouping atom by residue and chain.
invalidate_cache : bool, default=True
Whether or not to invalidate the cache of the molecule upon the addition of this atom. This should
be left to its default value (`True`) for safety.
Returns
-------
index : int
The index of the atom in the molecule
Examples
--------
Define a methane molecule
>>> molecule = Molecule()
>>> molecule.name = 'methane'
>>> C = molecule.add_atom(6, 0, False)
>>> H1 = molecule.add_atom(1, 0, False)
>>> H2 = molecule.add_atom(1, 0, False)
>>> H3 = molecule.add_atom(1, 0, False)
>>> H4 = molecule.add_atom(1, 0, False)
>>> bond_idx = molecule.add_bond(C, H1, False, 1)
>>> bond_idx = molecule.add_bond(C, H2, False, 1)
>>> bond_idx = molecule.add_bond(C, H3, False, 1)
>>> bond_idx = molecule.add_bond(C, H4, False, 1)
"""
# Create an atom
atom = Atom(
atomic_number,
formal_charge,
is_aromatic,
stereochemistry=stereochemistry,
name=name,
metadata=metadata,
molecule=self,
)
self._atoms.append(atom)
if invalidate_cache:
self._invalidate_cached_properties()
# Since we just appended it, we can just return the length - 1
return len(self._atoms) - 1
def _add_bond(
self,
atom1,
atom2,
bond_order,
is_aromatic,
stereochemistry=None,
fractional_bond_order=None,
invalidate_cache: bool = True,
):
"""
Add a bond between two specified atom indices
Parameters
----------
atom1 : int or openff.toolkit.topology.molecule.Atom
Index of first atom or first atom
atom2_index : int or openff.toolkit.topology.molecule.Atom
Index of second atom or second atom
bond_order : int
Integral bond order of Kekulized form
is_aromatic : bool
True if this bond is aromatic, False otherwise
stereochemistry : str, optional, default=None
Either 'E' or 'Z' for specified stereochemistry, or None if stereochemistry is irrelevant
fractional_bond_order : float, optional, default=None
The fractional (eg. Wiberg) bond order
invalidate_cache : bool, default=True
Whether or not to invalidate the cache of the molecule upon the addition of this atom. This should
be left to its default value (`True`) for safety.
Returns
-------
index : int
The index of the bond in the molecule
"""
if isinstance(atom1, int) and isinstance(atom2, int):
atom1_atom = self.atoms[atom1]
atom2_atom = self.atoms[atom2]
elif isinstance(atom1, Atom) and isinstance(atom2, Atom):
atom1_atom = atom1
atom2_atom = atom2
else:
raise Exception(
"Invalid inputs to molecule._add_bond. Expected ints or Atoms. "
f"Received {atom1} (type {type(atom1)}) and {atom2} (type {type(atom2)}) "
)
# TODO: Check to make sure bond does not already exist
if atom1_atom.is_bonded_to(atom2_atom):
raise Exception(
f"Bond already exists between {atom1_atom} and {atom2_atom})"
)
bond = Bond(
atom1_atom,
atom2_atom,
bond_order,
is_aromatic,
stereochemistry=stereochemistry,
fractional_bond_order=fractional_bond_order,
)
self._bonds.append(bond)
if invalidate_cache:
self._invalidate_cached_properties()
# Since we just appended it, we can just return the length - 1
return len(self._bonds) - 1
def _add_conformer(self, coordinates):
"""
Add a conformation of the molecule
Parameters
----------
coordinates: openmm.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance
Coordinates of the new conformer, with the first dimension of the array corresponding to the atom index in
the molecule's indexing system.
Returns
-------
index: int
The index of this conformer
"""
if coordinates.shape != (self.n_atoms, 3):
raise InvalidConformerError(
"molecule.add_conformer given input of the wrong shape: "
f"Given {coordinates.shape}, expected {(self.n_atoms, 3)}"
)
if isinstance(coordinates, unit.Quantity):
if not coordinates.units.is_compatible_with(unit.angstrom):
raise IncompatibleUnitError(
"Coordinates passed to Molecule._add_conformer with incompatible units. "
"Ensure that units are dimension of length."
)
elif hasattr(coordinates, "unit"):
from openff.units.openmm import from_openmm
from openmm import unit as openmm_unit
if not isinstance(coordinates, openmm_unit.Quantity):
raise IncompatibleUnitError(
"Unsupported type passed to Molecule._add_conformer setter. "
"Found object of type {type(other)}."
)
if not coordinates.unit.is_compatible(openmm_unit.meter):
raise IncompatibleUnitError(
"Coordinates passed to Molecule._add_conformer with units of incompatible dimensionality. "
f"Adding conformers with OpenMM-style units is supported, by found units of {coordinates.unit}. "
"Ensure that units are dimension of length."
)
coordinates = from_openmm(coordinates)
else:
raise IncompatibleUnitError(
"Unknown object passed to Molecule._add_conformer. Expected types include "
f"openmm.unit.Quantity and openff.units.unit.Quantity, found type {type(coordinates)}."
)
tmp_conf = unit.Quantity(
np.zeros(shape=(self.n_atoms, 3), dtype=float), unit.angstrom
)
try:
tmp_conf[:] = coordinates
except AttributeError as e:
print(e)
if self._conformers is None:
# TODO should we checking that the exact same conformer is not in the list already?
self._conformers = []
self._conformers.append(tmp_conf)
return len(self._conformers)
@property
def partial_charges(self):
"""
Returns the partial charges (if present) on the molecule.
Returns
-------
partial_charges : a openmm.unit.Quantity - wrapped numpy array [1 x n_atoms] or None
The partial charges on the molecule's atoms. Returns None if no charges have been specified.
"""
return self._partial_charges
@partial_charges.setter
def partial_charges(self, charges):
"""
Set the atomic partial charges for this molecule.
Parameters
----------
charges : None or a openmm.unit.Quantity - wrapped numpy array [1 x n_atoms]
The partial charges to assign to the molecule. If not None, must be in units compatible with
openmm.unit.elementary_charge
"""
if charges is None:
self._partial_charges = None
elif charges.shape == (self.n_atoms,):
if isinstance(charges, unit.Quantity):
if charges.units in unit.elementary_charge.compatible_units():
self._partial_charges = charges
if hasattr(charges, "unit"):
from openmm import unit as openmm_unit
if not isinstance(charges, openmm_unit.Quantity):
raise IncompatibleUnitError(
"Unsupported type passed to partial_charges setter. "
"Found object of type {type(charges)}."
)
elif isinstance(charges, openmm_unit.Quantity):
from openff.units.openmm import from_openmm
converted = from_openmm(charges)
if converted.units in unit.elementary_charge.compatible_units():
self._partial_charges = converted
    @property
    def n_particles(self) -> int:
        """
        .. deprecated:: 0.11.0
            This property has been deprecated and will soon be removed. Use
            :meth:`Molecule.n_atoms` instead.
        ..
        """
        # Emit a deprecation warning, then delegate to the replacement API.
        _molecule_deprecation("n_particles", "n_atoms")
        return self.n_atoms
@property
def n_atoms(self) -> int:
"""
The number of Atom objects.
"""
return len(self._atoms)
@property
def n_bonds(self):
"""
The number of Bond objects in the molecule.
"""
return sum([1 for bond in self.bonds])
@property
def n_angles(self) -> int:
"""Number of angles in the molecule."""
self._construct_angles()
return len(self._angles)
@property
def n_propers(self) -> int:
"""Number of proper torsions in the molecule."""
self._construct_torsions()
assert (
self._propers is not None
), "_construct_torsions always sets _propers to a set"
return len(self._propers)
@property
def n_impropers(self) -> int:
"""Number of possible improper torsions in the molecule."""
self._construct_torsions()
assert (
self._impropers is not None
), "_construct_torsions always sets _impropers to a set"
return len(self._impropers)
    @property
    def particles(self) -> List[Atom]:
        """
        .. deprecated:: 0.11.0
            This property has been deprecated and will soon be removed. Use
            :meth:`Molecule.atoms` instead.
        ..
        """
        # Emit a deprecation warning, then delegate to the replacement API.
        _molecule_deprecation("particles", "atoms")
        return self.atoms
    def particle(self, index: int) -> Atom:
        """
        .. deprecated:: 0.11.0
            This method has been deprecated and will soon be removed. Use
            :meth:`Molecule.atom` instead.
        """
        # Emit a deprecation warning, then delegate to the replacement API.
        _molecule_deprecation("particle", "atom")
        return self.atom(index)
    def particle_index(self, particle: Atom) -> int:
        """
        .. deprecated:: 0.11.0
            This method has been deprecated and will soon be removed. Use
            :meth:`Molecule.atom_index` instead.
        """
        # Emit a deprecation warning, then delegate to the replacement API.
        _molecule_deprecation("particle_index", "atom_index")
        return self.atom_index(particle)
@property
def atoms(self):
"""
Iterate over all Atom objects in the molecule.
"""
return self._atoms
def atom(self, index: int) -> Atom:
"""
Get the atom with the specified index.
Parameters
----------
index : int
Returns
-------
atom : openff.toolkit.topology.Atom
"""
return self._atoms[index]
def atom_index(self, atom: Atom) -> int:
"""
Returns the index of the given atom in this molecule
.. TODO: document behaviour when atom is not present in self
Parameters
----------
atom : openff.toolkit.topology.Atom
Returns
-------
index : int
The index of the given atom in this molecule
"""
return atom.molecule_atom_index
@property
def conformers(self):
"""
Returns the list of conformers for this molecule.
Conformers are presented as a list of ``Quantity``-wrapped NumPy
arrays, of shape (3 x n_atoms) and with dimensions of [Distance]. The
return value is the actual list of conformers, and changes to the
contents affect the original ``FrozenMolecule``.
"""
return self._conformers
@property
def n_conformers(self) -> int:
"""
The number of conformers for this molecule.
"""
if self._conformers is None:
return 0
return len(self._conformers)
@property
def bonds(self) -> List[Bond]:
"""
Iterate over all Bond objects in the molecule.
"""
return self._bonds
def bond(self, index: int) -> Bond:
"""
Get the bond with the specified index.
Parameters
----------
index : int
Returns
-------
bond : openff.toolkit.topology.Bond
"""
return self._bonds[index]
@property
def angles(self) -> Set[Tuple[Atom, Atom, Atom]]:
"""
Get an iterator over all i-j-k angles.
"""
self._construct_angles()
return self._angles
@property
def torsions(self) -> Set[Tuple[Atom, Atom, Atom, Atom]]:
"""
Get an iterator over all i-j-k-l torsions.
Note that i-j-k-i torsions (cycles) are excluded.
Returns
-------
torsions : iterable of 4-Atom tuples
"""
self._construct_torsions()
assert (
self._torsions is not None
), "_construct_torsions always sets _torsions to a set"
return self._torsions
@property
def propers(self) -> Set[Tuple[Atom, Atom, Atom, Atom]]:
"""
Iterate over all proper torsions in the molecule
.. todo::
* Do we need to return a ``Torsion`` object that collects information about fractional bond orders?
"""
self._construct_torsions()
assert (
self._propers is not None
), "_construct_torsions always sets _propers to a set"
return self._propers
@property
def impropers(self) -> Set[Tuple[Atom, Atom, Atom, Atom]]:
"""
Iterate over all improper torsions in the molecule.
.. todo ::
* Do we need to return a ``Torsion`` object that collects information about fractional bond orders?
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the atoms making
up a possible improper torsion.
See Also
--------
smirnoff_impropers, amber_impropers
"""
self._construct_torsions()
assert (
self._impropers is not None
), "_construct_torsions always sets _impropers to a set"
return self._impropers
@property
def smirnoff_impropers(self) -> Set[Tuple[Atom, Atom, Atom, Atom]]:
"""
Iterate over all impropers with trivalent centers, reporting the central atom second.
The central atom is reported second in each torsion. This method reports
an improper for each trivalent atom in the molecule, whether or not any
given force field would assign it improper torsion parameters.
Also note that this will return 6 possible atom orderings around each improper
center. In current SMIRNOFF parameterization, three of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angles. These three orderings capture the three unique
angles that could be calculated around the improper center, therefore the sum
of these three terms will always return a consistent energy.
The exact three orderings that will be applied during parameterization can not be
determined in this method, since it requires sorting the atom indices, and
those indices may change when this molecule is added to a Topology.
For more details on the use of three-fold ('trefoil') impropers, see
https://openforcefield.github.io/standards/standards/smirnoff/#impropertorsions
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed second
in each tuple.
See Also
--------
impropers, amber_impropers
"""
# TODO: Replace with non-cheminformatics-toolkit method
# (ie. just looping over all atoms and finding ones that have 3 bonds?)
smirnoff_improper_smarts = "[*:1]~[X3:2](~[*:3])~[*:4]"
improper_idxs = self.chemical_environment_matches(smirnoff_improper_smarts)
smirnoff_impropers = {
(self.atom(imp[0]), self.atom(imp[1]), self.atom(imp[2]), self.atom(imp[3]))
for imp in improper_idxs
}
return smirnoff_impropers
@property
def amber_impropers(self) -> Set[Tuple[Atom, Atom, Atom, Atom]]:
"""
Iterate over all impropers with trivalent centers, reporting the central atom first.
The central atom is reported first in each torsion. This method reports
an improper for each trivalent atom in the molecule, whether or not any
given force field would assign it improper torsion parameters.
Also note that this will return 6 possible atom orderings around each
improper center. In current AMBER parameterization, one of these six
orderings will be used for the actual assignment of the improper term
and measurement of the angle. This method does not encode the logic to
determine which of the six orderings AMBER would use.
Returns
-------
impropers : set of tuple
An iterator of tuples, each containing the indices of atoms making
up a possible improper torsion. The central atom is listed first in
each tuple.
See Also
--------
impropers, smirnoff_impropers
"""
# TODO: Replace with non-cheminformatics-toolkit method
# (ie. just looping over all atoms and finding ones that have 3 bonds?)
amber_improper_smarts = "[X3:1](~[*:2])(~[*:3])~[*:4]"
improper_idxs = self.chemical_environment_matches(amber_improper_smarts)
amber_impropers = {
(self.atom(imp[0]), self.atom(imp[1]), self.atom(imp[2]), self.atom(imp[3]))
for imp in improper_idxs
}
return amber_impropers
def nth_degree_neighbors(self, n_degrees):
"""
Return canonicalized pairs of atoms whose shortest separation is `exactly` n bonds.
Only pairs with increasing atom indices are returned.
Parameters
----------
n: int
The number of bonds separating atoms in each pair
Returns
-------
neighbors: iterator of tuple of Atom
Tuples (len 2) of atom that are separated by ``n`` bonds.
Notes
-----
The criteria used here relies on minimum distances; when there are multiple valid
paths between atoms, such as atoms in rings, the shortest path is considered.
For example, two atoms in "meta" positions with respect to each other in a benzene
are separated by two paths, one length 2 bonds and the other length 4 bonds. This
function would consider them to be 2 apart and would not include them if ``n=4`` was
passed.
"""
if n_degrees <= 0:
raise ValueError(
"Cannot consider neighbors separated by 0 or fewer atoms. Asked to consider "
f"path lengths of {n_degrees}."
)
else:
return _nth_degree_neighbors_from_graphlike(
graphlike=self, n_degrees=n_degrees
)
@property
def total_charge(self):
"""
Return the total charge on the molecule
"""
charge_sum = 0.0 * unit.elementary_charge
for atom in self.atoms:
charge_sum += atom.formal_charge
return charge_sum
@property
def name(self) -> str:
"""
The name (or title) of the molecule
"""
return self._name
@name.setter
def name(self, other):
"""
Set the name of this molecule
"""
if other is None:
self._name = ""
elif type(other) is str:
self._name = other
else:
raise Exception("Molecule name must be a string")
@property
def properties(self) -> Dict[str, Any]:
"""
The properties dictionary of the molecule
"""
return self._properties
@property
def hill_formula(self) -> str:
"""
Get the Hill formula of the molecule
"""
return self.to_hill_formula()
def to_hill_formula(self) -> str:
"""
Generate the Hill formula of this molecule.
Returns
----------
formula : the Hill formula of the molecule
Raises
-----------
NotImplementedError : if the molecule is not of one of the specified types.
"""
if self._hill_formula is None:
atom_nums = [atom.atomic_number for atom in self.atoms]
self._hill_formula = _atom_nums_to_hill_formula(atom_nums)
return self._hill_formula
@staticmethod
def _object_to_hill_formula(obj: Union["Molecule", "nx.Graph"]) -> str:
"""Take a Molecule or NetworkX graph and generate its Hill formula.
This provides a backdoor to the old functionality of Molecule.to_hill_formula, which
was a static method that duck-typed inputs of Molecule or graph objects."""
import networkx as nx
if isinstance(obj, FrozenMolecule):
return obj.to_hill_formula()
elif isinstance(obj, nx.Graph):
return _networkx_graph_to_hill_formula(obj)
else:
raise TypeError(
"_object_to_hill_formula accepts a NetworkX Graph or OpenFF "
+ f"(Frozen)Molecule, not {type(obj)}"
)
def chemical_environment_matches(
self,
query,
unique=False,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
"""Find matches in the molecule for a SMARTS string or ``ChemicalEnvironment`` query
Parameters
----------
query : str or ChemicalEnvironment
SMARTS string (with one or more tagged atoms) or ``ChemicalEnvironment`` query.
Query will internally be resolved to SMIRKS using ``query.asSMIRKS()`` if it has an ``.asSMIRKS`` method.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for chemical environment matches
Returns
-------
matches : list of atom index tuples
A list of tuples, containing the indices of the matching atoms.
Examples
--------
Retrieve all the carbon-carbon bond matches in a molecule
>>> molecule = Molecule.from_iupac('imatinib')
>>> matches = molecule.chemical_environment_matches('[#6X3:1]~[#6X3:2]')
.. todo ::
* Do we want to generalize ``query`` to allow other kinds of queries,
such as mdtraj DSL, pymol selections, atom index slices, etc? We
could call it ``topology.matches(query)`` instead of
``chemical_environment_matches``
"""
# Resolve to SMIRKS if needed
# TODO: Update this to use updated ChemicalEnvironment API
if hasattr(query, "smirks"):
smirks = query.smirks
elif type(query) == str:
smirks = query
else:
raise ValueError("'query' must be either a string or a ChemicalEnvironment")
# Use specified cheminformatics toolkit to determine matches with specified aromaticity model
# TODO: Simplify this by requiring a toolkit registry for the molecule?
# TODO: Do we have to pass along an aromaticity model?
if isinstance(toolkit_registry, ToolkitRegistry):
matches = toolkit_registry.call(
"find_smarts_matches",
self,
smirks,
unique=unique,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
matches = toolkit_registry.find_smarts_matches(
self,
smirks,
unique=unique,
)
else:
raise InvalidToolkitRegistryError(
"'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
)
return matches
@classmethod
def from_iupac(
cls,
iupac_name,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
allow_undefined_stereo=False,
**kwargs,
):
"""Generate a molecule from IUPAC or common name
.. note :: This method requires the OpenEye toolkit to be installed.
Parameters
----------
iupac_name : str
IUPAC name of molecule to be generated
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for chemical environment matches
allow_undefined_stereo : bool, default=False
If false, raises an exception if molecule contains undefined stereochemistry.
Returns
-------
molecule : Molecule
The resulting molecule with position
Examples
--------
Create a molecule from an IUPAC name
>>> molecule = Molecule.from_iupac('4-[(4-methylpiperazin-1-yl)methyl]-N-(4-methyl-3-{[4-(pyridin-3-yl)pyrimidin-2-yl]amino}phenyl)benzamide') # noqa
Create a molecule from a common name
>>> molecule = Molecule.from_iupac('imatinib')
"""
if isinstance(toolkit_registry, ToolkitRegistry):
molecule = toolkit_registry.call(
"from_iupac",
iupac_name,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
**kwargs,
)
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
molecule = toolkit.from_iupac(
iupac_name,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
**kwargs,
)
else:
raise Exception(
"Invalid toolkit_registry passed to from_iupac. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}."
)
return molecule
def to_iupac(self, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY):
"""Generate IUPAC name from Molecule
Returns
-------
iupac_name : str
IUPAC name of the molecule
.. note :: This method requires the OpenEye toolkit to be installed.
Examples
--------
>>> from openff.toolkit.utils import get_data_file_path
>>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
>>> molecule = Molecule(sdf_filepath)
>>> iupac_name = molecule.to_iupac()
"""
if isinstance(toolkit_registry, ToolkitRegistry):
to_iupac_method = toolkit_registry.resolve("to_iupac")
elif isinstance(toolkit_registry, ToolkitWrapper):
to_iupac_method = toolkit_registry.to_iupac
else:
raise Exception(
"Invalid toolkit_registry passed to to_iupac. Expected ToolkitRegistry or ToolkitWrapper. "
f"Got {type(toolkit_registry)}"
)
# TODO: Can `to_iupac` fail if given a well-behaved OFFMol/OEMol?
result = to_iupac_method(self)
return result
@classmethod
def from_topology(cls, topology):
"""Return a Molecule representation of an OpenFF Topology containing a single Molecule object.
Parameters
----------
topology : openff.toolkit.topology.Topology
The :class:`Topology` object containing a single :class:`Molecule` object.
Note that OpenMM and MDTraj ``Topology`` objects are not supported.
Returns
-------
molecule : openff.toolkit.topology.Molecule
The Molecule object in the topology
Raises
------
ValueError
If the topology does not contain exactly one molecule.
Examples
--------
Create a molecule from a Topology object that contains exactly one molecule
>>> from openff.toolkit import Molecule, Topology
>>> topology = Topology.from_molecules(Molecule.from_smiles('[CH4]'))
>>> molecule = Molecule.from_topology(topology)
"""
# TODO: Ensure we are dealing with an OpenFF Topology object
if topology.n_molecules != 1:
raise ValueError("Topology must contain exactly one molecule")
molecule = [i for i in topology.molecules][0]
return cls(molecule)
def to_topology(self):
"""
Return an OpenFF Topology representation containing one copy of this molecule
Returns
-------
topology : openff.toolkit.topology.Topology
A Topology representation of this molecule
Examples
--------
>>> from openff.toolkit import Molecule
>>> molecule = Molecule.from_iupac('imatinib')
>>> topology = molecule.to_topology()
"""
from openff.toolkit.topology import Topology
return Topology.from_molecules(self)
@classmethod
def from_file(
cls,
file_path,
file_format=None,
toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
allow_undefined_stereo=False,
):
"""
Create one or more molecules from a file
.. todo::
* Extend this to also include some form of .offmol Open Force Field Molecule format?
* Generalize this to also include file-like objects?
Parameters
----------
file_path : str or file-like object
The path to the file or file-like object to stream one or more molecules from.
file_format : str, optional, default=None
Format specifier, usually file suffix (eg. 'MOL2', 'SMI')
Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for your
loaded toolkits for details.
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for file loading. If a Toolkit is passed, only
the highest-precedence toolkit is used
allow_undefined_stereo : bool, default=False
If false, raises an exception if oemol contains undefined stereochemistry.
Returns
-------
molecules : Molecule or list of Molecules
If there is a single molecule in the file, a Molecule is returned;
otherwise, a list of Molecule objects is returned.
Examples
--------
>>> from openff.toolkit.tests.utils import get_monomer_mol2_file_path
>>> mol2_file_path = get_monomer_mol2_file_path('cyclohexane')
>>> molecule = Molecule.from_file(mol2_file_path)
"""
if file_format is None:
if isinstance(file_path, pathlib.Path):
file_path: str = file_path.as_posix()
if not isinstance(file_path, str):
raise Exception(
"If providing a file-like object for reading molecules, the format must be specified"
)
# Assume that files ending in ".gz" should use their second-to-last suffix for compatibility check
# TODO: Will all cheminformatics packages be OK with gzipped files?
if file_path[-3:] == ".gz":
file_format = file_path.split(".")[-2]
else:
file_format = file_path.split(".")[-1]
file_format = file_format.upper()
if file_format == "XYZ":
raise UnsupportedFileTypeError(
"Parsing `.xyz` files is not currently supported because they lack sufficient "
"chemical information to be used with SMIRNOFF force fields. For more information, "
"see https://open-forcefield-toolkit.readthedocs.io/en/latest/faq.html or to provide "
"feedback please visit https://github.com/openforcefield/openff-toolkit/issues/1145."
)
# Determine which toolkit to use (highest priority that's compatible with input type)
if isinstance(toolkit_registry, ToolkitRegistry):
# TODO: Encapsulate this logic into ToolkitRegistry.call()?
toolkit = None
supported_read_formats = {}
for query_toolkit in toolkit_registry.registered_toolkits:
if file_format in query_toolkit.toolkit_file_read_formats:
toolkit = query_toolkit
break
supported_read_formats[
query_toolkit.toolkit_name
] = query_toolkit.toolkit_file_read_formats
if toolkit is None:
msg = (
f"No toolkits in registry can read file {file_path} (format {file_format}). Supported "
f"formats in the provided ToolkitRegistry are {supported_read_formats}. "
)
# Per issue #407, not allowing RDKit to read mol2 has confused a lot of people. Here we add text
# to the error message that will hopefully reduce this confusion.
if file_format == "MOL2" and RDKitToolkitWrapper.is_available():
msg += (
"RDKit does not fully support input of molecules from mol2 format unless they "
"have Corina atom types, and this is not common in the simulation community. For this "
"reason, the Open Force Field Toolkit does not use "
"RDKit to read .mol2. Consider reading from SDF instead. If you would like to attempt "
"to use RDKit to read mol2 anyway, you can load the molecule of interest into an RDKit "
"molecule and use openff.toolkit.topology.Molecule.from_rdkit, but we do not recommend this."
)
elif file_format == "PDB" and RDKitToolkitWrapper.is_available():
msg += (
"RDKit can not safely read PDBs on their own. Information about bond order and aromaticity "
"is likely to be lost. PDBs can be used along with a valid smiles string with RDKit using "
"the constructor Molecule.from_pdb_and_smiles(file_path, smiles)"
)
raise NotImplementedError(msg)
elif isinstance(toolkit_registry, ToolkitWrapper):
# TODO: Encapsulate this logic in ToolkitWrapper?
toolkit = toolkit_registry
if file_format not in toolkit.toolkit_file_read_formats:
msg = (
f"Toolkit {toolkit.toolkit_name} can not read file {file_path} (format {file_format}). Supported "
f"formats for this toolkit are {toolkit.toolkit_file_read_formats}."
)
if toolkit.toolkit_name == "The RDKit" and file_format == "PDB":
msg += (
"RDKit can however read PDBs with a valid smiles string using the "
"Molecule.from_pdb_and_smiles(file_path, smiles) constructor"
)
raise NotImplementedError(msg)
else:
raise InvalidToolkitRegistryError(
"'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
)
mols = list()
if isinstance(file_path, str):
mols = toolkit.from_file(
file_path,
file_format=file_format,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
)
elif hasattr(file_path, "read"):
file_obj = file_path
mols = toolkit.from_file_obj(
file_obj,
file_format=file_format,
allow_undefined_stereo=allow_undefined_stereo,
_cls=cls,
)
if len(mols) == 0:
raise Exception(f"Unable to read molecule from file: {file_path}")
elif len(mols) == 1:
return mols[0]
return mols
    @classmethod
    @requires_package("openmm")
    def from_polymer_pdb(
        cls,
        file_path: Union[str, TextIO],
        toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
    ):
        """
        Loads a polymer from a PDB file.

        Currently only supports proteins with canonical amino acids that are
        either uncapped or capped by ACE/NME groups, but may later be extended
        to handle other common polymers, or accept user-defined polymer
        templates. Only one polymer chain may be present in the PDB file, and it
        must be the only molecule present.

        Connectivity and bond orders are assigned by matching SMARTS codes for
        the supported residues against atom names. The PDB file must include
        all atoms with the correct standard atom names described in the
        `PDB Chemical Component Dictionary <https://www.wwpdb.org/data/ccd>`_.
        Residue names are used to assist trouble-shooting failed assignments,
        but are not used in the actual assignment process.

        Metadata such as residues, chains, and atom names are recorded in the
        ``Atom.metadata`` attribute, which is a dictionary mapping from
        strings like "residue_name" to the appropriate value. ``from_polymer_pdb``
        returns a molecule that can be iterated over with the ``.residues`` and
        ``.chains`` attributes, as well as the usual ``.atoms``.

        Parameters
        ----------
        file_path : str or file object
            PDB information to be passed to OpenMM PDBFile object for loading
        toolkit_registry : ToolkitWrapper or ToolkitRegistry, default=GLOBAL_TOOLKIT_REGISTRY
            The toolkit (or registry of toolkits) used for chemical perception.

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule

        Raises
        ------
        UnassignedChemistryInPDBError
            If an atom or bond could not be assigned; the exception will
            provide a detailed diagnostic of what went wrong.
        MultipleMoleculesInPDBError
            If all atoms and bonds could be assigned, but the PDB includes
            multiple chains or molecules.
        """
        import openmm.unit as openmm_unit
        from openmm.app import PDBFile

        # Wrap a bare ToolkitWrapper in a registry so .call() is available below.
        if isinstance(toolkit_registry, ToolkitWrapper):
            toolkit_registry = ToolkitRegistry([toolkit_registry])
        pdb = PDBFile(file_path)
        # Substructure templates (with explicit bond orders) for the canonical
        # amino acids and ACE/NME caps, used to perceive chemistry from atom names.
        substructure_file_path = get_data_file_path(
            "proteins/aa_residues_substructures_explicit_bond_orders_with_caps.json"
        )
        with open(substructure_file_path, "r") as subfile:
            substructure_dictionary = json.load(subfile)
        offmol = toolkit_registry.call(
            "_polymer_openmm_topology_to_offmol", pdb.topology, substructure_dictionary
        )
        # Convert the OpenMM positions (sequence of Vec3) into a single
        # unit-wrapped (n_atoms, 3) array in angstrom.
        coords = unit.Quantity(
            np.array(
                [
                    [*vec3.value_in_unit(openmm_unit.angstrom)]
                    for vec3 in pdb.getPositions()
                ]
            ),
            unit.angstrom,
        )
        offmol.add_conformer(coords)
        # Aromaticity and stereochemistry are perceived from the 3D geometry.
        offmol = toolkit_registry.call("_assign_aromaticity_and_stereo_from_3d", offmol)
        # Copy PDB metadata (atom names, residue/chain identifiers) onto the
        # corresponding OpenFF atoms; ordering matches pdb.topology.atoms().
        for i, atom in enumerate(pdb.topology.atoms()):
            offmol.atoms[i].name = atom.name
            offmol.atoms[i].metadata["residue_name"] = atom.residue.name
            offmol.atoms[i].metadata["residue_number"] = atom.residue.id
            offmol.atoms[i].metadata["insertion_code"] = atom.residue.insertionCode
            offmol.atoms[i].metadata["chain_id"] = atom.residue.chain.id
        offmol.add_default_hierarchy_schemes()
        if offmol._has_multiple_molecules():
            raise MultipleMoleculesInPDBError(
                "This PDB has multiple molecules. The OpenFF Toolkit requires "
                + "that only one molecule is present in a PDB. Try splitting "
                + "each molecule into its own PDB with another tool, and "
                + "load any small molecules with Molecule.from_pdb_and_smiles."
            )
        return offmol
def _has_multiple_molecules(self) -> bool:
import networkx as nx
graph = self.to_networkx()
num_disconnected_subgraphs = sum(1 for _ in nx.connected_components(graph))
return num_disconnected_subgraphs > 1
def _to_xyz_file(self, file_path):
"""
Write the current molecule and its conformers to a multiframe xyz file, if the molecule
has no current coordinates all atoms will be set to 0,0,0 in keeping with the behaviour of the
backend toolkits.
Information on the type of XYZ file written can be found here <http://openbabel.org/wiki/XYZ_(format)>.
Parameters
----------
file_path : str or file-like object
A file-like object or the path to the file to be written.
"""
# If we do not have a conformer make one with all zeros
if self.n_conformers == 0:
conformers = [
unit.Quantity(np.zeros((self.n_atoms, 3), dtype=float), unit.angstrom)
]
else:
conformers = self._conformers
if len(conformers) == 1:
end = ""
title = (
lambda frame: f'{self.name if self.name != "" else self.hill_formula}{frame}\n'
)
else:
end = 1
title = (
lambda frame: f'{self.name if self.name != "" else self.hill_formula} Frame {frame}\n'
)
# check if we have a file path or an open file object
if isinstance(file_path, str):
xyz_data = open(file_path, "w")
else:
xyz_data = file_path
# add the data to the xyz_data list
for i, geometry in enumerate(conformers, 1):
xyz_data.write(f"{self.n_atoms}\n" + title(end))
for j, atom_coords in enumerate(geometry.m_as(unit.angstrom)):
x, y, z = atom_coords
xyz_data.write(
f"{SYMBOLS[self.atoms[j].atomic_number]} {x: .10f} {y: .10f} {z: .10f}\n"
)
# now we up the frame count
end = i + 1
# now close the file
xyz_data.close()
def to_file(self, file_path, file_format, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY):
"""Write the current molecule to a file or file-like object
Parameters
----------
file_path : str or file-like object
A file-like object or the path to the file to be written.
file_format : str
Format specifier, one of ['MOL2', 'MOL2H', 'SDF', 'PDB', 'SMI', 'CAN', 'TDT']
Note that not all toolkits support all formats
toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
or openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
:class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for file writing. If a Toolkit is passed,
only the highest-precedence toolkit is used
Raises
------
ValueError
If the requested file_format is not supported by one of the installed cheminformatics toolkits
Examples
--------
>>> molecule = Molecule.from_iupac('imatinib')
>>> molecule.to_file('imatinib.mol2', file_format='mol2') # doctest: +SKIP
>>> molecule.to_file('imatinib.sdf', file_format='sdf') # doctest: +SKIP
>>> molecule.to_file('imatinib.pdb', file_format='pdb') # doctest: +SKIP
"""
if isinstance(toolkit_registry, ToolkitRegistry):
pass
elif isinstance(toolkit_registry, ToolkitWrapper):
toolkit = toolkit_registry
toolkit_registry = ToolkitRegistry(toolkit_precedence=[])
toolkit_registry.add_toolkit(toolkit)
else:
raise InvalidToolkitRegistryError(
"'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
)
file_format = file_format.upper()
# check if xyz, use the toolkit independent method.
if file_format == "XYZ":
return self._to_xyz_file(file_path=file_path)
# Take the first toolkit that can write the desired output format
toolkit = None
for query_toolkit in toolkit_registry.registered_toolkits:
if file_format in query_toolkit.toolkit_file_write_formats:
toolkit = query_toolkit
break
# Raise an exception if no toolkit was found to provide the requested file_format
if toolkit is None:
supported_formats = {}
for toolkit in toolkit_registry.registered_toolkits:
supported_formats[
toolkit.toolkit_name
] = toolkit.toolkit_file_write_formats
raise ValueError(
f"The requested file format ({file_format}) is not available from any of the installed toolkits "
f"(supported formats: {supported_formats})"
)
# Write file
if type(file_path) == str:
# Open file for writing
toolkit.to_file(self, file_path, file_format)
else:
toolkit.to_file_obj(self, file_path, file_format)
def enumerate_tautomers(
    self, max_states=20, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY
):
    """
    Enumerate the possible tautomers of the current molecule.

    Parameters
    ----------
    max_states: int optional, default=20
        The maximum amount of molecules that should be returned
    toolkit_registry: openff.toolkit.utils.toolkits.ToolkitRegistry
    or openff.toolkit.utils.toolkits.ToolkitWrapper, default=GLOBAL_TOOLKIT_REGISTRY
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use to enumerate the tautomers.

    Returns
    -------
    molecules: List[openff.toolkit.topology.Molecule]
        A list of openff.toolkit.topology.Molecule instances not including the input molecule.

    Raises
    ------
    InvalidToolkitRegistryError
        If ``toolkit_registry`` is neither a ToolkitRegistry nor a ToolkitWrapper.
    """
    # Dispatch through the registry (picks the highest-precedence capable
    # toolkit) or call a single wrapper directly.
    if isinstance(toolkit_registry, ToolkitRegistry):
        return toolkit_registry.call(
            "enumerate_tautomers", molecule=self, max_states=max_states
        )
    if isinstance(toolkit_registry, ToolkitWrapper):
        return toolkit_registry.enumerate_tautomers(self, max_states=max_states)
    raise InvalidToolkitRegistryError(
        "'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
    )
def enumerate_stereoisomers(
    self,
    undefined_only=False,
    max_isomers=20,
    rationalise=True,
    toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
    """
    Enumerate the stereocenters and bonds of the current molecule.

    Parameters
    ----------
    undefined_only: bool optional, default=False
        If we should enumerate all stereocenters and bonds or only those
        with undefined stereochemistry
    max_isomers: int optional, default=20
        The maximum amount of molecules that should be returned
    rationalise: bool optional, default=True
        If we should try to build and rationalise the molecule to ensure it can exist
    toolkit_registry: openff.toolkit.utils.toolkits.ToolkitRegistry or
    openff.toolkit.utils.toolkits.ToolkitWrapper, default=GLOBAL_TOOLKIT_REGISTRY
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use to enumerate the stereoisomers.

    Returns
    -------
    molecules: List[openff.toolkit.topology.Molecule]
        A list of :class:`Molecule` instances not including the input molecule.

    Raises
    ------
    InvalidToolkitRegistryError
        If ``toolkit_registry`` is neither a ToolkitRegistry nor a ToolkitWrapper.
    """
    # Registry dispatch vs. single-wrapper call; both forward the same kwargs.
    if isinstance(toolkit_registry, ToolkitRegistry):
        return toolkit_registry.call(
            "enumerate_stereoisomers",
            molecule=self,
            undefined_only=undefined_only,
            max_isomers=max_isomers,
            rationalise=rationalise,
        )
    if isinstance(toolkit_registry, ToolkitWrapper):
        return toolkit_registry.enumerate_stereoisomers(
            self,
            undefined_only=undefined_only,
            max_isomers=max_isomers,
            rationalise=rationalise,
        )
    raise InvalidToolkitRegistryError(
        "'toolkit_registry' must be either a ToolkitRegistry or a ToolkitWrapper"
    )
@OpenEyeToolkitWrapper.requires_toolkit()
def enumerate_protomers(self, max_states=10):
    """
    Enumerate the formal charges of a molecule to generate different protomers.

    Requires the OpenEye toolkit (enforced by the decorator).

    Parameters
    ----------
    max_states: int optional, default=10,
        The maximum number of protomer states to be returned.

    Returns
    -------
    molecules: List[openff.toolkit.topology.Molecule],
        A list of the protomers of the input molecules not including the input.
    """
    # Protomer enumeration is only available through the OpenEye wrapper.
    return OpenEyeToolkitWrapper().enumerate_protomers(
        molecule=self, max_states=max_states
    )
@classmethod
@RDKitToolkitWrapper.requires_toolkit()
def from_rdkit(
    cls, rdmol, allow_undefined_stereo=False, hydrogens_are_explicit=False
):
    """
    Create a Molecule from an RDKit molecule.

    Requires the RDKit to be installed.

    Parameters
    ----------
    rdmol : rdkit.Chem.Mol
        An RDKit molecule
    allow_undefined_stereo : bool, default=False
        If ``False``, raises an exception if ``rdmol`` contains undefined stereochemistry.
    hydrogens_are_explicit : bool, default=False
        If ``False``, RDKit will perform hydrogen addition using ``Chem.AddHs``

    Returns
    -------
    molecule : openff.toolkit.topology.Molecule
        An OpenFF molecule

    Examples
    --------
    Create a molecule from an RDKit molecule

    >>> from rdkit import Chem
    >>> from openff.toolkit.tests.utils import get_data_file_path
    >>> rdmol = Chem.MolFromMolFile(get_data_file_path('systems/monomers/ethanol.sdf'))
    >>> molecule = Molecule.from_rdkit(rdmol)
    """
    # _cls=cls ensures subclasses of Molecule get an instance of themselves.
    return RDKitToolkitWrapper().from_rdkit(
        rdmol,
        allow_undefined_stereo=allow_undefined_stereo,
        hydrogens_are_explicit=hydrogens_are_explicit,
        _cls=cls,
    )
def to_rdkit(
    self,
    aromaticity_model=DEFAULT_AROMATICITY_MODEL,
    toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
):
    """
    Create an RDKit molecule.

    Requires the RDKit to be installed.

    Parameters
    ----------
    aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL
        The aromaticity model to use
    toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
    openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for the conversion

    Returns
    -------
    rdmol : rdkit.RDMol
        An RDKit molecule

    Examples
    --------
    Convert a molecule to RDKit

    >>> from openff.toolkit.utils import get_data_file_path
    >>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
    >>> molecule = Molecule(sdf_filepath)
    >>> rdmol = molecule.to_rdkit()
    """
    # A bare wrapper is called directly; anything else is assumed to be a
    # registry and dispatched through call().
    if isinstance(toolkit_registry, ToolkitWrapper):
        return toolkit_registry.to_rdkit(self, aromaticity_model=aromaticity_model)
    return toolkit_registry.call(
        "to_rdkit", self, aromaticity_model=aromaticity_model
    )
@classmethod
@OpenEyeToolkitWrapper.requires_toolkit()
def from_openeye(cls, oemol, allow_undefined_stereo=False):
    """
    Create a ``Molecule`` from an OpenEye molecule.

    Requires the OpenEye toolkit to be installed.

    Parameters
    ----------
    oemol : openeye.oechem.OEMol
        An OpenEye molecule
    allow_undefined_stereo : bool, default=False
        If ``False``, raises an exception if oemol contains undefined stereochemistry.

    Returns
    -------
    molecule : openff.toolkit.topology.Molecule
        An OpenFF molecule

    Examples
    --------
    Create a ``Molecule`` from an OpenEye OEMol

    >>> from openeye import oechem
    >>> from openff.toolkit.tests.utils import get_data_file_path
    >>> ifs = oechem.oemolistream(get_data_file_path('systems/monomers/ethanol.mol2'))
    >>> oemols = list(ifs.GetOEGraphMols())
    >>> molecule = Molecule.from_openeye(oemols[0])
    """
    # _cls=cls ensures subclasses of Molecule get an instance of themselves.
    return OpenEyeToolkitWrapper().from_openeye(
        oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=cls
    )
@requires_package("qcelemental")
def to_qcschema(self, multiplicity=1, conformer=0, extras=None):
    """
    Create a QCElemental Molecule.

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    multiplicity : int, default=1,
        The multiplicity of the molecule;
        sets ``molecular_multiplicity`` field for QCElemental Molecule.
    conformer : int, default=0,
        The index of the conformer to use for the QCElemental Molecule geometry.
    extras : dict, default=None
        A dictionary that should be included in the ``extras`` field on the QCElemental Molecule.
        This can be used to include extra information, such as a smiles representation.
        NOTE(review): a caller-supplied dict is mutated in place (the mapped
        SMILES key is written into it).

    Returns
    ---------
    qcelemental.models.Molecule
        A validated QCElemental Molecule.

    Examples
    --------
    Create a QCElemental Molecule:

    >>> import qcelemental as qcel
    >>> mol = Molecule.from_smiles('CC')
    >>> mol.generate_conformers(n_conformers=1)
    >>> qcemol = mol.to_qcschema()

    Raises
    --------
    MissingOptionalDependencyError
        If qcelemental is not installed, the qcschema can not be validated.
    InvalidConformerError
        No conformer found at the given index.
    """
    import qcelemental as qcel

    # get/ check the geometry; converted to bohr as expected by the schema.
    # TypeError covers self.conformers being None (no conformers at all).
    try:
        geometry = self.conformers[conformer].m_as(unit.bohr)
    except (IndexError, TypeError):
        raise InvalidConformerError(
            "The molecule must have a conformation to produce a valid qcschema; "
            f"no conformer was found at index {conformer}."
        )
    # Gather the required qcschema data
    charge = self.total_charge.m_as(unit.elementary_charge)
    connectivity = [
        (bond.atom1_index, bond.atom2_index, bond.bond_order) for bond in self.bonds
    ]
    symbols = [SYMBOLS[atom.atomic_number] for atom in self.atoms]
    # Always record the mapped SMILES so the schema can be round-tripped back
    # into an OpenFF Molecule (consumed by ``from_qcschema``).
    if extras is not None:
        extras[
            "canonical_isomeric_explicit_hydrogen_mapped_smiles"
        ] = self.to_smiles(mapped=True)
    else:
        extras = {
            "canonical_isomeric_explicit_hydrogen_mapped_smiles": self.to_smiles(
                mapped=True
            )
        }
    schema_dict = {
        "symbols": symbols,
        "geometry": geometry,
        # If we have no bonds we must supply None
        "connectivity": connectivity if connectivity else None,
        "molecular_charge": charge,
        "molecular_multiplicity": multiplicity,
        "extras": extras,
    }
    return qcel.models.Molecule.from_data(schema_dict, validate=True)
@classmethod
def from_mapped_smiles(
    cls,
    mapped_smiles,
    toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
    allow_undefined_stereo=False,
):
    """
    Create an :class:`Molecule` from a mapped SMILES made with cmiles.

    The molecule will be in the order of the indexing in the mapped smiles string.

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    mapped_smiles: str
        A CMILES-style mapped smiles string with explicit hydrogens.
    toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry
    or openff.toolkit.utils.toolkits.ToolkitWrapper, optional
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES-to-molecule conversion
    allow_undefined_stereo : bool, default=False
        If false, raises an exception if the SMILES contains undefined stereochemistry.

    Returns
    ----------
    offmol : openff.toolkit.topology.molecule.Molecule
        An OpenFF molecule instance.

    Raises
    --------
    SmilesParsingError
        If the given SMILES had no indexing picked up by the toolkits.
    """
    # Parse the SMILES; explicit hydrogens are required so every atom can
    # carry a map index.
    offmol = cls.from_smiles(
        mapped_smiles,
        hydrogens_are_explicit=True,
        toolkit_registry=toolkit_registry,
        allow_undefined_stereo=allow_undefined_stereo,
    )
    # The toolkits stash the atom map in the molecule's properties; remove it
    # so the map is not exposed on the returned molecule.
    if "atom_map" not in offmol._properties:
        raise SmilesParsingError(
            "The given SMILES has no indexing, please generate a valid explicit hydrogen "
            "mapped SMILES using cmiles."
        )
    mapping = offmol._properties.pop("atom_map")
    if len(mapping) != offmol.n_atoms:
        raise SmilesParsingError(
            "The mapped smiles does not contain enough indexes to remap the molecule."
        )
    # Map indices are 1-based in the SMILES (0 means "unmapped" in the
    # toolkits); shift to 0-based before remapping.
    adjusted_mapping = {current: new - 1 for current, new in mapping.items()}
    return offmol.remap(adjusted_mapping, current_to_new=True)
@classmethod
@requires_package("qcelemental")
def from_qcschema(
    cls,
    qca_record,
    client=None,
    toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
    allow_undefined_stereo=False,
):
    """
    Create a Molecule from a QCArchive molecule record or dataset entry
    based on attached cmiles information.

    For a molecule record, a conformer will be set from its geometry.
    For a dataset entry, if a corresponding client instance is provided,
    the starting geometry for that entry will be used as a conformer.

    A QCElemental Molecule produced from ``Molecule.to_qcschema`` can be round-tripped
    through this method to produce a new, valid Molecule.

    Parameters
    ----------
    qca_record : dict
        A QCArchive molecule record or dataset entry.
    client : optional, default=None,
        A qcportal.FractalClient instance to use for fetching an initial geometry.
        Only used if ``qca_record`` is a dataset entry.
    toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
    openff.toolkit.utils.toolkits.ToolkitWrapper, optional
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for SMILES-to-molecule conversion
    allow_undefined_stereo : bool, default=False
        If false, raises an exception if qca_record contains undefined stereochemistry.

    Returns
    -------
    molecule : openff.toolkit.topology.Molecule
        An OpenFF molecule instance.

    Examples
    --------
    Get Molecule from a QCArchive molecule record:

    >>> from qcportal import FractalClient
    >>> client = FractalClient()
    >>> offmol = Molecule.from_qcschema(
    ...     client.query_molecules(molecular_formula="C16H20N3O5")[0]
    ... )

    Get Molecule from a QCArchive optimization entry:

    >>> from qcportal import FractalClient
    >>> client = FractalClient()
    >>> optds = client.get_collection(
    ...     "OptimizationDataset",
    ...     "SMIRNOFF Coverage Set 1"
    ... )
    >>> offmol = Molecule.from_qcschema(optds.get_entry('coc(o)oc-0'))

    Same as above, but with conformer(s) from initial molecule(s) by
    providing client to database:

    >>> offmol = Molecule.from_qcschema(
    ...     optds.get_entry('coc(o)oc-0'),
    ...     client=client
    ... )

    Raises
    -------
    AttributeError
        If the record dict can not be made from ``qca_record``, or if the
        provided ``client`` could not retrieve the initial molecule.
    KeyError
        If the record does not contain the
        ``canonical_isomeric_explicit_hydrogen_mapped_smiles``.
    InvalidConformerError
        Silent error, if the conformer could not be attached.
    """
    # We can accept the Dataset entry record or the dict with JSON encoding
    # lets get it all in the dict rep
    if not isinstance(qca_record, dict):
        try:
            qca_record = qca_record.dict(encoding="json")
        except AttributeError:
            raise AttributeError(
                "The object passed could not be converted to a dict with json encoding"
            )
    # identify if this is a dataset entry (its cmiles lives under "attributes")
    if "attributes" in qca_record:
        mapped_smiles = qca_record["attributes"][
            "canonical_isomeric_explicit_hydrogen_mapped_smiles"
        ]
        if client is not None:
            # try and find the initial molecule conformations and attach them
            # collect the input molecules
            try:
                input_mols = client.query_molecules(
                    id=qca_record["initial_molecules"]
                )
            except KeyError:
                # this must be an optimisation record, which stores a single
                # "initial_molecule" key instead
                input_mols = client.query_molecules(
                    id=qca_record["initial_molecule"]
                )
            except AttributeError:
                raise AttributeError(
                    "The provided client can not query molecules, make sure it is an instance of"
                    "qcportal.client.FractalClient() with the correct address."
                )
        else:
            input_mols = []
    # identify if this is a molecule record (cmiles lives under "extras");
    # the record itself then supplies the geometry
    elif "extras" in qca_record:
        mapped_smiles = qca_record["extras"][
            "canonical_isomeric_explicit_hydrogen_mapped_smiles"
        ]
        input_mols = [qca_record]
    else:
        raise KeyError(
            "The record must contain the hydrogen mapped smiles to be safely made from the archive. "
            "It is not present in either 'attributes' or 'extras' on the provided `qca_record`"
        )
    # make a new molecule that has been reordered to match the cmiles mapping
    offmol = cls.from_mapped_smiles(
        mapped_smiles,
        toolkit_registry=toolkit_registry,
        allow_undefined_stereo=allow_undefined_stereo,
    )
    # now for each molecule convert and attach the input geometry
    initial_ids = {}
    for molecule in input_mols:
        if not isinstance(molecule, dict):
            mol = molecule.dict(encoding="json")
        else:
            mol = molecule
        # QCSchema geometries are flat arrays in bohr; reshape to (n_atoms, 3)
        geometry = unit.Quantity(
            np.array(mol["geometry"], float).reshape(-1, 3), unit.bohr
        )
        try:
            offmol._add_conformer(geometry.to(unit.angstrom))
            # in case this molecule didn't come from a server at all
            if "id" in mol:
                initial_ids[mol["id"]] = offmol.n_conformers - 1
        except InvalidConformerError:
            print(
                "Invalid conformer for this molecule, the geometry could not be attached."
            )
    # attach a dict that has the initial molecule ids and the number of the conformer it is stored in
    # if it's empty, don't bother
    if initial_ids:
        offmol._properties["initial_molecules"] = initial_ids
    return offmol
@classmethod
@RDKitToolkitWrapper.requires_toolkit()
def from_pdb_and_smiles(cls, file_path, smiles, allow_undefined_stereo=False):
    """
    Create a Molecule from a pdb file and a SMILES string using RDKit.

    Requires RDKit to be installed.

    .. warning :: This API is experimental and subject to change.

    The molecule is created and sanitised based on the SMILES string, we then find a mapping
    between this molecule and one from the PDB based only on atomic number and connections.
    The SMILES molecule is then reindexed to match the PDB, the conformer is attached, and the
    molecule returned.

    Note that any stereochemistry in the molecule is set by the SMILES, and not the coordinates
    of the PDB.

    Parameters
    ----------
    file_path: str
        PDB file path
    smiles : str
        a valid smiles string for the pdb, used for stereochemistry, formal charges, and bond order
    allow_undefined_stereo : bool, default=False
        If false, raises an exception if SMILES contains undefined stereochemistry.

    Returns
    --------
    molecule : openff.toolkit.Molecule
        An OFFMol instance with ordering the same as used in the PDB file.

    Raises
    ------
    InvalidConformerError
        If the SMILES and PDB molecules are not isomorphic.
    """
    # _cls=cls ensures subclasses of Molecule get an instance of themselves.
    return RDKitToolkitWrapper().from_pdb_and_smiles(
        file_path, smiles, allow_undefined_stereo, _cls=cls
    )
def canonical_order_atoms(self, toolkit_registry=GLOBAL_TOOLKIT_REGISTRY):
    """
    Produce a copy of the molecule with the atoms reordered canonically.

    Each toolkit defines its own canonical ordering of atoms. The canonical
    order may change from toolkit version to toolkit version or between
    toolkits.

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
    openff.toolkit.utils.toolkits.ToolkitWrapper, optional
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for
        SMILES-to-molecule conversion

    Returns
    -------
    molecule : openff.toolkit.topology.Molecule
        An new OpenFF style molecule with atoms in the canonical order.

    Raises
    ------
    InvalidToolkitRegistryError
        If ``toolkit_registry`` is neither a ToolkitRegistry nor a ToolkitWrapper.
    """
    # Registry dispatch vs. single-wrapper call.
    if isinstance(toolkit_registry, ToolkitRegistry):
        return toolkit_registry.call("canonical_order_atoms", self)
    if isinstance(toolkit_registry, ToolkitWrapper):
        return toolkit_registry.canonical_order_atoms(self)
    raise InvalidToolkitRegistryError(
        "Invalid toolkit_registry passed to from_smiles. Expected ToolkitRegistry or ToolkitWrapper. "
        f"Got {type(toolkit_registry)}."
    )
def remap(self, mapping_dict, current_to_new=True):
    """
    Remap all of the indexes in the molecule to match the given mapping dict

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    mapping_dict : dict,
        A dictionary of the mapping between indexes, this should start from 0.
    current_to_new : bool, default=True
        If this is ``True``, then ``mapping_dict`` is of the form ``{current_index: new_index}``;
        otherwise, it is of the form ``{new_index: current_index}``

    Returns
    -------
    new_molecule : openff.toolkit.topology.molecule.Molecule
        An openff.toolkit.Molecule instance with all attributes transferred, in the PDB order.

    Raises
    ------
    ValueError
        If the size of the mapping does not match the number of atoms.
    IndexError
        If the mapping does not cover every atom index.
    """
    # make sure the size of the mapping matches the current molecule
    if len(mapping_dict) != self.n_atoms:
        raise ValueError(
            f"The number of mapping indices({len(mapping_dict)}) does not match the number of"
            f"atoms in this molecule({self.n_atoms})"
        )
    # make two mapping dicts we need new to old for atoms
    # and old to new for bonds
    if current_to_new:
        cur_to_new = mapping_dict
        new_to_cur = dict(zip(mapping_dict.values(), mapping_dict.keys()))
    else:
        new_to_cur = mapping_dict
        cur_to_new = dict(zip(mapping_dict.values(), mapping_dict.keys()))
    # Build a fresh molecule of the same class and copy atoms over in the
    # new order.
    new_molecule = self.__class__()
    new_molecule.name = self.name
    try:
        # add the atoms list
        for i in range(self.n_atoms):
            # get the old atom info
            old_atom = self._atoms[new_to_cur[i]]
            new_molecule._add_atom(**old_atom.to_dict())
    # this is the first time we access the mapping; catch an index error here corresponding to mapping that starts
    # from 0 or higher
    except (KeyError, IndexError):
        raise IndexError(
            f"The mapping supplied is missing a relation corresponding to atom({i})"
        )
    # add the bonds but with atom indexes in a sorted ascending order
    for bond in self._bonds:
        atoms = sorted([cur_to_new[bond.atom1_index], cur_to_new[bond.atom2_index]])
        bond_dict = bond.to_dict()
        bond_dict["atom1"] = atoms[0]
        bond_dict["atom2"] = atoms[1]
        new_molecule._add_bond(**bond_dict)
    # we can now resort the bonds
    sorted_bonds = sorted(
        new_molecule.bonds, key=operator.attrgetter("atom1_index", "atom2_index")
    )
    new_molecule._bonds = sorted_bonds
    # remap the charges
    if self.partial_charges is not None:
        new_charges = np.zeros(self.n_atoms)
        for i in range(self.n_atoms):
            new_charges[i] = self.partial_charges[new_to_cur[i]].m_as(
                unit.elementary_charge
            )
        new_molecule.partial_charges = new_charges * unit.elementary_charge
    # remap the conformers there can be more than one
    if self.conformers is not None:
        for conformer in self.conformers:
            new_conformer = np.zeros((self.n_atoms, 3))
            for i in range(self.n_atoms):
                new_conformer[i] = conformer[new_to_cur[i]].m_as(unit.angstrom)
            new_molecule._add_conformer(new_conformer * unit.angstrom)
    # move any properties across (deep copy so the originals stay independent)
    new_molecule._properties = deepcopy(self._properties)
    return new_molecule
def to_openeye(
    self,
    toolkit_registry=GLOBAL_TOOLKIT_REGISTRY,
    aromaticity_model=DEFAULT_AROMATICITY_MODEL,
):
    """
    Create an OpenEye molecule.

    Requires the OpenEye toolkit to be installed.

    .. todo ::

        * Use stored conformer positions instead of an argument.
        * Should the aromaticity model be specified in some other way?

    Parameters
    ----------
    toolkit_registry : openff.toolkit.utils.toolkits.ToolkitRegistry or
    openff.toolkit.utils.toolkits.ToolkitWrapper, optional, default=GLOBAL_TOOLKIT_REGISTRY
        :class:`ToolkitRegistry` or :class:`ToolkitWrapper` to use for the conversion
    aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL
        The aromaticity model to use

    Returns
    -------
    oemol : openeye.oechem.OEMol
        An OpenEye molecule

    Examples
    --------
    Create an OpenEye molecule from a Molecule

    >>> molecule = Molecule.from_smiles('CC')
    >>> oemol = molecule.to_openeye()
    """
    # A bare wrapper is called directly; anything else is assumed to be a
    # registry and dispatched through call().
    if isinstance(toolkit_registry, ToolkitWrapper):
        return toolkit_registry.to_openeye(
            self, aromaticity_model=aromaticity_model
        )
    return toolkit_registry.call(
        "to_openeye", self, aromaticity_model=aromaticity_model
    )
def _construct_angles(self):
    """
    Build the cached set of all i-j-k angles (``self._angles``).

    No-op if the cache already exists.
    """
    # TODO: Build Angle objects instead of tuple of atoms.
    if hasattr(self, "_angles"):
        return
    self._construct_bonded_atoms_list()
    self._angles = set()
    for first in self._atoms:
        for center in self._bondedAtoms[first]:
            for last in self._bondedAtoms[center]:
                # Skip the degenerate i-j-i path.
                if first == last:
                    continue
                # TODO: Encapsulate this logic into an Angle class.
                # Canonicalize so each angle is stored exactly once, with the
                # lower-index terminal atom first.
                if first.molecule_atom_index < last.molecule_atom_index:
                    self._angles.add((first, center, last))
                else:
                    self._angles.add((last, center, first))
def _construct_torsions(self):
    """
    Construct sets containing the atoms improper and proper torsions.

    Populates ``self._propers``, ``self._impropers``, and their union
    ``self._torsions`` as sets of 4-atom tuples. No-op if already built.
    """
    # TODO: Build Proper/ImproperTorsion objects instead of tuple of atoms.
    if not hasattr(self, "_torsions"):
        self._construct_bonded_atoms_list()
        self._propers = set()
        self._impropers = set()
        # Walk every bonded path atom1-atom2-atom3-atom4 to find propers.
        for atom1 in self._atoms:
            for atom2 in self._bondedAtoms[atom1]:
                for atom3 in self._bondedAtoms[atom2]:
                    if atom1 == atom3:
                        continue
                    for atom4 in self._bondedAtoms[atom3]:
                        if atom4 == atom2:
                            continue
                        # Exclude i-j-k-i
                        if atom1 == atom4:
                            continue
                        # Canonicalize so each path is stored once, with the
                        # lower-index terminal atom first.
                        if atom1.molecule_atom_index < atom4.molecule_atom_index:
                            torsion = (atom1, atom2, atom3, atom4)
                        else:
                            torsion = (atom4, atom3, atom2, atom1)
                        self._propers.add(torsion)
                    # Impropers: atom2 is central with three distinct
                    # neighbours atom1, atom3, atom3i.
                    for atom3i in self._bondedAtoms[atom2]:
                        if atom3i == atom3:
                            continue
                        if atom3i == atom1:
                            continue
                        improper = (atom1, atom2, atom3, atom3i)
                        self._impropers.add(improper)
        self._torsions = self._propers | self._impropers
def _construct_bonded_atoms_list(self):
    """
    Build ``self._bondedAtoms``, mapping each atom to the set of atoms it
    is bonded to. No-op if the cache already exists.
    """
    # TODO: Add this to cached_properties
    if hasattr(self, "_bondedAtoms"):
        return
    # Start every atom with an empty neighbour set, then fill from bonds.
    self._bondedAtoms = {atom: set() for atom in self._atoms}
    for bond in self._bonds:
        first = self.atoms[bond.atom1_index]
        second = self.atoms[bond.atom2_index]
        self._bondedAtoms[first].add(second)
        self._bondedAtoms[second].add(first)
def _is_bonded(self, atom_index_1, atom_index_2):
    """Return True if atoms are bonded, False if not.

    Parameters
    ----------
    atom_index_1 : int
    atom_index_2 : int
        Atom indices

    Returns
    -------
    is_bonded : bool
        True if atoms are bonded, False otherwise
    """
    # Ensure the neighbour cache exists, then do a set-membership lookup.
    self._construct_bonded_atoms_list()
    return self._atoms[atom_index_2] in self._bondedAtoms[self._atoms[atom_index_1]]
def get_bond_between(self, i, j):
    """Returns the bond between two atoms

    Parameters
    ----------
    i, j : int or Atom
        Atoms or atom indices to check

    Returns
    -------
    bond : Bond
        The bond between i and j.

    Raises
    ------
    TypeError
        If ``i`` and ``j`` are not both ints or both Atoms.
    NotBondedError
        If there is no bond between ``i`` and ``j``.
    """
    if isinstance(i, int) and isinstance(j, int):
        atom_i = self._atoms[i]
        atom_j = self._atoms[j]
    elif isinstance(i, Atom) and isinstance(j, Atom):
        atom_i = i
        atom_j = j
    else:
        raise TypeError(
            "Invalid input passed to get_bond_between(). Expected ints or Atoms, "
            # Bug fix: the message previously interpolated `j` twice,
            # hiding the actual value of `i` from the caller.
            f"got {i} and {j}."
        )
    # Scan the bonds of atom_i for one whose other endpoint is atom_j.
    for bond in atom_i.bonds:
        for atom in bond.atoms:
            if atom == atom_i:
                continue
            if atom == atom_j:
                return bond
    from openff.toolkit.topology import NotBondedError

    raise NotBondedError(f"No bond between atom {i} and {j}")
class Molecule(FrozenMolecule):
"""
Mutable chemical representation of a molecule, such as a small molecule or biopolymer.
.. todo :: What other API calls would be useful for supporting biopolymers as small molecules? Perhaps iterating
over chains and residues?
Examples
--------
Create a molecule from an sdf file
>>> from openff.toolkit.utils import get_data_file_path
>>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
>>> molecule = Molecule(sdf_filepath)
Convert to OpenEye OEMol object
>>> oemol = molecule.to_openeye()
Create a molecule from an OpenEye molecule
>>> molecule = Molecule.from_openeye(oemol)
Convert to RDKit Mol object
>>> rdmol = molecule.to_rdkit()
Create a molecule from an RDKit molecule
>>> molecule = Molecule.from_rdkit(rdmol)
Create a molecule from IUPAC name (requires the OpenEye toolkit)
>>> molecule = Molecule.from_iupac('imatinib')
Create a molecule from SMILES
>>> molecule = Molecule.from_smiles('Cc1ccccc1')
.. warning :: This API is experimental and subject to change.
"""
def __init__(self, *args, **kwargs):
    """
    Create a new Molecule object

    Parameters
    ----------
    other : optional, default=None
        If specified, attempt to construct a copy of the molecule from the
        specified object. This can be any one of the following:

        * a :class:`Molecule` object
        * a file that can be used to construct a :class:`Molecule` object
        * an ``openeye.oechem.OEMol``
        * an ``rdkit.Chem.rdchem.Mol``
        * a serialized :class:`Molecule` object

    Examples
    --------
    Create an empty molecule:

    >>> empty_molecule = Molecule()

    Create a molecule from a file that can be used to construct a molecule,
    using either a filename or file-like object:

    >>> from openff.toolkit.utils import get_data_file_path
    >>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')
    >>> molecule = Molecule(sdf_filepath)
    >>> molecule = Molecule(open(sdf_filepath, 'r'), file_format='sdf')

    >>> import gzip
    >>> mol2_gz_filepath = get_data_file_path('molecules/toluene.mol2.gz')
    >>> molecule = Molecule(gzip.GzipFile(mol2_gz_filepath, 'r'), file_format='mol2')

    Create a molecule from another molecule, a toolkit molecule, or a
    serialized molecule:

    >>> molecule_copy = Molecule(molecule)
    >>> molecule = Molecule(molecule.to_openeye())
    >>> molecule = Molecule(molecule.to_rdkit())
    >>> molecule_copy = Molecule(molecule.to_dict())

    .. todo ::

        * If a filename or file-like object is specified but the file
          contains more than one molecule, what is the proper behavior?
          Read just the first molecule, or raise an exception if more
          than one molecule is found?

        * Should we also support SMILES strings or IUPAC names for
          ``other``?
    """
    # All construction logic lives in FrozenMolecule; this subclass only
    # relaxes immutability.
    super().__init__(*args, **kwargs)
# TODO: Change this to add_atom(Atom) to improve encapsulation and extensibility?
def add_atom(
    self,
    atomic_number,
    formal_charge,
    is_aromatic,
    stereochemistry=None,
    name=None,
    metadata=None,
):
    """
    Add an atom to the molecule.

    Parameters
    ----------
    atomic_number : int
        Atomic number of the atom
    formal_charge : int
        Formal charge of the atom
    is_aromatic : bool
        If ``True``, atom is aromatic; if ``False``, not aromatic
    stereochemistry : str, optional, default=None
        Either ``'R'`` or ``'S'`` for specified stereochemistry, or ``None`` if stereochemistry is irrelevant
    name : str, optional
        An optional name for the atom
    metadata : dict[str: (int, str)], default=None
        An optional dictionary where keys are strings and values are strings or ints. This is intended
        to record atom-level information used to inform hierarchy definition and iteration, such as
        grouping atom by residue and chain.

    Returns
    -------
    index : int
        The index of the atom in the molecule

    Examples
    --------
    Define a methane molecule

    >>> molecule = Molecule()
    >>> molecule.name = 'methane'
    >>> C = molecule.add_atom(6, 0, False)
    >>> H1 = molecule.add_atom(1, 0, False)
    >>> H2 = molecule.add_atom(1, 0, False)
    >>> H3 = molecule.add_atom(1, 0, False)
    >>> H4 = molecule.add_atom(1, 0, False)
    >>> bond_idx = molecule.add_bond(C, H1, 1, False)
    >>> bond_idx = molecule.add_bond(C, H2, 1, False)
    >>> bond_idx = molecule.add_bond(C, H3, 1, False)
    >>> bond_idx = molecule.add_bond(C, H4, 1, False)
    >>> molecule.to_smiles(explicit_hydrogens=False)
    'C'
    """
    # Thin public wrapper around the private FrozenMolecule machinery.
    return self._add_atom(
        atomic_number,
        formal_charge,
        is_aromatic,
        stereochemistry=stereochemistry,
        name=name,
        metadata=metadata,
    )
def add_bond(
    self,
    atom1,
    atom2,
    bond_order,
    is_aromatic,
    stereochemistry=None,
    fractional_bond_order=None,
):
    """
    Add a bond between two specified atom indices

    Parameters
    ----------
    atom1 : int or openff.toolkit.topology.molecule.Atom
        Index of first atom
    atom2 : int or openff.toolkit.topology.molecule.Atom
        Index of second atom
    bond_order : int
        Integral bond order of Kekulized form
    is_aromatic : bool
        True if this bond is aromatic, False otherwise
    stereochemistry : str, optional, default=None
        Either ``'E'`` or ``'Z'`` for specified stereochemistry, or ``None`` if stereochemistry is irrelevant
    fractional_bond_order : float, optional, default=None
        The fractional (eg. Wiberg) bond order

    Returns
    -------
    index: int
        Index of the bond in this molecule

    Examples
    --------
    For an example of use, see :py:meth:`add_atom`.
    """
    # Thin public wrapper around the private FrozenMolecule machinery.
    return self._add_bond(
        atom1,
        atom2,
        bond_order,
        is_aromatic,
        stereochemistry=stereochemistry,
        fractional_bond_order=fractional_bond_order,
    )
def add_conformer(self, coordinates):
    """
    Add a conformation of the molecule

    Parameters
    ----------
    coordinates: unit-wrapped np.array with shape (n_atoms, 3) and dimension of distance
        Coordinates of the new conformer, with the first dimension of the array corresponding to the atom index in
        the molecule's indexing system.

    Returns
    -------
    index: int
        The index of this conformer
    """
    # TODO: how can we verify that a bare set of coordinates corresponds to
    # a conformation that does not change connectivity?
    return self._add_conformer(coordinates)
def visualize(
    self,
    backend="rdkit",
    width=None,
    height=None,
    show_all_hydrogens=True,
):
    """
    Render a visualization of the molecule in Jupyter

    Parameters
    ----------
    backend : str, optional, default='rdkit'
        The visualization engine to use. Choose from:

        - ``"rdkit"``
        - ``"openeye"``
        - ``"nglview"`` (requires conformers)

    width : int, optional, default=500
        Width of the generated representation (only applicable to
        ``backend=openeye`` or ``backend=rdkit``)
    height : int, optional, default=300
        Width of the generated representation (only applicable to
        ``backend=openeye`` or ``backend=rdkit``)
    show_all_hydrogens : bool, optional, default=True
        Whether to explicitly depict all hydrogen atoms. (only applicable to
        ``backend=openeye`` or ``backend=rdkit``)

    Returns
    -------
    object
        Depending on the backend chosen:

        - rdkit → IPython.display.SVG
        - openeye → IPython.display.Image
        - nglview → nglview.NGLWidget
    """
    from openff.toolkit.utils.toolkits import OPENEYE_AVAILABLE, RDKIT_AVAILABLE

    backend = backend.lower()
    # nglview renders a 3D widget from conformers and ignores the 2D sizing
    # options, so it is handled (and returned from) first.
    if backend == "nglview":
        try:
            import nglview as nv
        except ImportError:
            raise MissingOptionalDependencyError("nglview")
        if width is not None or height is not None:
            # TODO: More specific exception
            raise ValueError(
                "The width, height, and show_all_hydrogens arguments do not apply to the nglview backend."
            )
        elif not show_all_hydrogens:
            # TODO: More specific exception
            # TODO: Implement this? Should be able to just strip hydrogens from the PDB
            raise ValueError(
                "show_all_hydrogens=False is not supported by the nglview backend"
            )
        if self.conformers:
            from openff.toolkit.utils.viz import _OFFTrajectoryNGLView

            trajectory_like = _OFFTrajectoryNGLView(self)
            widget = nv.NGLWidget(trajectory_like)
            return widget
        else:
            # TODO: More specific exception
            raise ValueError(
                "Visualizing with NGLview requires that the molecule has "
                "conformers."
            )
    # Apply the 2D-backend defaults only after the nglview branch, which
    # requires these arguments to be unset.
    width = 500 if width is None else width
    height = 300 if height is None else height
    show_all_hydrogens = True if show_all_hydrogens is None else show_all_hydrogens
    if backend == "rdkit":
        if RDKIT_AVAILABLE:
            from IPython.display import SVG
            from rdkit.Chem.Draw import (  # type: ignore[import]
                rdDepictor,
                rdMolDraw2D,
            )
            from rdkit.Chem.rdmolops import RemoveHs  # type: ignore[import]

            rdmol = self.to_rdkit()
            if not show_all_hydrogens:
                # updateExplicitCount: Keep a record of the hydrogens we remove.
                # This is used in visualization to distinguish eg radicals from normal species
                rdmol = RemoveHs(rdmol, updateExplicitCount=True)
            rdDepictor.SetPreferCoordGen(True)
            rdDepictor.Compute2DCoords(rdmol)
            rdmol = rdMolDraw2D.PrepareMolForDrawing(rdmol)
            drawer = rdMolDraw2D.MolDraw2DSVG(width, height)
            drawer.DrawMolecule(rdmol)
            drawer.FinishDrawing()
            return SVG(drawer.GetDrawingText())
        else:
            # Fall through to the OpenEye branch below by rewriting `backend`.
            warnings.warn(
                "RDKit was requested as a visualization backend but "
                "it was not found to be installed. Falling back to "
                "trying to use OpenEye for visualization."
            )
            backend = "openeye"
    if backend == "openeye":
        if OPENEYE_AVAILABLE:
            from IPython.display import Image
            from openeye import oedepict

            oemol = self.to_openeye()
            opts = oedepict.OE2DMolDisplayOptions(
                width, height, oedepict.OEScale_AutoScale
            )
            if show_all_hydrogens:
                opts.SetHydrogenStyle(oedepict.OEHydrogenStyle_ImplicitAll)
            oedepict.OEPrepareDepiction(oemol)
            img = oedepict.OEImage(width, height)
            display = oedepict.OE2DMolDisplay(oemol, opts)
            oedepict.OERenderMolecule(img, display)
            png = oedepict.OEWriteImageToString("png", img)
            return Image(png)
    # Reached when no requested/fallback backend is available or recognized.
    # TODO: More specific exception
    raise ValueError("Could not find an appropriate backend")
def perceive_residues(self, substructure_file_path=None, strict_chirality=True):
    """
    Perceive a polymer's residues and permit iterating over them.

    Perceives residues by matching substructures in the current molecule
    with a substructure dictionary file, using SMARTS, and assigns residue
    names and numbers to atom metadata. It then constructs a residue hierarchy
    scheme to allow iterating over residues.

    Parameters
    ----------
    substructure_file_path : str, optional, default=None
        Path to substructure library file in JSON format. Defaults to using
        built-in substructure file.
    strict_chirality : bool, optional, default=True
        Whether to use strict chirality symbols (stereomarks) for
        substructure matchings with SMARTS.
    """
    # Read substructure dictionary file; fall back to the built-in library.
    if not substructure_file_path:
        substructure_file_path = get_data_file_path(
            "proteins/aa_residues_substructures_with_caps.json"
        )
    with open(substructure_file_path, "r") as subfile:
        substructure_dictionary = json.load(subfile)
    # TODO: Think of a better way to deal with no strict chirality case
    # if ignoring strict chirality, remove/update keys in inner dictionary
    if not strict_chirality:
        # make a copy of substructure dict
        substructure_dictionary_no_chirality = deepcopy(substructure_dictionary)
        # Update inner key (SMARTS) maintaining its value
        for res_name, inner_dict in substructure_dictionary.items():
            for smarts, _atom_types in inner_dict.items():
                smarts_no_chirality = smarts.replace("@", "")  # remove @ in smarts
                substructure_dictionary_no_chirality[res_name][
                    smarts_no_chirality
                ] = substructure_dictionary_no_chirality[res_name].pop(
                    smarts
                )  # update key
        # replace with the new substructure dictionary
        substructure_dictionary = substructure_dictionary_no_chirality

    # Collect every SMARTS match, recording both the ordered match tuple and
    # its set form (the set is reused repeatedly in the overlap pass below).
    # NOTE: a dead local accumulator (`matches`) that was written but never
    # read has been removed here.
    all_matches = list()
    for residue_name, smarts_dict in substructure_dictionary.items():
        for smarts in smarts_dict:
            for match in self.chemical_environment_matches(smarts):
                all_matches.append(
                    {
                        "atom_idxs": match,
                        "atom_idxs_set": set(match),
                        "smarts": smarts,
                        "residue_name": residue_name,
                        "atom_names": smarts_dict[smarts],
                    }
                )

    # Remove matches that are subsets of other matches
    # give precedence to the SMARTS defined at the end of the file
    match_idxs_to_delete = set()
    for match_idx in range(len(all_matches) - 1, 0, -1):
        this_match_set = all_matches[match_idx]["atom_idxs_set"]
        this_match_set_size = len(this_match_set)
        for match_before_this_idx in range(match_idx):
            match_before_this_set = all_matches[match_before_this_idx][
                "atom_idxs_set"
            ]
            match_before_this_set_size = len(match_before_this_set)
            n_overlapping_atoms = len(
                this_match_set.intersection(match_before_this_set)
            )
            if n_overlapping_atoms > 0:
                # Keep the larger of two overlapping matches; ties go to the
                # later-defined SMARTS (the earlier index is deleted).
                if match_before_this_set_size < this_match_set_size:
                    match_idxs_to_delete.add(match_before_this_idx)
                else:
                    match_idxs_to_delete.add(match_idx)

    # Pop from the back so earlier indices stay valid while deleting.
    match_idxs_to_delete_list = sorted(list(match_idxs_to_delete), reverse=True)
    for match_idx in match_idxs_to_delete_list:
        all_matches.pop(match_idx)

    # Order residues by their lowest atom index before numbering them.
    all_matches.sort(key=lambda x: min(x["atom_idxs"]))
    # Now the matches have been deduplicated and de-subsetted
    for residue_num, match_dict in enumerate(all_matches):
        for smarts_idx, atom_idx in enumerate(match_dict["atom_idxs"]):
            self.atoms[atom_idx].metadata["residue_name"] = match_dict[
                "residue_name"
            ]
            # Residue numbers are stored as strings, 1-based (PDB convention).
            self.atoms[atom_idx].metadata["residue_number"] = str(residue_num + 1)
            self.atoms[atom_idx].metadata["insertion_code"] = " "
            self.atoms[atom_idx].metadata["atom_name"] = match_dict["atom_names"][
                smarts_idx
            ]

    # Now add the residue hierarchy scheme
    self._add_residue_hierarchy_scheme()
def _ipython_display_(self):  # pragma: no cover
    """Jupyter rich-display hook: show the first backend that succeeds.

    nglview may be absent (ImportError) or unusable without conformers
    (ValueError); the 2D backends only raise ValueError. Silently falls
    through (returning None) if every backend fails.
    """
    from IPython.display import display

    attempts = (
        ("nglview", (ImportError, ValueError)),
        ("rdkit", (ValueError,)),
        ("openeye", (ValueError,)),
    )
    for backend_name, tolerated in attempts:
        try:
            return display(self.visualize(backend=backend_name))
        except tolerated:
            continue
def _networkx_graph_to_hill_formula(graph: "nx.Graph") -> str:
    """
    Convert a NetworkX graph to a Hill formula.

    Parameters
    ----------
    graph : nx.Graph
        The graph to convert. Nodes are expected to carry an
        ``atomic_number`` attribute; nodes without one default to 1.

    Returns
    -------
    str
        The Hill formula corresponding to the graph.

    Raises
    ------
    TypeError
        If ``graph`` is not a ``networkx.Graph``.
    """
    import networkx as nx

    if not isinstance(graph, nx.Graph):
        # A specific exception type (was a bare ``Exception``) lets callers
        # distinguish bad input from unexpected failures.
        raise TypeError(
            f"The graph must be a NetworkX graph, got {type(graph).__name__}."
        )

    atom_nums = list(dict(graph.nodes(data="atomic_number", default=1)).values())
    return _atom_nums_to_hill_formula(atom_nums)
def _atom_nums_to_hill_formula(atom_nums: List[int]) -> str:
    """
    Given a list of atomic numbers (one per atom), generate the corresponding
    Hill formula: carbon first, then hydrogen, then all remaining elements in
    alphabetical order, with a count appended only when an element occurs more
    than once. See https://en.wikipedia.org/wiki/Chemical_formula#Hill_system

    Note: the previous docstring incorrectly described the argument as a
    ``Counter``; it is a plain list of atomic numbers (0 renders as ``X``).
    """
    from collections import Counter

    SYMBOLS_ = deepcopy(SYMBOLS)
    # Atomic number 0 denotes a dummy/unknown atom.
    SYMBOLS_[0] = "X"

    atom_symbol_counts = Counter(SYMBOLS_[atom_num] for atom_num in atom_nums)

    # Hill ordering: C, then H (each only if present), then the rest
    # alphabetically.  A single formatting loop avoids duplicating the
    # element/count logic for the two groups.
    leading = [el for el in ("C", "H") if el in atom_symbol_counts]
    trailing = sorted(el for el in atom_symbol_counts if el not in ("C", "H"))

    formula = []
    for el in leading + trailing:
        count = atom_symbol_counts[el]
        formula.append(el)
        if count > 1:
            formula.append(str(count))

    return "".join(formula)
def _nth_degree_neighbors_from_graphlike(
    graphlike: Union[Molecule, "_SimpleMolecule"], n_degrees: int
) -> Generator[
    Union[Tuple[Atom, Atom], Tuple["_SimpleAtom", "_SimpleAtom"]], None, None
]:
    """
    Given a graph-like object, yield pairs of atoms separated by exactly
    ``n_degrees`` bonds.

    The input `graphlike` object must provide a .to_networkx() method and an
    `atoms` property that can be indexed.

    See Molecule.nth_degree_neighbors for more details.

    Parameters
    ----------
    graphlike : Union[Molecule, _SimpleMolecule]
        The graph-like object to get the neighbors of.
    n_degrees : int
        The number of bonds separating atoms in each pair

    Returns
    -------
    neighbors: iterator of tuple of Atom
        Tuples (len 2) of atoms that are separated by ``n_degrees`` bonds.
        Each unordered pair is yielded once, lower node index first.
    """
    graph = graphlike.to_networkx()

    for node_i in graph.nodes:
        for node_j in graph.nodes:
            # Consider each unordered pair only once (i < j).  Performing
            # this check *before* the shortest-path computation halves the
            # work compared to the previous version, which filtered out
            # i > j pairs after already computing their path length.
            if node_i >= node_j:
                continue
            path_length = nx.shortest_path_length(graph, node_i, node_j)
            if path_length == n_degrees:
                yield (graphlike.atoms[node_i], graphlike.atoms[node_j])
class HierarchyScheme:
    """
    Perceives hierarchy elements from the metadata of atoms in a ``Molecule``.

    The Open Force Field Toolkit has no native understanding of hierarchical
    atom organisation schemes common to other biomolecular software, such as
    "residues" or "chains" (see :ref:`userguide_hierarchy`). To facilitate
    iterating over groups of atoms, a ``HierarchyScheme`` can be used to collect
    atoms into ``HierarchyElements``, groups of atoms that share the same
    values for certain metadata elements. Metadata elements are stored in the
    ``Atom.properties`` attribute.

    Hierarchy schemes are not updated dynamically; if a ``Molecule`` with
    hierarchy schemes changes, :meth:`Molecule.update_hierarchy_schemes()` must
    be called before the scheme is iterated over again or else the grouping
    may be incorrect.

    A ``HierarchyScheme`` contains the information needed to perceive
    ``HierarchyElement`` objects from a ``Molecule`` containing atoms with
    metadata.

    See also
    --------
    Molecule.add_default_hierarchy_schemes, Molecule.add_hierarchy_scheme,
    Molecule.hierarchy_schemes, Molecule.delete_hierarchy_scheme,
    Molecule.update_hierarchy_schemes, Molecule.perceive_residues,
    Topology.hierarchy_iterator, HierarchyElement
    """

    def __init__(
        self,
        parent: FrozenMolecule,
        uniqueness_criteria: Union[Tuple[str], List[str]],
        iterator_name: str,
    ):
        """
        Create a new hierarchy scheme for iterating over groups of atoms.

        Parameters
        ----------
        parent
            The ``Molecule`` to which this scheme belongs.
        uniqueness_criteria
            The names of ``Atom`` metadata entries that define this scheme. An
            atom belongs to a ``HierarchyElement`` only if its metadata has the
            same values for these criteria as the other atoms in the
            ``HierarchyElement``.
        iterator_name
            The name of the iterator that will be exposed to access the hierarchy
            elements generated by this scheme

        Raises
        ------
        TypeError
            If ``uniqueness_criteria`` is not a list or tuple of strings, or
            if ``iterator_name`` is not a string.
        """
        # Validate argument types eagerly so a misconfigured scheme fails at
        # construction time rather than later during perception/iteration.
        if (type(uniqueness_criteria) is not list) and (
            type(uniqueness_criteria) is not tuple
        ):
            raise TypeError(
                f"'uniqueness_criteria' kwarg must be a list or a tuple of strings,"
                f" received {repr(uniqueness_criteria)} "
                f"(type {type(uniqueness_criteria)}) instead."
            )

        for criterion in uniqueness_criteria:
            if type(criterion) is not str:
                raise TypeError(
                    f"Each item in the 'uniqueness_criteria' kwarg must be a string,"
                    f" received {repr(criterion)} "
                    f"(type {type(criterion)}) instead."
                )

        if type(iterator_name) is not str:
            raise TypeError(
                f"'iterator_name' kwarg must be a string, received {repr(iterator_name)} "
                f"(type {type(iterator_name)}) instead."
            )

        self.parent = parent
        self.uniqueness_criteria = uniqueness_criteria
        self.iterator_name = iterator_name
        # Populated by perceive_hierarchy(); empty until then.
        self.hierarchy_elements: List[HierarchyElement] = list()

    def to_dict(self):
        """
        Serialize this object to a basic dict of strings, ints, and floats
        """
        return_dict = dict()
        return_dict["uniqueness_criteria"] = self.uniqueness_criteria
        return_dict["iterator_name"] = self.iterator_name
        return_dict["hierarchy_elements"] = [
            e.to_dict() for e in self.hierarchy_elements
        ]
        return return_dict

    def perceive_hierarchy(self):
        """
        Prepare the parent ``Molecule`` for iteration according to this scheme.

        Groups the atoms of the parent of this ``HierarchyScheme`` according to
        their metadata, and creates ``HierarchyElement`` objects suitable for
        iteration over the parent. Atoms missing the metadata fields in
        this object's ``uniqueness_criteria`` tuple will have those spots
        populated with the string ``'None'``.

        This method overwrites the scheme's ``hierarchy_elements`` attribute in
        place. Each ``HierarchyElement`` in the scheme's `hierarchy_elements`
        attribute is `static` --- that is, it is updated only when
        `perceive_hierarchy()` is called, and `not` on-the-fly when atom
        metadata is modified.
        """
        from collections import defaultdict

        self.hierarchy_elements = list()
        # Determine which atoms should get added to which HierarchyElements
        hier_eles_to_add = defaultdict(list)
        for atom in self.parent.atoms:
            # Build this atom's grouping key from its metadata values,
            # substituting the literal string 'None' for missing fields.
            atom_key = list()
            for field_key in self.uniqueness_criteria:
                if field_key in atom.metadata:
                    atom_key.append(atom.metadata[field_key])
                else:
                    atom_key.append("None")
            hier_eles_to_add[tuple(atom_key)].append(atom)

        # Create the actual HierarchyElements
        for atom_key, atoms_to_add in hier_eles_to_add.items():
            atom_indices = [p.molecule_atom_index for p in atoms_to_add]
            self.add_hierarchy_element(atom_key, atom_indices)

        self.sort_hierarchy_elements()

    def add_hierarchy_element(self, identifier, atom_indices):
        """
        Instantiate a new HierarchyElement belonging to this HierarchyScheme.

        This is the main way to instantiate new HierarchyElements.

        Parameters
        ----------
        identifier : tuple of str and int
            Tuple of metadata values (not keys) that define the uniqueness
            criteria for this element
        atom_indices : iterable int
            The indices of atoms in ``scheme.parent`` that are in this
            element

        Returns
        -------
        HierarchyElement
            The newly created element (also appended to
            ``self.hierarchy_elements``).
        """
        new_hier_ele = HierarchyElement(self, identifier, atom_indices)
        self.hierarchy_elements.append(new_hier_ele)
        return new_hier_ele

    def sort_hierarchy_elements(self):
        """
        Semantically sort the HierarchyElements belonging to this object, according to
        their identifiers.
        """
        # hard-code the sort_func value here, since it's hard to serialize safely
        def sort_func(x):
            # The identifier components are joined and parsed as a version
            # string, presumably to obtain a natural mixed alphanumeric
            # ordering (e.g. '2' before '10') — TODO confirm this intent.
            return version.parse(".".join([str(i) for i in x.identifier]))

        self.hierarchy_elements.sort(key=sort_func)

    def __str__(self):
        return (
            f"HierarchyScheme with uniqueness_criteria '{self.uniqueness_criteria}', iterator_name "
            f"'{self.iterator_name}', and {len(self.hierarchy_elements)} elements"
        )

    def __repr__(self):
        return self.__str__()
class HierarchyElement:
    """An element in a metadata hierarchy scheme, such as a residue or chain."""

    def __init__(self, scheme, identifier, atom_indices):
        """
        Create a new hierarchy element.

        Parameters
        ----------
        scheme : HierarchyScheme
            The scheme to which this ``HierarchyElement`` belongs
        identifier : tuple of str and int
            Tuple of metadata values (not keys) that define the uniqueness
            criteria for this element
        atom_indices : iterable int
            The indices of particles in ``scheme.parent`` that are in this
            element
        """
        self.scheme = scheme
        self.identifier = identifier
        # Deep-copy so later mutation of the caller's sequence cannot change
        # this element's membership.
        self.atom_indices = deepcopy(atom_indices)
        # Expose each identifier component as an attribute named after the
        # matching uniqueness criterion (e.g. ``element.residue_name``).
        for id_component, uniqueness_component in zip(
            identifier, scheme.uniqueness_criteria
        ):
            setattr(self, uniqueness_component, id_component)

    def to_dict(self):
        """
        Serialize this object to a basic dict of strings, ints, and floats.
        """
        return_dict = dict()
        return_dict["identifier"] = self.identifier
        return_dict["atom_indices"] = self.atom_indices
        return return_dict

    @property
    def n_atoms(self):
        """
        The number of atoms in this hierarchy element.
        """
        return len(self.atom_indices)

    @property
    def atoms(self):
        """
        Iterator over the atoms in this hierarchy element.
        """
        for atom_index in self.atom_indices:
            yield self.parent.atoms[atom_index]

    def atom(self, index: int) -> Atom:
        """
        Get the atom with the specified index (relative to this element).
        """
        return self.parent.atoms[self.atom_indices[index]]

    @property
    def parent(self) -> FrozenMolecule:
        """
        The parent molecule for this hierarchy element
        """
        return self.scheme.parent

    def __str__(self):
        return (
            f"HierarchyElement {self.identifier} of iterator '{self.scheme.iterator_name}' containing "
            f"{len(self.atom_indices)} atom(s)"
        )

    def __repr__(self):
        return self.__str__()

    @property
    def has_unique_atom_names(self) -> bool:
        """``True`` if the element has unique atom names, ``False`` otherwise."""
        return _has_unique_atom_names(self)

    def generate_unique_atom_names(self):
        """
        Generate unique atom names from the element symbol and count.

        Names are generated from the elemental symbol and the number of times
        that element is found in the hierarchy element. The character 'x' is
        appended to these generated names to reduce the odds that they clash
        with an atom name or type imported from another source. For example,
        generated atom names might begin 'C1x', 'H1x', 'O1x', 'C2x', etc.
        """
        return _generate_unique_atom_names(self)
def _has_unique_atom_names(obj: Union[FrozenMolecule, HierarchyElement]) -> bool:
    """``True`` if the object has unique atom names, ``False`` otherwise."""
    # Duplicate names collapse in the set, making it smaller than n_atoms.
    return len({atom.name for atom in obj.atoms}) >= obj.n_atoms
def _generate_unique_atom_names(obj: Union[FrozenMolecule, HierarchyElement]):
    """
    Generate unique atom names from the element symbol and count.

    Names are generated from the elemental symbol and the number of times that
    element is found in the hierarchy element or molecule. The character 'x' is
    appended to these generated names to reduce the odds that they clash with
    an atom name or type imported from another source. For example, generated
    atom names might begin 'C1x', 'H1x', 'O1x', 'C2x', etc.
    """
    from collections import defaultdict

    # Running per-element tally: the first carbon becomes C1x, the second C2x.
    tally: DefaultDict[str, int] = defaultdict(int)
    # TODO: It may be worth exposing this as a user option, i.e. to avoid multiple ligands
    # parameterized with OpenFF clashing because they have atom names like O1x, H3x, etc.
    # i.e. an optional argument could enable a user to `generate_unique_atom_names(blah="y")
    # to have one ligand be O1y, etc.
    # https://github.com/openforcefield/openff-toolkit/pull/1096#pullrequestreview-767227391
    for atom in obj.atoms:
        element_symbol = atom.symbol
        tally[element_symbol] += 1
        atom.name = f"{element_symbol}{tally[element_symbol]}x"
| mit | 25d1c3c23fbc06801f6f9e2574e92859 | 36.410754 | 158 | 0.595138 | 4.317196 | false | false | false | false |
openforcefield/openff-toolkit | openff/toolkit/utils/viz.py | 1 | 1579 | import uuid
from io import StringIO
from typing import TYPE_CHECKING, Dict
from openff.units import unit
try:
from nglview import Trajectory as _NGLViewTrajectory
except ImportError: # pragma: no cover
_NGLViewTrajectory = object
if TYPE_CHECKING:
from openff.toolkit.topology.molecule import Molecule
class _OFFTrajectoryNGLView(_NGLViewTrajectory):
    """
    Handling conformers of an OpenFF Molecule as frames in a trajectory. Only
    to be used for NGLview visualization.

    Parameters
    ----------
    molecule : openff.toolkit.topology.Molecule
        The molecule (with conformers) to visualize
    """

    def __init__(self, molecule: "Molecule"):
        self.molecule = molecule
        # nglview reads ``ext``/``params`` to decide how to parse the output
        # of get_structure_string(); we always serialize as PDB.
        self.ext = "pdb"
        self.params: Dict = {}
        # Unique component id so multiple widgets can coexist in a notebook.
        self.id = str(uuid.uuid4())

    def get_coordinates(self, index: int = 0):
        # Return the coordinates of conformer ``index`` as a plain array in
        # angstrom (units stripped via ``m_as``).
        if index > self.molecule.n_conformers - 1:
            raise IndexError(
                f"Molecule only has {self.molecule.n_conformers} conformers, index "
                f"{index} is too high. Try a smaller index"
            )
        return self.molecule.conformers[index].m_as(unit.angstrom)

    @property
    def n_frames(self) -> int:
        # One trajectory frame per stored conformer.
        return len(self.molecule.conformers)

    def get_structure_string(self):
        # Serialize the molecule to PDB in memory; only the first model is
        # returned since per-frame coordinates come from get_coordinates().
        memfile = StringIO()
        self.molecule.to_file(memfile, "pdb")
        memfile.seek(0)
        block = memfile.getvalue()
        # FIXME: Prevent multi-model PDB export with a keyword in molecule.to_file()?
        models = block.split("END\n", 1)
        return models[0]
| mit | e7ac6b3412521246ac4b3fa30ed4f25b | 28.240741 | 85 | 0.642179 | 3.823245 | false | false | false | false |
evansd/whitenoise | src/whitenoise/media_types.py | 1 | 5195 | from __future__ import annotations
import os
class MediaTypes:
    """Resolve file paths to media (MIME) type strings.

    Lookup order: exact lowercased filename (e.g. ``crossdomain.xml``),
    then file extension, then the ``application/octet-stream`` fallback.
    """

    __slots__ = ("types_map",)

    def __init__(self, *, extra_types: dict[str, str] | None = None) -> None:
        mapping = default_types()
        if extra_types is not None:
            mapping.update(extra_types)
        self.types_map = mapping

    def get_type(self, path: str) -> str:
        filename = os.path.basename(path).lower()
        try:
            # Whole-filename entries take precedence over extension entries.
            return self.types_map[filename]
        except KeyError:
            pass
        _, extension = os.path.splitext(filename)
        return self.types_map.get(extension, "application/octet-stream")
def default_types() -> dict[str, str]:
    """
    We use our own set of default media types rather than the system-supplied
    ones. This ensures consistent media type behaviour across varied
    environments. The defaults are based on those shipped with nginx, with
    some custom additions.

    Keys starting with a dot are file extensions; bare keys (at the bottom)
    match an entire lowercased filename.

    (Auto-generated by scripts/generate_default_media_types.py — edit the
    generator, not this table.)
    """
    return {
        ".3gp": "video/3gpp",
        ".3gpp": "video/3gpp",
        ".7z": "application/x-7z-compressed",
        ".ai": "application/postscript",
        ".asf": "video/x-ms-asf",
        ".asx": "video/x-ms-asf",
        ".atom": "application/atom+xml",
        ".avi": "video/x-msvideo",
        ".avif": "image/avif",
        ".bmp": "image/x-ms-bmp",
        ".cco": "application/x-cocoa",
        ".crt": "application/x-x509-ca-cert",
        ".css": "text/css",
        ".der": "application/x-x509-ca-cert",
        ".doc": "application/msword",
        ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        ".ear": "application/java-archive",
        ".eot": "application/vnd.ms-fontobject",
        ".eps": "application/postscript",
        ".flv": "video/x-flv",
        ".gif": "image/gif",
        ".hqx": "application/mac-binhex40",
        ".htc": "text/x-component",
        ".htm": "text/html",
        ".html": "text/html",
        ".ico": "image/x-icon",
        ".jad": "text/vnd.sun.j2me.app-descriptor",
        ".jar": "application/java-archive",
        ".jardiff": "application/x-java-archive-diff",
        ".jng": "image/x-jng",
        ".jnlp": "application/x-java-jnlp-file",
        ".jpeg": "image/jpeg",
        ".jpg": "image/jpeg",
        ".js": "text/javascript",
        ".json": "application/json",
        ".kar": "audio/midi",
        ".kml": "application/vnd.google-earth.kml+xml",
        ".kmz": "application/vnd.google-earth.kmz",
        ".m3u8": "application/vnd.apple.mpegurl",
        ".m4a": "audio/x-m4a",
        ".m4v": "video/x-m4v",
        ".md": "text/markdown",
        ".mid": "audio/midi",
        ".midi": "audio/midi",
        ".mjs": "text/javascript",
        ".mml": "text/mathml",
        ".mng": "video/x-mng",
        ".mov": "video/quicktime",
        ".mp3": "audio/mpeg",
        ".mp4": "video/mp4",
        ".mpeg": "video/mpeg",
        ".mpg": "video/mpeg",
        ".odg": "application/vnd.oasis.opendocument.graphics",
        ".odp": "application/vnd.oasis.opendocument.presentation",
        ".ods": "application/vnd.oasis.opendocument.spreadsheet",
        ".odt": "application/vnd.oasis.opendocument.text",
        ".ogg": "audio/ogg",
        ".pdb": "application/x-pilot",
        ".pdf": "application/pdf",
        ".pem": "application/x-x509-ca-cert",
        ".pl": "application/x-perl",
        ".pm": "application/x-perl",
        ".png": "image/png",
        ".ppt": "application/vnd.ms-powerpoint",
        ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        ".prc": "application/x-pilot",
        ".ps": "application/postscript",
        ".ra": "audio/x-realaudio",
        ".rar": "application/x-rar-compressed",
        ".rpm": "application/x-redhat-package-manager",
        ".rss": "application/rss+xml",
        ".rtf": "application/rtf",
        ".run": "application/x-makeself",
        ".sea": "application/x-sea",
        ".shtml": "text/html",
        ".sit": "application/x-stuffit",
        ".svg": "image/svg+xml",
        ".svgz": "image/svg+xml",
        ".swf": "application/x-shockwave-flash",
        ".tcl": "application/x-tcl",
        ".tif": "image/tiff",
        ".tiff": "image/tiff",
        ".tk": "application/x-tcl",
        ".ts": "video/mp2t",
        ".txt": "text/plain",
        ".war": "application/java-archive",
        ".wasm": "application/wasm",
        ".wbmp": "image/vnd.wap.wbmp",
        ".webm": "video/webm",
        ".webp": "image/webp",
        ".wml": "text/vnd.wap.wml",
        ".wmlc": "application/vnd.wap.wmlc",
        ".wmv": "video/x-ms-wmv",
        ".woff": "application/font-woff",
        ".woff2": "font/woff2",
        ".xhtml": "application/xhtml+xml",
        ".xls": "application/vnd.ms-excel",
        ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        ".xml": "text/xml",
        ".xpi": "application/x-xpinstall",
        ".xspf": "application/xspf+xml",
        ".zip": "application/zip",
        # Whole-filename entries (no leading dot): matched against the full
        # lowercased basename in MediaTypes.get_type().
        "apple-app-site-association": "application/pkc7-mime",
        "crossdomain.xml": "text/x-cross-domain-policy",
    }
| mit | 7be74d997bac42f714c0c0a5754b1e93 | 36.919708 | 93 | 0.546679 | 3.259097 | false | false | false | false |
pypa/pip | src/pip/_internal/models/wheel.py | 4 | 3600 | """Represents a wheel file and provides access to the various parts of the
name that have meaning.
"""
import re
from typing import Dict, Iterable, List
from pip._vendor.packaging.tags import Tag
from pip._internal.exceptions import InvalidWheelFilename
class Wheel:
    """A wheel file.

    Parses the PEP 427 filename into name/version/build/tag components and
    offers helpers to rank the wheel against a list of supported tags.
    """

    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
        ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE,
    )

    def __init__(self, filename: str) -> None:
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        wheel_info = self.wheel_file_re.match(filename)
        if not wheel_info:
            # Include the offending filename in the message (it had been
            # replaced by a useless literal "(unknown)" placeholder).
            raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
        self.filename = filename
        self.name = wheel_info.group("name").replace("_", "-")
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.version = wheel_info.group("ver").replace("_", "-")
        self.build_tag = wheel_info.group("build")
        self.pyversions = wheel_info.group("pyver").split(".")
        self.abis = wheel_info.group("abi").split(".")
        self.plats = wheel_info.group("plat").split(".")

        # All the tag combinations from this file
        self.file_tags = {
            Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
        }

    def get_formatted_file_tags(self) -> List[str]:
        """Return the wheel's tags as a sorted list of strings."""
        return sorted(str(tag) for tag in self.file_tags)

    def support_index_min(self, tags: List[Tag]) -> int:
        """Return the lowest index that one of the wheel's file_tag combinations
        achieves in the given list of supported tags.

        For example, if there are 8 supported tags and one of the file tags
        is first in the list, then return 0.

        :param tags: the PEP 425 tags to check the wheel against, in order
            with most preferred first.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        try:
            return next(i for i, t in enumerate(tags) if t in self.file_tags)
        except StopIteration:
            # Give the ValueError a message (it was previously raised bare).
            raise ValueError("No tag of this wheel matches the supported tags.")

    def find_most_preferred_tag(
        self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
    ) -> int:
        """Return the priority of the most preferred tag that one of the wheel's file
        tag combinations achieves in the given list of supported tags using the given
        tag_to_priority mapping, where lower priorities are more-preferred.

        This is used in place of support_index_min in some cases in order to avoid
        an expensive linear scan of a large list of tags.

        :param tags: the PEP 425 tags to check the wheel against.
        :param tag_to_priority: a mapping from tag to priority of that tag, where
            lower is more preferred.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        return min(
            tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
        )

    def supported(self, tags: Iterable[Tag]) -> bool:
        """Return whether the wheel is compatible with one of the given tags.

        :param tags: the PEP 425 tags to check the wheel against.
        """
        return not self.file_tags.isdisjoint(tags)
pypa/pip | src/pip/_vendor/packaging/utils.py | 37 | 4200 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
# A wheel build tag: either empty, or (build number, remainder-of-tag string).
BuildTag = Union[Tuple[()], Tuple[int, str]]
# Marker type for project names that have passed through canonicalize_name().
NormalizedName = NewType("NormalizedName", str)
class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found; users should refer to PEP 427 for
    the expected ``name-version(-build)?-pyver-abi-plat.whl`` format.
    """
class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found; users should refer to the packaging
    user guide (expected ``name-version.tar.gz`` or ``name-version.zip``).
    """
# PEP 503: runs of '-', '_' and '.' are equivalent in project names and
# collapse to a single '-' during canonicalization.
_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str) -> NormalizedName:
    """Normalize a project name per PEP 503: collapse runs of ``-``, ``_``
    and ``.`` to a single ``-`` and lowercase the result."""
    # This is taken from PEP 503.
    return cast(NormalizedName, _canonicalize_regex.sub("-", name).lower())
def canonicalize_version(version: Union[Version, str]) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment: trailing ``.0`` components
    are stripped, so ``1.0.0`` and ``1.0`` canonicalize identically.
    Legacy (non-PEP 440) version strings are returned unchanged.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version

    pieces = []

    # Epoch
    if parsed.epoch != 0:
        pieces.append(f"{parsed.epoch}!")

    # Release segment, with trailing '.0's stripped to normalize
    release = ".".join(str(segment) for segment in parsed.release)
    pieces.append(re.sub(r"(\.0)+$", "", release))

    # Pre-release
    if parsed.pre is not None:
        pieces.append("".join(str(segment) for segment in parsed.pre))

    # Post-release
    if parsed.post is not None:
        pieces.append(f".post{parsed.post}")

    # Development release
    if parsed.dev is not None:
        pieces.append(f".dev{parsed.dev}")

    # Local version segment
    if parsed.local is not None:
        pieces.append(f"+{parsed.local}")

    return "".join(pieces)
def parse_wheel_filename(
    filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
    """Parse a PEP 427 wheel filename into (name, version, build tag, tags).

    :raises InvalidWheelFilename: if the filename is malformed.

    NOTE: the error messages below previously contained a literal
    ``"(unknown)"`` placeholder where the offending filename belongs; the
    f-string interpolations have been restored.
    """
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)
    version = Version(parts[1])
    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
    """Parse an sdist filename into (normalized name, version).

    :raises InvalidSdistFilename: if the extension or name-version split
        is malformed.

    NOTE: the error messages below previously contained a literal
    ``"(unknown)"`` placeholder where the offending filename belongs; the
    f-string interpolations have been restored.
    """
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")

    name = canonicalize_name(name_part)
    version = Version(version_part)
    return (name, version)
| mit | 023f906dc28eda2631ea4250197d4e92 | 29.882353 | 88 | 0.630714 | 3.839122 | false | false | false | false |
pypa/pip | src/pip/_internal/commands/hash.py | 9 | 1703 | import hashlib
import logging
import sys
from optparse import Values
from typing import List
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
from pip._internal.utils.misc import read_chunks, write_output
logger = logging.getLogger(__name__)
class HashCommand(Command):
    """
    Compute a hash of a local package archive.

    These can be used with --hash in a requirements file to do repeatable
    installs.
    """

    usage = "%prog [options] <file> ..."
    # Hashing local archives works the same inside or outside a virtualenv.
    ignore_require_venv = True

    def add_options(self) -> None:
        """Register the ``-a/--algorithm`` option, restricted to the hash
        algorithms pip considers strong enough for --hash pinning."""
        self.cmd_opts.add_option(
            "-a",
            "--algorithm",
            dest="algorithm",
            choices=STRONG_HASHES,
            action="store",
            default=FAVORITE_HASH,
            help="The hash algorithm to use: one of {}".format(
                ", ".join(STRONG_HASHES)
            ),
        )
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        """Hash each file given on the command line, printing a
        ``--hash=<algorithm>:<digest>`` line suitable for a requirements file.

        Returns ERROR (and prints usage) when no files were supplied.
        """
        if not args:
            self.parser.print_usage(sys.stderr)
            return ERROR

        algorithm = options.algorithm
        for path in args:
            write_output(
                "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
            )
        return SUCCESS
def _hash_of_file(path: str, algorithm: str) -> str:
    """Return the hex digest of the file at ``path`` using ``algorithm``.

    The file is read in chunks so arbitrarily large archives can be hashed
    without loading them fully into memory.
    """
    with open(path, "rb") as archive:
        # Renamed from ``hash``, which shadowed the builtin of the same name.
        digest = hashlib.new(algorithm)
        for chunk in read_chunks(archive):
            digest.update(chunk)
    return digest.hexdigest()
pypa/pip | src/pip/_internal/utils/filesystem.py | 4 | 5122 | import fnmatch
import os
import os.path
import random
import sys
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Any, BinaryIO, Generator, List, Union, cast
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
from pip._internal.utils.compat import get_path_uid
from pip._internal.utils.misc import format_size
def check_path_owner(path: str) -> bool:
    """Return True if the current user appears to own *path*.

    Walks upward from *path* to the nearest existing ancestor, then checks
    write access (or, when running as root, real file ownership).
    """
    # Without an effective-uid concept (e.g. Windows), assume ownership.
    if sys.platform == "win32" or not hasattr(os, "geteuid"):
        return True

    assert os.path.isabs(path)

    previous = None
    while path != previous:
        if not os.path.lexists(path):
            # Path doesn't exist yet; try its parent on the next pass.
            previous, path = path, os.path.dirname(path)
            continue
        if os.geteuid() != 0:
            # Non-root: writability is the ownership proxy.
            return os.access(path, os.W_OK)
        # Root needs special handling to behave properly when users run
        # sudo without the -H flag: check actual ownership, not access.
        try:
            path_uid = get_path_uid(path)
        except OSError:
            return False
        return path_uid == 0
    return False  # assume we don't own the path
@contextmanager
def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
    """Return a file-like object pointing to a tmp file next to path.

    The file is created securely and is ensured to be written to disk
    after the context reaches its end.

    kwargs will be passed to tempfile.NamedTemporaryFile to control
    the way the temporary file will be opened.
    """
    directory = os.path.dirname(path)
    name_prefix = os.path.basename(path)
    tmp = NamedTemporaryFile(
        delete=False,
        dir=directory,
        prefix=name_prefix,
        suffix=".tmp",
        **kwargs,
    )
    with tmp as handle:
        stream = cast(BinaryIO, handle)
        try:
            yield stream
        finally:
            # Make the bytes durable before the caller (typically) renames
            # this temp file over *path*.
            stream.flush()
            os.fsync(stream.fileno())
# Tenacity raises RetryError by default, explicitly raise the original exception
_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))
# Retrying wrapper around os.replace: retries for up to 1 second, waiting
# 0.25s between attempts, and re-raises the last underlying exception if
# every attempt fails.  NOTE(review): presumably guards against transient
# rename failures (e.g. the destination briefly held open on Windows) —
# confirm against callers.
replace = _replace_retry(os.replace)
# test_writable_dir and _test_writable_dir_win are copied from Flit,
# with the author's agreement to also place them under pip's license.
def test_writable_dir(path: str) -> bool:
    """Check if a directory is writable.

    Uses os.access() on POSIX, tries creating files on Windows.
    """
    # Walk up to the nearest existing directory: the answer for a
    # not-yet-created path is whether its closest ancestor is writable.
    while not os.path.isdir(path):
        parent = os.path.dirname(path)
        if parent == path:
            break  # Should never get here, but infinite loops are bad
        path = parent

    if os.name != "posix":
        return _test_writable_dir_win(path)
    return os.access(path, os.W_OK)
def _test_writable_dir_win(path: str) -> bool:
# os.access doesn't work on Windows: http://bugs.python.org/issue2528
# and we can't use tempfile: http://bugs.python.org/issue22107
basename = "accesstest_deleteme_fishfingers_custard_"
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
for _ in range(10):
name = basename + "".join(random.choice(alphabet) for _ in range(6))
file = os.path.join(path, name)
try:
fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
except FileExistsError:
pass
except PermissionError:
# This could be because there's a directory with the same name.
# But it's highly unlikely there's a directory called that,
# so we'll assume it's because the parent dir is not writable.
# This could as well be because the parent dir is not readable,
# due to non-privileged user access.
return False
else:
os.close(fd)
os.unlink(file)
return True
# This should never be reached
raise OSError("Unexpected condition testing for writable directory")
def find_files(path: str, pattern: str) -> List[str]:
    """Returns a list of absolute paths of files beneath path, recursively,
    with filenames which match the UNIX-style shell glob pattern."""
    return [
        os.path.join(directory, filename)
        for directory, _subdirs, filenames in os.walk(path)
        for filename in fnmatch.filter(filenames, pattern)
    ]
def file_size(path: str) -> Union[int, float]:
    """Size of the file at *path* in bytes; symlinks count as 0."""
    return 0 if os.path.islink(path) else os.path.getsize(path)
def format_file_size(path: str) -> str:
    """Human-readable size of the file at *path*."""
    size = file_size(path)
    return format_size(size)
def directory_size(path: str) -> Union[int, float]:
    """Total size in bytes of all files beneath *path* (symlinks count as 0)."""
    total = 0.0
    for root, _dirs, filenames in os.walk(path):
        total += sum(
            file_size(os.path.join(root, filename)) for filename in filenames
        )
    return total
def format_directory_size(path: str) -> str:
    """Human-readable total size of all files beneath *path*."""
    total = directory_size(path)
    return format_size(total)
| mit | bc3808cff923c0249196cf476f08815a | 32.477124 | 85 | 0.634713 | 3.927914 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.